blob: e890d2d4ec895e9bd94c1b6e935cff2e79219a3d [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020023#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020024#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050027#include <linux/kprobes.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020028#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050031#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040032#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010033#include <linux/string.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080034#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090035#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020036#include <linux/ctype.h>
37#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020038#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050039#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020040#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060041#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020042
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020043#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050044#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010046/*
Steven Rostedt73c51622009-03-11 13:42:01 -040047 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
49 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050050bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040051
52/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010053 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010054 * A selftest will lurk into the ring-buffer to count the
55 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010056 * insertions into the ring-buffer such as trace_printk could occurred
Frederic Weisbeckerff325042008-12-04 23:47:35 +010057 * at the same time, giving false positive or negative results.
58 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010059static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010060
Steven Rostedtb2821ae2009-02-02 21:38:32 -050061/*
62 * If a tracer is running, we do not want to run SELFTEST.
63 */
Li Zefan020e5f82009-07-01 10:47:05 +080064bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050065
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050066/* Pipe tracepoints to printk */
67struct trace_iterator *tracepoint_print_iter;
68int tracepoint_printk;
69
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010070/* For tracers that don't implement custom flags */
71static struct tracer_opt dummy_tracer_opt[] = {
72 { }
73};
74
75static struct tracer_flags dummy_tracer_flags = {
76 .val = 0,
77 .opts = dummy_tracer_opt
78};
79
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050080static int
81dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010082{
83 return 0;
84}
Steven Rostedt0f048702008-11-05 16:05:44 -050085
86/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040087 * To prevent the comm cache from being overwritten when no
88 * tracing is active, only save the comm when a trace event
89 * occurred.
90 */
91static DEFINE_PER_CPU(bool, trace_cmdline_save);
92
93/*
Steven Rostedt0f048702008-11-05 16:05:44 -050094 * Kill all tracing for good (never come back).
95 * It is initialized to 1 but will turn to zero if the initialization
96 * of the tracer is successful. But that is the only place that sets
97 * this back to zero.
98 */
Hannes Eder4fd27352009-02-10 19:44:12 +010099static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -0500100
Christoph Lameter9288f992009-10-07 19:17:45 -0400101DEFINE_PER_CPU(int, ftrace_cpu_disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -0400102
Jason Wessel955b61e2010-08-05 09:22:23 -0500103cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200104
Steven Rostedt944ac422008-10-23 19:26:08 -0400105/*
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 *
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputing it to a
112 * serial console.
113 *
114 * It is default off, but you can enable it with either specifying
115 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400119 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200120
121enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400122
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400123/* When set, tracing will stop when a WARN*() is hit */
124int __disable_trace_on_warning;
125
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500126static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500127
Li Zefanee6c2c12009-09-18 14:06:47 +0800128#define MAX_TRACER_SIZE 100
129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500130static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100131
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500132static bool allocate_snapshot;
133
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200134static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100135{
Chen Gang67012ab2013-04-08 12:06:44 +0800136 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500137 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400138 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500139 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100140 return 1;
141}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200142__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100143
Steven Rostedt944ac422008-10-23 19:26:08 -0400144static int __init set_ftrace_dump_on_oops(char *str)
145{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200146 if (*str++ != '=' || !*str) {
147 ftrace_dump_on_oops = DUMP_ALL;
148 return 1;
149 }
150
151 if (!strcmp("orig_cpu", str)) {
152 ftrace_dump_on_oops = DUMP_ORIG;
153 return 1;
154 }
155
156 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400157}
158__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200159
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400160static int __init stop_trace_on_warning(char *str)
161{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200162 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
163 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400164 return 1;
165}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200166__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400167
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400168static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500169{
170 allocate_snapshot = true;
171 /* We also need the main ring buffer expanded */
172 ring_buffer_expanded = true;
173 return 1;
174}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400175__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500176
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400177
178static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
179static char *trace_boot_options __initdata;
180
181static int __init set_trace_boot_options(char *str)
182{
Chen Gang67012ab2013-04-08 12:06:44 +0800183 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400184 trace_boot_options = trace_boot_options_buf;
185 return 0;
186}
187__setup("trace_options=", set_trace_boot_options);
188
Steven Rostedte1e232c2014-02-10 23:38:46 -0500189static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
190static char *trace_boot_clock __initdata;
191
192static int __init set_trace_boot_clock(char *str)
193{
194 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
195 trace_boot_clock = trace_boot_clock_buf;
196 return 0;
197}
198__setup("trace_clock=", set_trace_boot_clock);
199
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500200static int __init set_tracepoint_printk(char *str)
201{
202 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
203 tracepoint_printk = 1;
204 return 1;
205}
206__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400207
Lai Jiangshancf8e3472009-03-30 13:48:00 +0800208unsigned long long ns2usecs(cycle_t nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200209{
210 nsec += 500;
211 do_div(nsec, 1000);
212 return nsec;
213}
214
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200215/*
216 * The global_trace is the descriptor that holds the tracing
217 * buffers for the live tracing. For each CPU, it contains
218 * a link list of pages that will store trace entries. The
219 * page descriptor of the pages in the memory is used to hold
220 * the link list by linking the lru item in the page descriptor
221 * to each of the pages in the buffer per CPU.
222 *
223 * For each active CPU there is a data field that holds the
224 * pages for the buffer for that CPU. Each CPU has the same number
225 * of pages allocated for its buffer.
226 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200227static struct trace_array global_trace;
228
Steven Rostedtae63b312012-05-03 23:09:03 -0400229LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200230
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400231int trace_array_get(struct trace_array *this_tr)
232{
233 struct trace_array *tr;
234 int ret = -ENODEV;
235
236 mutex_lock(&trace_types_lock);
237 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
238 if (tr == this_tr) {
239 tr->ref++;
240 ret = 0;
241 break;
242 }
243 }
244 mutex_unlock(&trace_types_lock);
245
246 return ret;
247}
248
249static void __trace_array_put(struct trace_array *this_tr)
250{
251 WARN_ON(!this_tr->ref);
252 this_tr->ref--;
253}
254
255void trace_array_put(struct trace_array *this_tr)
256{
257 mutex_lock(&trace_types_lock);
258 __trace_array_put(this_tr);
259 mutex_unlock(&trace_types_lock);
260}
261
Tom Zanussif306cc82013-10-24 08:34:17 -0500262int filter_check_discard(struct ftrace_event_file *file, void *rec,
263 struct ring_buffer *buffer,
264 struct ring_buffer_event *event)
Tom Zanussieb02ce02009-04-08 03:15:54 -0500265{
Tom Zanussif306cc82013-10-24 08:34:17 -0500266 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
267 !filter_match_preds(file->filter, rec)) {
268 ring_buffer_discard_commit(buffer, event);
269 return 1;
270 }
271
272 return 0;
Tom Zanussieb02ce02009-04-08 03:15:54 -0500273}
Tom Zanussif306cc82013-10-24 08:34:17 -0500274EXPORT_SYMBOL_GPL(filter_check_discard);
275
276int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
277 struct ring_buffer *buffer,
278 struct ring_buffer_event *event)
279{
280 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
281 !filter_match_preds(call->filter, rec)) {
282 ring_buffer_discard_commit(buffer, event);
283 return 1;
284 }
285
286 return 0;
287}
288EXPORT_SYMBOL_GPL(call_filter_check_discard);
Tom Zanussieb02ce02009-04-08 03:15:54 -0500289
Fabian Frederickad1438a2014-04-17 21:44:42 +0200290static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400291{
292 u64 ts;
293
294 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700295 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400296 return trace_clock_local();
297
Alexander Z Lam94571582013-08-02 18:36:16 -0700298 ts = ring_buffer_time_stamp(buf->buffer, cpu);
299 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400300
301 return ts;
302}
303
Alexander Z Lam94571582013-08-02 18:36:16 -0700304cycle_t ftrace_now(int cpu)
305{
306 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
307}
308
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400309/**
310 * tracing_is_enabled - Show if global_trace has been disabled
311 *
312 * Shows if the global trace has been enabled or not. It uses the
313 * mirror flag "buffer_disabled" to be used in fast paths such as for
314 * the irqsoff tracer. But it may be inaccurate due to races. If you
315 * need to know the accurate state, use tracing_is_on() which is a little
316 * slower, but accurate.
317 */
Steven Rostedt90369902008-11-05 16:05:44 -0500318int tracing_is_enabled(void)
319{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400320 /*
321 * For quick access (irqsoff uses this in fast path), just
322 * return the mirror variable of the state of the ring buffer.
323 * It's a little racy, but we don't really care.
324 */
325 smp_rmb();
326 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500327}
328
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200329/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400330 * trace_buf_size is the size in bytes that is allocated
331 * for a buffer. Note, the number of bytes is always rounded
332 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400333 *
334 * This number is purposely set to a low number of 16384.
335 * If the dump on oops happens, it will be much appreciated
336 * to not have to wait for all that output. Anyway this can be
337 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200338 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400339#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400340
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400341static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200342
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200343/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200344static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200345
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200346/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200347 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200348 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700349DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200350
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800351/*
352 * serialize the access of the ring buffer
353 *
354 * ring buffer serializes readers, but it is low level protection.
355 * The validity of the events (which returns by ring_buffer_peek() ..etc)
356 * are not protected by ring buffer.
357 *
358 * The content of events may become garbage if we allow other process consumes
359 * these events concurrently:
360 * A) the page of the consumed events may become a normal page
361 * (not reader page) in ring buffer, and this page will be rewrited
362 * by events producer.
363 * B) The page of the consumed events may become a page for splice_read,
364 * and this page will be returned to system.
365 *
366 * These primitives allow multi process access to different cpu ring buffer
367 * concurrently.
368 *
369 * These primitives don't distinguish read-only and read-consume access.
370 * Multi read-only access are also serialized.
371 */
372
373#ifdef CONFIG_SMP
374static DECLARE_RWSEM(all_cpu_access_lock);
375static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
376
377static inline void trace_access_lock(int cpu)
378{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500379 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800380 /* gain it for accessing the whole ring buffer. */
381 down_write(&all_cpu_access_lock);
382 } else {
383 /* gain it for accessing a cpu ring buffer. */
384
Steven Rostedtae3b5092013-01-23 15:22:59 -0500385 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800386 down_read(&all_cpu_access_lock);
387
388 /* Secondly block other access to this @cpu ring buffer. */
389 mutex_lock(&per_cpu(cpu_access_lock, cpu));
390 }
391}
392
393static inline void trace_access_unlock(int cpu)
394{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500395 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800396 up_write(&all_cpu_access_lock);
397 } else {
398 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
399 up_read(&all_cpu_access_lock);
400 }
401}
402
403static inline void trace_access_lock_init(void)
404{
405 int cpu;
406
407 for_each_possible_cpu(cpu)
408 mutex_init(&per_cpu(cpu_access_lock, cpu));
409}
410
411#else
412
413static DEFINE_MUTEX(access_lock);
414
415static inline void trace_access_lock(int cpu)
416{
417 (void)cpu;
418 mutex_lock(&access_lock);
419}
420
421static inline void trace_access_unlock(int cpu)
422{
423 (void)cpu;
424 mutex_unlock(&access_lock);
425}
426
427static inline void trace_access_lock_init(void)
428{
429}
430
431#endif
432
Steven Rostedtee6bce52008-11-12 17:52:37 -0500433/* trace_flags holds trace_options default values */
Steven Rostedt12ef7d42008-11-12 17:52:38 -0500434unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
Steven Rostedta2a16d62009-03-24 23:17:58 -0400435 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
Steven Rostedt77271ce2011-11-17 09:34:33 -0500436 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
Steven Rostedt (Red Hat)328df472013-03-14 12:10:40 -0400437 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
Vaibhav Nagarnaike7e2ee82011-05-10 13:27:21 -0700438
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400439static void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400440{
441 if (tr->trace_buffer.buffer)
442 ring_buffer_record_on(tr->trace_buffer.buffer);
443 /*
444 * This flag is looked at when buffers haven't been allocated
445 * yet, or by some tracers (like irqsoff), that just want to
446 * know if the ring buffer has been disabled, but it can handle
447 * races of where it gets disabled but we still do a record.
448 * As the check is in the fast path of the tracers, it is more
449 * important to be fast than accurate.
450 */
451 tr->buffer_disabled = 0;
452 /* Make the flag seen by readers */
453 smp_wmb();
454}
455
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200456/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500457 * tracing_on - enable tracing buffers
458 *
459 * This function enables tracing buffers that may have been
460 * disabled with tracing_off.
461 */
462void tracing_on(void)
463{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400464 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500465}
466EXPORT_SYMBOL_GPL(tracing_on);
467
468/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500469 * __trace_puts - write a constant string into the trace buffer.
470 * @ip: The address of the caller
471 * @str: The constant string to write
472 * @size: The size of the string.
473 */
474int __trace_puts(unsigned long ip, const char *str, int size)
475{
476 struct ring_buffer_event *event;
477 struct ring_buffer *buffer;
478 struct print_entry *entry;
479 unsigned long irq_flags;
480 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800481 int pc;
482
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800483 if (!(trace_flags & TRACE_ITER_PRINTK))
484 return 0;
485
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800486 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500487
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500488 if (unlikely(tracing_selftest_running || tracing_disabled))
489 return 0;
490
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500491 alloc = sizeof(*entry) + size + 2; /* possible \n added */
492
493 local_save_flags(irq_flags);
494 buffer = global_trace.trace_buffer.buffer;
495 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800496 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500497 if (!event)
498 return 0;
499
500 entry = ring_buffer_event_data(event);
501 entry->ip = ip;
502
503 memcpy(&entry->buf, str, size);
504
505 /* Add a newline if necessary */
506 if (entry->buf[size - 1] != '\n') {
507 entry->buf[size] = '\n';
508 entry->buf[size + 1] = '\0';
509 } else
510 entry->buf[size] = '\0';
511
512 __buffer_unlock_commit(buffer, event);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800513 ftrace_trace_stack(buffer, irq_flags, 4, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500514
515 return size;
516}
517EXPORT_SYMBOL_GPL(__trace_puts);
518
519/**
520 * __trace_bputs - write the pointer to a constant string into trace buffer
521 * @ip: The address of the caller
522 * @str: The constant string to write to the buffer to
523 */
524int __trace_bputs(unsigned long ip, const char *str)
525{
526 struct ring_buffer_event *event;
527 struct ring_buffer *buffer;
528 struct bputs_entry *entry;
529 unsigned long irq_flags;
530 int size = sizeof(struct bputs_entry);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800531 int pc;
532
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800533 if (!(trace_flags & TRACE_ITER_PRINTK))
534 return 0;
535
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800536 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500537
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500538 if (unlikely(tracing_selftest_running || tracing_disabled))
539 return 0;
540
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500541 local_save_flags(irq_flags);
542 buffer = global_trace.trace_buffer.buffer;
543 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800544 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500545 if (!event)
546 return 0;
547
548 entry = ring_buffer_event_data(event);
549 entry->ip = ip;
550 entry->str = str;
551
552 __buffer_unlock_commit(buffer, event);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800553 ftrace_trace_stack(buffer, irq_flags, 4, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500554
555 return 1;
556}
557EXPORT_SYMBOL_GPL(__trace_bputs);
558
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500559#ifdef CONFIG_TRACER_SNAPSHOT
560/**
561 * trace_snapshot - take a snapshot of the current buffer.
562 *
563 * This causes a swap between the snapshot buffer and the current live
564 * tracing buffer. You can use this to take snapshots of the live
565 * trace when some condition is triggered, but continue to trace.
566 *
567 * Note, make sure to allocate the snapshot with either
568 * a tracing_snapshot_alloc(), or by doing it manually
569 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
570 *
571 * If the snapshot buffer is not allocated, it will stop tracing.
572 * Basically making a permanent snapshot.
573 */
574void tracing_snapshot(void)
575{
576 struct trace_array *tr = &global_trace;
577 struct tracer *tracer = tr->current_trace;
578 unsigned long flags;
579
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500580 if (in_nmi()) {
581 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
582 internal_trace_puts("*** snapshot is being ignored ***\n");
583 return;
584 }
585
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500586 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500587 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
588 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500589 tracing_off();
590 return;
591 }
592
593 /* Note, snapshot can not be used when the tracer uses it */
594 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500595 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
596 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500597 return;
598 }
599
600 local_irq_save(flags);
601 update_max_tr(tr, current, smp_processor_id());
602 local_irq_restore(flags);
603}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500604EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500605
606static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
607 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400608static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
609
610static int alloc_snapshot(struct trace_array *tr)
611{
612 int ret;
613
614 if (!tr->allocated_snapshot) {
615
616 /* allocate spare buffer */
617 ret = resize_buffer_duplicate_size(&tr->max_buffer,
618 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
619 if (ret < 0)
620 return ret;
621
622 tr->allocated_snapshot = true;
623 }
624
625 return 0;
626}
627
Fabian Frederickad1438a2014-04-17 21:44:42 +0200628static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400629{
630 /*
631 * We don't free the ring buffer. instead, resize it because
632 * The max_tr ring buffer has some state (e.g. ring->clock) and
633 * we want preserve it.
634 */
635 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
636 set_buffer_entries(&tr->max_buffer, 1);
637 tracing_reset_online_cpus(&tr->max_buffer);
638 tr->allocated_snapshot = false;
639}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500640
641/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500642 * tracing_alloc_snapshot - allocate snapshot buffer.
643 *
644 * This only allocates the snapshot buffer if it isn't already
645 * allocated - it doesn't also take a snapshot.
646 *
647 * This is meant to be used in cases where the snapshot buffer needs
648 * to be set up for events that can't sleep but need to be able to
649 * trigger a snapshot.
650 */
651int tracing_alloc_snapshot(void)
652{
653 struct trace_array *tr = &global_trace;
654 int ret;
655
656 ret = alloc_snapshot(tr);
657 WARN_ON(ret < 0);
658
659 return ret;
660}
661EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
662
663/**
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500664 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
665 *
666 * This is similar to trace_snapshot(), but it will allocate the
667 * snapshot buffer if it isn't already allocated. Use this only
668 * where it is safe to sleep, as the allocation may sleep.
669 *
670 * This causes a swap between the snapshot buffer and the current live
671 * tracing buffer. You can use this to take snapshots of the live
672 * trace when some condition is triggered, but continue to trace.
673 */
674void tracing_snapshot_alloc(void)
675{
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500676 int ret;
677
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500678 ret = tracing_alloc_snapshot();
679 if (ret < 0)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400680 return;
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500681
682 tracing_snapshot();
683}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500684EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500685#else
686void tracing_snapshot(void)
687{
688 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
689}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500690EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500691int tracing_alloc_snapshot(void)
692{
693 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
694 return -ENODEV;
695}
696EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500697void tracing_snapshot_alloc(void)
698{
699 /* Give warning */
700 tracing_snapshot();
701}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500702EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500703#endif /* CONFIG_TRACER_SNAPSHOT */
704
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400705static void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400706{
707 if (tr->trace_buffer.buffer)
708 ring_buffer_record_off(tr->trace_buffer.buffer);
709 /*
710 * This flag is looked at when buffers haven't been allocated
711 * yet, or by some tracers (like irqsoff), that just want to
712 * know if the ring buffer has been disabled, but it can handle
713 * races of where it gets disabled but we still do a record.
714 * As the check is in the fast path of the tracers, it is more
715 * important to be fast than accurate.
716 */
717 tr->buffer_disabled = 1;
718 /* Make the flag seen by readers */
719 smp_wmb();
720}
721
Steven Rostedt499e5472012-02-22 15:50:28 -0500722/**
723 * tracing_off - turn off tracing buffers
724 *
725 * This function stops the tracing buffers from recording data.
726 * It does not disable any overhead the tracers themselves may
727 * be causing. This function simply causes all recording to
728 * the ring buffers to fail.
729 */
730void tracing_off(void)
731{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400732 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500733}
734EXPORT_SYMBOL_GPL(tracing_off);
735
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400736void disable_trace_on_warning(void)
737{
738 if (__disable_trace_on_warning)
739 tracing_off();
740}
741
Steven Rostedt499e5472012-02-22 15:50:28 -0500742/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400743 * tracer_tracing_is_on - show real state of ring buffer enabled
744 * @tr : the trace array to know if ring buffer is enabled
745 *
746 * Shows real state of the ring buffer if it is enabled or not.
747 */
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400748static int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400749{
750 if (tr->trace_buffer.buffer)
751 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
752 return !tr->buffer_disabled;
753}
754
Steven Rostedt499e5472012-02-22 15:50:28 -0500755/**
756 * tracing_is_on - show state of ring buffers enabled
757 */
758int tracing_is_on(void)
759{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400760 return tracer_tracing_is_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500761}
762EXPORT_SYMBOL_GPL(tracing_is_on);
763
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400764static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200765{
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400766 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200767
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200768 if (!str)
769 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +0800770 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200771 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +0800772 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200773 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400774 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200775 return 1;
776}
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400777__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200778
Tim Bird0e950172010-02-25 15:36:43 -0800779static int __init set_tracing_thresh(char *str)
780{
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800781 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -0800782 int ret;
783
784 if (!str)
785 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200786 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -0800787 if (ret < 0)
788 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800789 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -0800790 return 1;
791}
792__setup("tracing_thresh=", set_tracing_thresh);
793
Steven Rostedt57f50be2008-05-12 21:20:44 +0200794unsigned long nsecs_to_usecs(unsigned long nsecs)
795{
796 return nsecs / 1000;
797}
798
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200799/* These must match the bit postions in trace_iterator_flags */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200800static const char *trace_options[] = {
801 "print-parent",
802 "sym-offset",
803 "sym-addr",
804 "verbose",
Ingo Molnarf9896bf2008-05-12 21:20:47 +0200805 "raw",
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +0200806 "hex",
Ingo Molnarcb0f12a2008-05-12 21:20:47 +0200807 "bin",
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +0200808 "block",
Ingo Molnar86387f72008-05-12 21:20:51 +0200809 "stacktrace",
Ingo Molnar5e1607a2009-03-05 10:24:48 +0100810 "trace_printk",
Steven Rostedtb2a866f2008-11-03 23:15:57 -0500811 "ftrace_preempt",
Steven Rostedt9f029e82008-11-12 15:24:24 -0500812 "branch",
Steven Rostedt12ef7d42008-11-12 17:52:38 -0500813 "annotate",
Török Edwin02b67512008-11-22 13:28:47 +0200814 "userstacktrace",
Török Edwinb54d3de2008-11-22 13:28:48 +0200815 "sym-userobj",
Frederic Weisbecker66896a82008-12-13 20:18:13 +0100816 "printk-msg-only",
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -0200817 "context-info",
Steven Rostedtc032ef642009-03-04 20:34:24 -0500818 "latency-format",
Steven Rostedtbe6f1642009-03-24 11:06:24 -0400819 "sleep-time",
Steven Rostedta2a16d62009-03-24 23:17:58 -0400820 "graph-time",
Li Zefane870e9a2010-07-02 11:07:32 +0800821 "record-cmd",
David Sharp750912f2010-12-08 13:46:47 -0800822 "overwrite",
Steven Rostedtcf30cf62011-06-14 22:44:07 -0400823 "disable_on_free",
Steven Rostedt77271ce2011-11-17 09:34:33 -0500824 "irq-info",
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -0700825 "markers",
Steven Rostedt (Red Hat)328df472013-03-14 12:10:40 -0400826 "function-trace",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200827 NULL
828};
829
Zhaolei5079f322009-08-25 16:12:56 +0800830static struct {
831 u64 (*func)(void);
832 const char *name;
David Sharp8be07092012-11-13 12:18:22 -0800833 int in_ns; /* is this clock in nanoseconds? */
Zhaolei5079f322009-08-25 16:12:56 +0800834} trace_clocks[] = {
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +0000835 { trace_clock_local, "local", 1 },
836 { trace_clock_global, "global", 1 },
837 { trace_clock_counter, "counter", 0 },
Linus Torvaldse7fda6c2014-08-05 17:46:42 -0700838 { trace_clock_jiffies, "uptime", 0 },
Thomas Gleixner1b3e5c02014-07-16 21:05:25 +0000839 { trace_clock, "perf", 1 },
840 { ktime_get_mono_fast_ns, "mono", 1 },
David Sharp8cbd9cc2012-11-13 12:18:21 -0800841 ARCH_TRACE_CLOCKS
Zhaolei5079f322009-08-25 16:12:56 +0800842};
843
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200844/*
845 * trace_parser_get_init - gets the buffer for trace parser
846 */
847int trace_parser_get_init(struct trace_parser *parser, int size)
848{
849 memset(parser, 0, sizeof(*parser));
850
851 parser->buffer = kmalloc(size, GFP_KERNEL);
852 if (!parser->buffer)
853 return 1;
854
855 parser->size = size;
856 return 0;
857}
858
859/*
860 * trace_parser_put - frees the buffer for trace parser
861 */
862void trace_parser_put(struct trace_parser *parser)
863{
864 kfree(parser->buffer);
865}
866
867/*
868 * trace_get_user - reads the user input string separated by space
869 * (matched by isspace(ch))
870 *
871 * For each string found the 'struct trace_parser' is updated,
872 * and the function returns.
873 *
874 * Returns number of bytes read.
875 *
876 * See kernel/trace/trace.h for 'struct trace_parser' details.
877 */
878int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
879 size_t cnt, loff_t *ppos)
880{
881 char ch;
882 size_t read = 0;
883 ssize_t ret;
884
885 if (!*ppos)
886 trace_parser_clear(parser);
887
888 ret = get_user(ch, ubuf++);
889 if (ret)
890 goto out;
891
892 read++;
893 cnt--;
894
895 /*
896 * The parser is not finished with the last write,
897 * continue reading the user input without skipping spaces.
898 */
899 if (!parser->cont) {
900 /* skip white space */
901 while (cnt && isspace(ch)) {
902 ret = get_user(ch, ubuf++);
903 if (ret)
904 goto out;
905 read++;
906 cnt--;
907 }
908
909 /* only spaces were written */
910 if (isspace(ch)) {
911 *ppos += read;
912 ret = read;
913 goto out;
914 }
915
916 parser->idx = 0;
917 }
918
919 /* read the non-space input */
920 while (cnt && !isspace(ch)) {
Li Zefan3c235a32009-09-22 13:51:54 +0800921 if (parser->idx < parser->size - 1)
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200922 parser->buffer[parser->idx++] = ch;
923 else {
924 ret = -EINVAL;
925 goto out;
926 }
927 ret = get_user(ch, ubuf++);
928 if (ret)
929 goto out;
930 read++;
931 cnt--;
932 }
933
934 /* We either got finished input or we have to wait for another call. */
935 if (isspace(ch)) {
936 parser->buffer[parser->idx] = 0;
937 parser->cont = false;
Steven Rostedt057db842013-10-09 22:23:23 -0400938 } else if (parser->idx < parser->size - 1) {
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200939 parser->cont = true;
940 parser->buffer[parser->idx++] = ch;
Steven Rostedt057db842013-10-09 22:23:23 -0400941 } else {
942 ret = -EINVAL;
943 goto out;
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200944 }
945
946 *ppos += read;
947 ret = read;
948
949out:
950 return ret;
951}
952
Dmitri Vorobievb8b94262009-03-22 19:11:11 +0200953static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200954{
955 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200956
957 if (s->len <= s->readpos)
958 return -EBUSY;
959
960 len = s->len - s->readpos;
961 if (cnt > len)
962 cnt = len;
Dan Carpenter5a26c8f2012-04-20 09:31:45 +0300963 memcpy(buf, s->buffer + s->readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200964
Steven Rostedte74da522009-03-04 20:31:11 -0500965 s->readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200966 return cnt;
967}
968
Tim Bird0e950172010-02-25 15:36:43 -0800969unsigned long __read_mostly tracing_thresh;
970
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400971#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400972/*
973 * Copy the new maximum trace into the separate maximum-trace
974 * structure. (this way the maximum trace is permanently saved,
975 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
976 */
977static void
978__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
979{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500980 struct trace_buffer *trace_buf = &tr->trace_buffer;
981 struct trace_buffer *max_buf = &tr->max_buffer;
982 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
983 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400984
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -0500985 max_buf->cpu = cpu;
986 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400987
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -0500988 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -0400989 max_data->critical_start = data->critical_start;
990 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400991
Arnaldo Carvalho de Melo1acaa1b2010-03-05 18:23:50 -0300992 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -0400993 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -0400994 /*
995 * If tsk == current, then use current_uid(), as that does not use
996 * RCU. The irq tracer can be called out of RCU scope.
997 */
998 if (tsk == current)
999 max_data->uid = current_uid();
1000 else
1001 max_data->uid = task_uid(tsk);
1002
Steven Rostedt8248ac02009-09-02 12:27:41 -04001003 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1004 max_data->policy = tsk->policy;
1005 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001006
1007 /* record this tasks comm */
1008 tracing_record_cmdline(tsk);
1009}
1010
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001011/**
1012 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1013 * @tr: tracer
1014 * @tsk: the task with the latency
1015 * @cpu: The cpu that initiated the trace.
1016 *
1017 * Flip the buffers between the @tr and the max_tr and record information
1018 * about which task was the cause of this latency.
1019 */
Ingo Molnare309b412008-05-12 21:20:51 +02001020void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001021update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1022{
Steven Rostedt (Red Hat)2721e722013-03-12 11:32:32 -04001023 struct ring_buffer *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001024
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001025 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001026 return;
1027
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001028 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001029
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001030 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001031 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001032 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001033 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001034 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001035
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001036 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001037
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001038 buf = tr->trace_buffer.buffer;
1039 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1040 tr->max_buffer.buffer = buf;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001041
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001042 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001043 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001044}
1045
1046/**
1047 * update_max_tr_single - only copy one trace over, and reset the rest
1048 * @tr - tracer
1049 * @tsk - task with the latency
1050 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001051 *
1052 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001053 */
Ingo Molnare309b412008-05-12 21:20:51 +02001054void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001055update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1056{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001057 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001058
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001059 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001060 return;
1061
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001062 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001063 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001064 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001065 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001066 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001067 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001068
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001069 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001070
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001071 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001072
Steven Rostedte8165db2009-09-03 19:13:05 -04001073 if (ret == -EBUSY) {
1074 /*
1075 * We failed to swap the buffer due to a commit taking
1076 * place on this CPU. We fail to record, but we reset
1077 * the max trace buffer (no one writes directly to it)
1078 * and flag that it failed.
1079 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001080 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165db2009-09-03 19:13:05 -04001081 "Failed to swap buffers due to commit in progress\n");
1082 }
1083
Steven Rostedte8165db2009-09-03 19:13:05 -04001084 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001085
1086 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001087 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001088}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001089#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001090
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001091static int wait_on_pipe(struct trace_iterator *iter)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001092{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001093 /* Iterators are static, they should be filled or empty */
1094 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001095 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001096
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001097 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001098}
1099
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001100#ifdef CONFIG_FTRACE_STARTUP_TEST
1101static int run_tracer_selftest(struct tracer *type)
1102{
1103 struct trace_array *tr = &global_trace;
1104 struct tracer *saved_tracer = tr->current_trace;
1105 int ret;
1106
1107 if (!type->selftest || tracing_selftest_disabled)
1108 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001109
1110 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001111 * Run a selftest on this tracer.
1112 * Here we reset the trace buffer, and set the current
1113 * tracer to be this tracer. The tracer can then run some
1114 * internal tracing to verify that everything is in order.
1115 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001116 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001117 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001118
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001119 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001120
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001121#ifdef CONFIG_TRACER_MAX_TRACE
1122 if (type->use_max_tr) {
1123 /* If we expanded the buffers, make sure the max is expanded too */
1124 if (ring_buffer_expanded)
1125 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1126 RING_BUFFER_ALL_CPUS);
1127 tr->allocated_snapshot = true;
1128 }
1129#endif
1130
1131 /* the test is responsible for initializing and enabling */
1132 pr_info("Testing tracer %s: ", type->name);
1133 ret = type->selftest(type, tr);
1134 /* the test is responsible for resetting too */
1135 tr->current_trace = saved_tracer;
1136 if (ret) {
1137 printk(KERN_CONT "FAILED!\n");
1138 /* Add the warning after printing 'FAILED' */
1139 WARN_ON(1);
1140 return -1;
1141 }
1142 /* Only reset on passing, to avoid touching corrupted buffers */
1143 tracing_reset_online_cpus(&tr->trace_buffer);
1144
1145#ifdef CONFIG_TRACER_MAX_TRACE
1146 if (type->use_max_tr) {
1147 tr->allocated_snapshot = false;
1148
1149 /* Shrink the max buffer again */
1150 if (ring_buffer_expanded)
1151 ring_buffer_resize(tr->max_buffer.buffer, 1,
1152 RING_BUFFER_ALL_CPUS);
1153 }
1154#endif
1155
1156 printk(KERN_CONT "PASSED\n");
1157 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001158}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001159#else
1160static inline int run_tracer_selftest(struct tracer *type)
1161{
1162 return 0;
1163}
1164#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001165
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001166/**
1167 * register_tracer - register a tracer with the ftrace system.
1168 * @type - the plugin for the tracer
1169 *
1170 * Register a new plugin tracer.
1171 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001172int register_tracer(struct tracer *type)
1173{
1174 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001175 int ret = 0;
1176
1177 if (!type->name) {
1178 pr_info("Tracer must have a name\n");
1179 return -1;
1180 }
1181
Dan Carpenter24a461d2010-07-10 12:06:44 +02001182 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001183 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1184 return -1;
1185 }
1186
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001187 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001188
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001189 tracing_selftest_running = true;
1190
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001191 for (t = trace_types; t; t = t->next) {
1192 if (strcmp(type->name, t->name) == 0) {
1193 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001194 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001195 type->name);
1196 ret = -1;
1197 goto out;
1198 }
1199 }
1200
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001201 if (!type->set_flag)
1202 type->set_flag = &dummy_set_flag;
1203 if (!type->flags)
1204 type->flags = &dummy_tracer_flags;
1205 else
1206 if (!type->flags->opts)
1207 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001208
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001209 ret = run_tracer_selftest(type);
1210 if (ret < 0)
1211 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001212
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001213 type->next = trace_types;
1214 trace_types = type;
Steven Rostedt60a11772008-05-12 21:20:44 +02001215
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001216 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001217 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001218 mutex_unlock(&trace_types_lock);
1219
Steven Rostedtdac74942009-02-05 01:13:38 -05001220 if (ret || !default_bootup_tracer)
1221 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001222
Li Zefanee6c2c12009-09-18 14:06:47 +08001223 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001224 goto out_unlock;
1225
1226 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1227 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001228 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001229 default_bootup_tracer = NULL;
1230 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001231 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001232#ifdef CONFIG_FTRACE_STARTUP_TEST
1233 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1234 type->name);
1235#endif
1236
1237 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001238 return ret;
1239}
1240
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001241void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001242{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001243 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001244
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001245 if (!buffer)
1246 return;
1247
Steven Rostedtf6339032009-09-04 12:35:16 -04001248 ring_buffer_record_disable(buffer);
1249
1250 /* Make sure all commits have finished */
1251 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001252 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001253
1254 ring_buffer_record_enable(buffer);
1255}
1256
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001257void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001258{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001259 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001260 int cpu;
1261
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001262 if (!buffer)
1263 return;
1264
Steven Rostedt621968c2009-09-04 12:02:35 -04001265 ring_buffer_record_disable(buffer);
1266
1267 /* Make sure all commits have finished */
1268 synchronize_sched();
1269
Alexander Z Lam94571582013-08-02 18:36:16 -07001270 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001271
1272 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001273 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001274
1275 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001276}
1277
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001278/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001279void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001280{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001281 struct trace_array *tr;
1282
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001283 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001284 tracing_reset_online_cpus(&tr->trace_buffer);
1285#ifdef CONFIG_TRACER_MAX_TRACE
1286 tracing_reset_online_cpus(&tr->max_buffer);
1287#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001288 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001289}
1290
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001291#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001292#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001293static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001294struct saved_cmdlines_buffer {
1295 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1296 unsigned *map_cmdline_to_pid;
1297 unsigned cmdline_num;
1298 int cmdline_idx;
1299 char *saved_cmdlines;
1300};
1301static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001302
Steven Rostedt25b0b442008-05-12 21:21:00 +02001303/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001304static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001305
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001306static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001307{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001308 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1309}
1310
1311static inline void set_cmdline(int idx, const char *cmdline)
1312{
1313 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1314}
1315
1316static int allocate_cmdlines_buffer(unsigned int val,
1317 struct saved_cmdlines_buffer *s)
1318{
1319 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1320 GFP_KERNEL);
1321 if (!s->map_cmdline_to_pid)
1322 return -ENOMEM;
1323
1324 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1325 if (!s->saved_cmdlines) {
1326 kfree(s->map_cmdline_to_pid);
1327 return -ENOMEM;
1328 }
1329
1330 s->cmdline_idx = 0;
1331 s->cmdline_num = val;
1332 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1333 sizeof(s->map_pid_to_cmdline));
1334 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1335 val * sizeof(*s->map_cmdline_to_pid));
1336
1337 return 0;
1338}
1339
1340static int trace_create_savedcmd(void)
1341{
1342 int ret;
1343
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001344 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001345 if (!savedcmd)
1346 return -ENOMEM;
1347
1348 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1349 if (ret < 0) {
1350 kfree(savedcmd);
1351 savedcmd = NULL;
1352 return -ENOMEM;
1353 }
1354
1355 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001356}
1357
Carsten Emdeb5130b12009-09-13 01:43:07 +02001358int is_tracing_stopped(void)
1359{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001360 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001361}
1362
Steven Rostedt0f048702008-11-05 16:05:44 -05001363/**
1364 * tracing_start - quick start of the tracer
1365 *
1366 * If tracing is enabled but was stopped by tracing_stop,
1367 * this will start the tracer back up.
1368 */
1369void tracing_start(void)
1370{
1371 struct ring_buffer *buffer;
1372 unsigned long flags;
1373
1374 if (tracing_disabled)
1375 return;
1376
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001377 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1378 if (--global_trace.stop_count) {
1379 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001380 /* Someone screwed up their debugging */
1381 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001382 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001383 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001384 goto out;
1385 }
1386
Steven Rostedta2f80712010-03-12 19:56:00 -05001387 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001388 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001389
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001390 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001391 if (buffer)
1392 ring_buffer_record_enable(buffer);
1393
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001394#ifdef CONFIG_TRACER_MAX_TRACE
1395 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001396 if (buffer)
1397 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001398#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001399
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001400 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001401
Steven Rostedt0f048702008-11-05 16:05:44 -05001402 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001403 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1404}
1405
1406static void tracing_start_tr(struct trace_array *tr)
1407{
1408 struct ring_buffer *buffer;
1409 unsigned long flags;
1410
1411 if (tracing_disabled)
1412 return;
1413
1414 /* If global, we need to also start the max tracer */
1415 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1416 return tracing_start();
1417
1418 raw_spin_lock_irqsave(&tr->start_lock, flags);
1419
1420 if (--tr->stop_count) {
1421 if (tr->stop_count < 0) {
1422 /* Someone screwed up their debugging */
1423 WARN_ON_ONCE(1);
1424 tr->stop_count = 0;
1425 }
1426 goto out;
1427 }
1428
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001429 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001430 if (buffer)
1431 ring_buffer_record_enable(buffer);
1432
1433 out:
1434 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001435}
1436
1437/**
1438 * tracing_stop - quick stop of the tracer
1439 *
1440 * Light weight way to stop tracing. Use in conjunction with
1441 * tracing_start.
1442 */
1443void tracing_stop(void)
1444{
1445 struct ring_buffer *buffer;
1446 unsigned long flags;
1447
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001448 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1449 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001450 goto out;
1451
Steven Rostedta2f80712010-03-12 19:56:00 -05001452 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001453 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001454
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001455 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001456 if (buffer)
1457 ring_buffer_record_disable(buffer);
1458
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001459#ifdef CONFIG_TRACER_MAX_TRACE
1460 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001461 if (buffer)
1462 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001463#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001464
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001465 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001466
Steven Rostedt0f048702008-11-05 16:05:44 -05001467 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001468 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1469}
1470
1471static void tracing_stop_tr(struct trace_array *tr)
1472{
1473 struct ring_buffer *buffer;
1474 unsigned long flags;
1475
1476 /* If global, we need to also stop the max tracer */
1477 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1478 return tracing_stop();
1479
1480 raw_spin_lock_irqsave(&tr->start_lock, flags);
1481 if (tr->stop_count++)
1482 goto out;
1483
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001484 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001485 if (buffer)
1486 ring_buffer_record_disable(buffer);
1487
1488 out:
1489 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001490}
1491
Ingo Molnare309b412008-05-12 21:20:51 +02001492void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001493
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001494static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495{
Carsten Emdea635cf02009-03-18 09:00:41 +01001496 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497
1498 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001499 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001500
1501 /*
1502 * It's not the end of the world if we don't get
1503 * the lock, but we also don't want to spin
1504 * nor do we want to disable interrupts,
1505 * so if we miss here, then better luck next time.
1506 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001507 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001508 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001509
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001510 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001511 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001512 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001513
Carsten Emdea635cf02009-03-18 09:00:41 +01001514 /*
1515 * Check whether the cmdline buffer at idx has a pid
1516 * mapped. We are going to overwrite that entry so we
1517 * need to clear the map_pid_to_cmdline. Otherwise we
1518 * would read the new comm for the old pid.
1519 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001520 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001521 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001522 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001523
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001524 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1525 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001527 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528 }
1529
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001530 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001531
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001532 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001533
1534 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001535}
1536
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001537static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001538{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001539 unsigned map;
1540
Steven Rostedt4ca53082009-03-16 19:20:15 -04001541 if (!pid) {
1542 strcpy(comm, "<idle>");
1543 return;
1544 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001545
Steven Rostedt74bf4072010-01-25 15:11:53 -05001546 if (WARN_ON_ONCE(pid < 0)) {
1547 strcpy(comm, "<XXX>");
1548 return;
1549 }
1550
Steven Rostedt4ca53082009-03-16 19:20:15 -04001551 if (pid > PID_MAX_DEFAULT) {
1552 strcpy(comm, "<...>");
1553 return;
1554 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001555
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001556 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001557 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001558 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001559 else
1560 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001561}
1562
1563void trace_find_cmdline(int pid, char comm[])
1564{
1565 preempt_disable();
1566 arch_spin_lock(&trace_cmdline_lock);
1567
1568 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001569
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001570 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001571 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001572}
1573
Ingo Molnare309b412008-05-12 21:20:51 +02001574void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001575{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001576 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001577 return;
1578
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001579 if (!__this_cpu_read(trace_cmdline_save))
1580 return;
1581
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001582 if (trace_save_cmdline(tsk))
1583 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001584}
1585
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001586void
Steven Rostedt38697052008-10-01 13:14:09 -04001587tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1588 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001589{
1590 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001591
Steven Rostedt777e2082008-09-29 23:02:42 -04001592 entry->preempt_count = pc & 0xff;
1593 entry->pid = (tsk) ? tsk->pid : 0;
1594 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001595#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001596 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001597#else
1598 TRACE_FLAG_IRQS_NOSUPPORT |
1599#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001600 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1601 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001602 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1603 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001604}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001605EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001606
Steven Rostedte77405a2009-09-02 14:17:06 -04001607struct ring_buffer_event *
1608trace_buffer_lock_reserve(struct ring_buffer *buffer,
1609 int type,
1610 unsigned long len,
1611 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001612{
1613 struct ring_buffer_event *event;
1614
Steven Rostedte77405a2009-09-02 14:17:06 -04001615 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001616 if (event != NULL) {
1617 struct trace_entry *ent = ring_buffer_event_data(event);
1618
1619 tracing_generic_entry_update(ent, flags, pc);
1620 ent->type = type;
1621 }
1622
1623 return event;
1624}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001625
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001626void
1627__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1628{
1629 __this_cpu_write(trace_cmdline_save, true);
1630 ring_buffer_unlock_commit(buffer, event);
1631}
1632
Steven Rostedte77405a2009-09-02 14:17:06 -04001633static inline void
1634__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1635 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001636 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001637{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001638 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001639
Steven Rostedte77405a2009-09-02 14:17:06 -04001640 ftrace_trace_stack(buffer, flags, 6, pc);
1641 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001642}
1643
Steven Rostedte77405a2009-09-02 14:17:06 -04001644void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1645 struct ring_buffer_event *event,
1646 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001647{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001648 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001649}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001650EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001651
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001652static struct ring_buffer *temp_buffer;
1653
Steven Rostedtef5580d2009-02-27 19:38:04 -05001654struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001655trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1656 struct ftrace_event_file *ftrace_file,
1657 int type, unsigned long len,
1658 unsigned long flags, int pc)
1659{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001660 struct ring_buffer_event *entry;
1661
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001662 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001663 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001664 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001665 /*
1666 * If tracing is off, but we have triggers enabled
1667 * we still need to look at the event data. Use the temp_buffer
1668 * to store the trace event for the tigger to use. It's recusive
1669 * safe and will not be recorded anywhere.
1670 */
1671 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1672 *current_rb = temp_buffer;
1673 entry = trace_buffer_lock_reserve(*current_rb,
1674 type, len, flags, pc);
1675 }
1676 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001677}
1678EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1679
1680struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001681trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1682 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001683 unsigned long flags, int pc)
1684{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001685 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001686 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001687 type, len, flags, pc);
1688}
Steven Rostedt94487d62009-05-05 19:22:53 -04001689EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001690
Steven Rostedte77405a2009-09-02 14:17:06 -04001691void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1692 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001693 unsigned long flags, int pc)
1694{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001695 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001696}
Steven Rostedt94487d62009-05-05 19:22:53 -04001697EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001698
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001699void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1700 struct ring_buffer_event *event,
1701 unsigned long flags, int pc,
1702 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001703{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001704 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001705
1706 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1707 ftrace_trace_userstack(buffer, flags, pc);
1708}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001709EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001710
Steven Rostedte77405a2009-09-02 14:17:06 -04001711void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1712 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001713{
Steven Rostedte77405a2009-09-02 14:17:06 -04001714 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001715}
Steven Rostedt12acd472009-04-17 16:01:56 -04001716EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001717
Ingo Molnare309b412008-05-12 21:20:51 +02001718void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001719trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001720 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1721 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001722{
Tom Zanussie1112b42009-03-31 00:48:49 -05001723 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001724 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001725 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001726 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001727
Steven Rostedtd7690412008-10-01 00:29:53 -04001728 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001729 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001730 return;
1731
Steven Rostedte77405a2009-09-02 14:17:06 -04001732 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001733 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001734 if (!event)
1735 return;
1736 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001737 entry->ip = ip;
1738 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001739
Tom Zanussif306cc82013-10-24 08:34:17 -05001740 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001741 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001742}
1743
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001744#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001745
1746#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1747struct ftrace_stack {
1748 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1749};
1750
1751static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1752static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1753
Steven Rostedte77405a2009-09-02 14:17:06 -04001754static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001755 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001756 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001757{
Tom Zanussie1112b42009-03-31 00:48:49 -05001758 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001759 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001760 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001761 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001762 int use_stack;
1763 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001764
1765 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001766 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001767
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001768 /*
1769 * Since events can happen in NMIs there's no safe way to
1770 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1771 * or NMI comes in, it will just have to use the default
1772 * FTRACE_STACK_SIZE.
1773 */
1774 preempt_disable_notrace();
1775
Shan Wei82146522012-11-19 13:21:01 +08001776 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001777 /*
1778 * We don't need any atomic variables, just a barrier.
1779 * If an interrupt comes in, we don't care, because it would
1780 * have exited and put the counter back to what we want.
1781 * We just need a barrier to keep gcc from moving things
1782 * around.
1783 */
1784 barrier();
1785 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001786 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001787 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1788
1789 if (regs)
1790 save_stack_trace_regs(regs, &trace);
1791 else
1792 save_stack_trace(&trace);
1793
1794 if (trace.nr_entries > size)
1795 size = trace.nr_entries;
1796 } else
1797 /* From now on, use_stack is a boolean */
1798 use_stack = 0;
1799
1800 size *= sizeof(unsigned long);
1801
1802 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1803 sizeof(*entry) + size, flags, pc);
1804 if (!event)
1805 goto out;
1806 entry = ring_buffer_event_data(event);
1807
1808 memset(&entry->caller, 0, size);
1809
1810 if (use_stack)
1811 memcpy(&entry->caller, trace.entries,
1812 trace.nr_entries * sizeof(unsigned long));
1813 else {
1814 trace.max_entries = FTRACE_STACK_ENTRIES;
1815 trace.entries = entry->caller;
1816 if (regs)
1817 save_stack_trace_regs(regs, &trace);
1818 else
1819 save_stack_trace(&trace);
1820 }
1821
1822 entry->size = trace.nr_entries;
1823
Tom Zanussif306cc82013-10-24 08:34:17 -05001824 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001825 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001826
1827 out:
1828 /* Again, don't let gcc optimize things here */
1829 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001830 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001831 preempt_enable_notrace();
1832
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001833}
1834
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001835void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1836 int skip, int pc, struct pt_regs *regs)
1837{
1838 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1839 return;
1840
1841 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1842}
1843
Steven Rostedte77405a2009-09-02 14:17:06 -04001844void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1845 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001846{
1847 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1848 return;
1849
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001850 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001851}
1852
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001853void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1854 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001855{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001856 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001857}
1858
Steven Rostedt03889382009-12-11 09:48:22 -05001859/**
1860 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001861 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001862 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001863void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001864{
1865 unsigned long flags;
1866
1867 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001868 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001869
1870 local_save_flags(flags);
1871
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001872 /*
1873 * Skip 3 more, seems to get us at the caller of
1874 * this function.
1875 */
1876 skip += 3;
1877 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1878 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001879}
1880
Steven Rostedt91e86e52010-11-10 12:56:12 +01001881static DEFINE_PER_CPU(int, user_stack_count);
1882
Steven Rostedte77405a2009-09-02 14:17:06 -04001883void
1884ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001885{
Tom Zanussie1112b42009-03-31 00:48:49 -05001886 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001887 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001888 struct userstack_entry *entry;
1889 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001890
1891 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1892 return;
1893
Steven Rostedtb6345872010-03-12 20:03:30 -05001894 /*
1895 * NMIs can not handle page faults, even with fix ups.
1896 * The save user stack can (and often does) fault.
1897 */
1898 if (unlikely(in_nmi()))
1899 return;
1900
Steven Rostedt91e86e52010-11-10 12:56:12 +01001901 /*
1902 * prevent recursion, since the user stack tracing may
1903 * trigger other kernel events.
1904 */
1905 preempt_disable();
1906 if (__this_cpu_read(user_stack_count))
1907 goto out;
1908
1909 __this_cpu_inc(user_stack_count);
1910
Steven Rostedte77405a2009-09-02 14:17:06 -04001911 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001912 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001913 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001914 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001915 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001916
Steven Rostedt48659d32009-09-11 11:36:23 -04001917 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001918 memset(&entry->caller, 0, sizeof(entry->caller));
1919
1920 trace.nr_entries = 0;
1921 trace.max_entries = FTRACE_STACK_ENTRIES;
1922 trace.skip = 0;
1923 trace.entries = entry->caller;
1924
1925 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001926 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001927 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001928
Li Zefan1dbd1952010-12-09 15:47:56 +08001929 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001930 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001931 out:
1932 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001933}
1934
Hannes Eder4fd27352009-02-10 19:44:12 +01001935#ifdef UNUSED
1936static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001937{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001938 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001939}
Hannes Eder4fd27352009-02-10 19:44:12 +01001940#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001941
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001942#endif /* CONFIG_STACKTRACE */
1943
Steven Rostedt07d777f2011-09-22 14:01:55 -04001944/* created for use with alloc_percpu */
1945struct trace_buffer_struct {
1946 char buffer[TRACE_BUF_SIZE];
1947};
1948
1949static struct trace_buffer_struct *trace_percpu_buffer;
1950static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1951static struct trace_buffer_struct *trace_percpu_irq_buffer;
1952static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1953
1954/*
1955 * The buffer used is dependent on the context. There is a per cpu
1956 * buffer for normal context, softirq contex, hard irq context and
1957 * for NMI context. Thise allows for lockless recording.
1958 *
1959 * Note, if the buffers failed to be allocated, then this returns NULL
1960 */
1961static char *get_trace_buf(void)
1962{
1963 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001964
1965 /*
1966 * If we have allocated per cpu buffers, then we do not
1967 * need to do any locking.
1968 */
1969 if (in_nmi())
1970 percpu_buffer = trace_percpu_nmi_buffer;
1971 else if (in_irq())
1972 percpu_buffer = trace_percpu_irq_buffer;
1973 else if (in_softirq())
1974 percpu_buffer = trace_percpu_sirq_buffer;
1975 else
1976 percpu_buffer = trace_percpu_buffer;
1977
1978 if (!percpu_buffer)
1979 return NULL;
1980
Shan Weid8a03492012-11-13 09:53:04 +08001981 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001982}
1983
1984static int alloc_percpu_trace_buffer(void)
1985{
1986 struct trace_buffer_struct *buffers;
1987 struct trace_buffer_struct *sirq_buffers;
1988 struct trace_buffer_struct *irq_buffers;
1989 struct trace_buffer_struct *nmi_buffers;
1990
1991 buffers = alloc_percpu(struct trace_buffer_struct);
1992 if (!buffers)
1993 goto err_warn;
1994
1995 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1996 if (!sirq_buffers)
1997 goto err_sirq;
1998
1999 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2000 if (!irq_buffers)
2001 goto err_irq;
2002
2003 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2004 if (!nmi_buffers)
2005 goto err_nmi;
2006
2007 trace_percpu_buffer = buffers;
2008 trace_percpu_sirq_buffer = sirq_buffers;
2009 trace_percpu_irq_buffer = irq_buffers;
2010 trace_percpu_nmi_buffer = nmi_buffers;
2011
2012 return 0;
2013
2014 err_nmi:
2015 free_percpu(irq_buffers);
2016 err_irq:
2017 free_percpu(sirq_buffers);
2018 err_sirq:
2019 free_percpu(buffers);
2020 err_warn:
2021 WARN(1, "Could not allocate percpu trace_printk buffer");
2022 return -ENOMEM;
2023}
2024
Steven Rostedt81698832012-10-11 10:15:05 -04002025static int buffers_allocated;
2026
Steven Rostedt07d777f2011-09-22 14:01:55 -04002027void trace_printk_init_buffers(void)
2028{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002029 if (buffers_allocated)
2030 return;
2031
2032 if (alloc_percpu_trace_buffer())
2033 return;
2034
Steven Rostedt2184db42014-05-28 13:14:40 -04002035 /* trace_printk() is for debug use only. Don't use it in production. */
2036
2037 pr_warning("\n**********************************************************\n");
2038 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2039 pr_warning("** **\n");
2040 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2041 pr_warning("** **\n");
2042 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2043 pr_warning("** unsafe for produciton use. **\n");
2044 pr_warning("** **\n");
2045 pr_warning("** If you see this message and you are not debugging **\n");
2046 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2047 pr_warning("** **\n");
2048 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2049 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002050
Steven Rostedtb382ede62012-10-10 21:44:34 -04002051 /* Expand the buffers to set size */
2052 tracing_update_buffers();
2053
Steven Rostedt07d777f2011-09-22 14:01:55 -04002054 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002055
2056 /*
2057 * trace_printk_init_buffers() can be called by modules.
2058 * If that happens, then we need to start cmdline recording
2059 * directly here. If the global_trace.buffer is already
2060 * allocated here, then this was called by module code.
2061 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002062 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002063 tracing_start_cmdline_record();
2064}
2065
2066void trace_printk_start_comm(void)
2067{
2068 /* Start tracing comms if trace printk is set */
2069 if (!buffers_allocated)
2070 return;
2071 tracing_start_cmdline_record();
2072}
2073
2074static void trace_printk_start_stop_comm(int enabled)
2075{
2076 if (!buffers_allocated)
2077 return;
2078
2079 if (enabled)
2080 tracing_start_cmdline_record();
2081 else
2082 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002083}
2084
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002085/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002086 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002087 *
2088 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002089int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002090{
Tom Zanussie1112b42009-03-31 00:48:49 -05002091 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002092 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002093 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002094 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002095 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002096 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002097 char *tbuffer;
2098 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002099
2100 if (unlikely(tracing_selftest_running || tracing_disabled))
2101 return 0;
2102
2103 /* Don't pollute graph traces with trace_vprintk internals */
2104 pause_graph_tracing();
2105
2106 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002107 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002108
Steven Rostedt07d777f2011-09-22 14:01:55 -04002109 tbuffer = get_trace_buf();
2110 if (!tbuffer) {
2111 len = 0;
2112 goto out;
2113 }
2114
2115 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2116
2117 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002118 goto out;
2119
Steven Rostedt07d777f2011-09-22 14:01:55 -04002120 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002121 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002122 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002123 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2124 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002125 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002126 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002127 entry = ring_buffer_event_data(event);
2128 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002129 entry->fmt = fmt;
2130
Steven Rostedt07d777f2011-09-22 14:01:55 -04002131 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002132 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002133 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002134 ftrace_trace_stack(buffer, flags, 6, pc);
2135 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002136
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002137out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002138 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002139 unpause_graph_tracing();
2140
2141 return len;
2142}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002143EXPORT_SYMBOL_GPL(trace_vbprintk);
2144
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002145static int
2146__trace_array_vprintk(struct ring_buffer *buffer,
2147 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002148{
Tom Zanussie1112b42009-03-31 00:48:49 -05002149 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002150 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002151 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002152 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002153 unsigned long flags;
2154 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002155
2156 if (tracing_disabled || tracing_selftest_running)
2157 return 0;
2158
Steven Rostedt07d777f2011-09-22 14:01:55 -04002159 /* Don't pollute graph traces with trace_vprintk internals */
2160 pause_graph_tracing();
2161
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002162 pc = preempt_count();
2163 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002164
Steven Rostedt07d777f2011-09-22 14:01:55 -04002165
2166 tbuffer = get_trace_buf();
2167 if (!tbuffer) {
2168 len = 0;
2169 goto out;
2170 }
2171
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002172 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002173
Steven Rostedt07d777f2011-09-22 14:01:55 -04002174 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002175 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002176 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002177 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002178 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002179 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002180 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002181 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002182
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002183 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002184 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002185 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002186 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002187 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002188 out:
2189 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002190 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002191
2192 return len;
2193}
Steven Rostedt659372d2009-09-03 19:11:07 -04002194
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002195int trace_array_vprintk(struct trace_array *tr,
2196 unsigned long ip, const char *fmt, va_list args)
2197{
2198 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2199}
2200
2201int trace_array_printk(struct trace_array *tr,
2202 unsigned long ip, const char *fmt, ...)
2203{
2204 int ret;
2205 va_list ap;
2206
2207 if (!(trace_flags & TRACE_ITER_PRINTK))
2208 return 0;
2209
2210 va_start(ap, fmt);
2211 ret = trace_array_vprintk(tr, ip, fmt, ap);
2212 va_end(ap);
2213 return ret;
2214}
2215
2216int trace_array_printk_buf(struct ring_buffer *buffer,
2217 unsigned long ip, const char *fmt, ...)
2218{
2219 int ret;
2220 va_list ap;
2221
2222 if (!(trace_flags & TRACE_ITER_PRINTK))
2223 return 0;
2224
2225 va_start(ap, fmt);
2226 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2227 va_end(ap);
2228 return ret;
2229}
2230
Steven Rostedt659372d2009-09-03 19:11:07 -04002231int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2232{
Steven Rostedta813a152009-10-09 01:41:35 -04002233 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002234}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002235EXPORT_SYMBOL_GPL(trace_vprintk);
2236
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002237static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002238{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002239 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2240
Steven Rostedt5a90f572008-09-03 17:42:51 -04002241 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002242 if (buf_iter)
2243 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002244}
2245
Ingo Molnare309b412008-05-12 21:20:51 +02002246static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002247peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2248 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002249{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002250 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002251 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002252
Steven Rostedtd7690412008-10-01 00:29:53 -04002253 if (buf_iter)
2254 event = ring_buffer_iter_peek(buf_iter, ts);
2255 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002256 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002257 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002258
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002259 if (event) {
2260 iter->ent_size = ring_buffer_event_length(event);
2261 return ring_buffer_event_data(event);
2262 }
2263 iter->ent_size = 0;
2264 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002265}
Steven Rostedtd7690412008-10-01 00:29:53 -04002266
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002268__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2269 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002270{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002271 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002272 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002273 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002274 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002275 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002276 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002277 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002278 int cpu;
2279
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002280 /*
2281 * If we are in a per_cpu trace file, don't bother by iterating over
2282 * all cpu and peek directly.
2283 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002284 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002285 if (ring_buffer_empty_cpu(buffer, cpu_file))
2286 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002287 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002288 if (ent_cpu)
2289 *ent_cpu = cpu_file;
2290
2291 return ent;
2292 }
2293
Steven Rostedtab464282008-05-12 21:21:00 +02002294 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002295
2296 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002297 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002298
Steven Rostedtbc21b472010-03-31 19:49:26 -04002299 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002300
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002301 /*
2302 * Pick the entry with the smallest timestamp:
2303 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002304 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002305 next = ent;
2306 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002307 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002308 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002309 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002310 }
2311 }
2312
Steven Rostedt12b5da32012-03-27 10:43:28 -04002313 iter->ent_size = next_size;
2314
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002315 if (ent_cpu)
2316 *ent_cpu = next_cpu;
2317
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002318 if (ent_ts)
2319 *ent_ts = next_ts;
2320
Steven Rostedtbc21b472010-03-31 19:49:26 -04002321 if (missing_events)
2322 *missing_events = next_lost;
2323
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002324 return next;
2325}
2326
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002327/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002328struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2329 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002330{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002331 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002332}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002333
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002334/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002335void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002336{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002337 iter->ent = __find_next_entry(iter, &iter->cpu,
2338 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002339
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002340 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002341 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002342
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002343 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002344}
2345
Ingo Molnare309b412008-05-12 21:20:51 +02002346static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002347{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002348 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002349 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002350}
2351
Ingo Molnare309b412008-05-12 21:20:51 +02002352static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353{
2354 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002356 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002357
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002358 WARN_ON_ONCE(iter->leftover);
2359
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002360 (*pos)++;
2361
2362 /* can't go backwards */
2363 if (iter->idx > i)
2364 return NULL;
2365
2366 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002367 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002368 else
2369 ent = iter;
2370
2371 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002372 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002373
2374 iter->pos = *pos;
2375
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002376 return ent;
2377}
2378
Jason Wessel955b61e2010-08-05 09:22:23 -05002379void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002380{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002381 struct ring_buffer_event *event;
2382 struct ring_buffer_iter *buf_iter;
2383 unsigned long entries = 0;
2384 u64 ts;
2385
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002386 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002387
Steven Rostedt6d158a82012-06-27 20:46:14 -04002388 buf_iter = trace_buffer_iter(iter, cpu);
2389 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002390 return;
2391
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002392 ring_buffer_iter_reset(buf_iter);
2393
2394 /*
2395 * We could have the case with the max latency tracers
2396 * that a reset never took place on a cpu. This is evident
2397 * by the timestamp being before the start of the buffer.
2398 */
2399 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002400 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002401 break;
2402 entries++;
2403 ring_buffer_read(buf_iter, NULL);
2404 }
2405
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002406 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002407}
2408
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002409/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002410 * The current tracer is copied to avoid a global locking
2411 * all around.
2412 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002413static void *s_start(struct seq_file *m, loff_t *pos)
2414{
2415 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002416 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002417 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418 void *p = NULL;
2419 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002420 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002422 /*
2423 * copy the tracer to avoid using a global lock all around.
2424 * iter->trace is a copy of current_trace, the pointer to the
2425 * name may be used instead of a strcmp(), as iter->trace->name
2426 * will point to the same string as current_trace->name.
2427 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002428 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002429 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2430 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002431 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002432
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002433#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002434 if (iter->snapshot && iter->trace->use_max_tr)
2435 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002436#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002437
2438 if (!iter->snapshot)
2439 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002440
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002441 if (*pos != iter->pos) {
2442 iter->ent = NULL;
2443 iter->cpu = 0;
2444 iter->idx = -1;
2445
Steven Rostedtae3b5092013-01-23 15:22:59 -05002446 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002447 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002448 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002449 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002450 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002451
Lai Jiangshanac91d852010-03-02 17:54:50 +08002452 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002453 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2454 ;
2455
2456 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002457 /*
2458 * If we overflowed the seq_file before, then we want
2459 * to just reuse the trace_seq buffer again.
2460 */
2461 if (iter->leftover)
2462 p = iter;
2463 else {
2464 l = *pos - 1;
2465 p = s_next(m, p, &l);
2466 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002467 }
2468
Lai Jiangshan4f535962009-05-18 19:35:34 +08002469 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002470 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002471 return p;
2472}
2473
2474static void s_stop(struct seq_file *m, void *p)
2475{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002476 struct trace_iterator *iter = m->private;
2477
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002478#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002479 if (iter->snapshot && iter->trace->use_max_tr)
2480 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002481#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002482
2483 if (!iter->snapshot)
2484 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002485
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002486 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002487 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002488}
2489
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002490static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002491get_total_entries(struct trace_buffer *buf,
2492 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002493{
2494 unsigned long count;
2495 int cpu;
2496
2497 *total = 0;
2498 *entries = 0;
2499
2500 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002501 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002502 /*
2503 * If this buffer has skipped entries, then we hold all
2504 * entries for the trace and we need to ignore the
2505 * ones before the time stamp.
2506 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002507 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2508 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002509 /* total is the same as the entries */
2510 *total += count;
2511 } else
2512 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002513 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002514 *entries += count;
2515 }
2516}
2517
Ingo Molnare309b412008-05-12 21:20:51 +02002518static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002519{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002520 seq_puts(m, "# _------=> CPU# \n"
2521 "# / _-----=> irqs-off \n"
2522 "# | / _----=> need-resched \n"
2523 "# || / _---=> hardirq/softirq \n"
2524 "# ||| / _--=> preempt-depth \n"
2525 "# |||| / delay \n"
2526 "# cmd pid ||||| time | caller \n"
2527 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002528}
2529
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002530static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002531{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002532 unsigned long total;
2533 unsigned long entries;
2534
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002535 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002536 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2537 entries, total, num_online_cpus());
2538 seq_puts(m, "#\n");
2539}
2540
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002541static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002542{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002543 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002544 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2545 "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002546}
2547
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002548static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002549{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002550 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002551 seq_puts(m, "# _-----=> irqs-off\n"
2552 "# / _----=> need-resched\n"
2553 "# | / _---=> hardirq/softirq\n"
2554 "# || / _--=> preempt-depth\n"
2555 "# ||| / delay\n"
2556 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2557 "# | | | |||| | |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05002558}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002559
Jiri Olsa62b915f2010-04-02 19:01:22 +02002560void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002561print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2562{
2563 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002564 struct trace_buffer *buf = iter->trace_buffer;
2565 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002566 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002567 unsigned long entries;
2568 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002569 const char *name = "preemption";
2570
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002571 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002572
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002573 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002574
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002575 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002576 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002577 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002578 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002579 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002580 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002581 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002582 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002583 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002584 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002585#if defined(CONFIG_PREEMPT_NONE)
2586 "server",
2587#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2588 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002589#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002590 "preempt",
2591#else
2592 "unknown",
2593#endif
2594 /* These are reserved for later use */
2595 0, 0, 0, 0);
2596#ifdef CONFIG_SMP
2597 seq_printf(m, " #P:%d)\n", num_online_cpus());
2598#else
2599 seq_puts(m, ")\n");
2600#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002601 seq_puts(m, "# -----------------\n");
2602 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002603 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002604 data->comm, data->pid,
2605 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002606 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002607 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002608
2609 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002610 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002611 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2612 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002613 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002614 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2615 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002616 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002617 }
2618
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002619 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002620}
2621
Steven Rostedta3097202008-11-07 22:36:02 -05002622static void test_cpu_buff_start(struct trace_iterator *iter)
2623{
2624 struct trace_seq *s = &iter->seq;
2625
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002626 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2627 return;
2628
2629 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2630 return;
2631
Rusty Russell44623442009-01-01 10:12:23 +10302632 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002633 return;
2634
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002635 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002636 return;
2637
Rusty Russell44623442009-01-01 10:12:23 +10302638 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002639
2640 /* Don't print started cpu buffer for the first entry of the trace */
2641 if (iter->idx > 1)
2642 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2643 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002644}
2645
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002646static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002647{
Steven Rostedt214023c2008-05-12 21:20:46 +02002648 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002649 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002650 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002651 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002652
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002653 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002654
Steven Rostedta3097202008-11-07 22:36:02 -05002655 test_cpu_buff_start(iter);
2656
Steven Rostedtf633cef2008-12-23 23:24:13 -05002657 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002658
2659 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002660 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2661 trace_print_lat_context(iter);
2662 else
2663 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002664 }
2665
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002666 if (trace_seq_has_overflowed(s))
2667 return TRACE_TYPE_PARTIAL_LINE;
2668
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002669 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002670 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002671
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002672 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002673
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002674 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002675}
2676
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002677static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002678{
2679 struct trace_seq *s = &iter->seq;
2680 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002681 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002682
2683 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002684
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002685 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2686 trace_seq_printf(s, "%d %d %llu ",
2687 entry->pid, iter->cpu, iter->ts);
2688
2689 if (trace_seq_has_overflowed(s))
2690 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002691
Steven Rostedtf633cef2008-12-23 23:24:13 -05002692 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002693 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002694 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002695
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002696 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002697
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002698 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002699}
2700
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002701static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002702{
2703 struct trace_seq *s = &iter->seq;
2704 unsigned char newline = '\n';
2705 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002706 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002707
2708 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002709
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002710 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002711 SEQ_PUT_HEX_FIELD(s, entry->pid);
2712 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2713 SEQ_PUT_HEX_FIELD(s, iter->ts);
2714 if (trace_seq_has_overflowed(s))
2715 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002716 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002717
Steven Rostedtf633cef2008-12-23 23:24:13 -05002718 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002719 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002720 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002721 if (ret != TRACE_TYPE_HANDLED)
2722 return ret;
2723 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002724
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002725 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002726
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002727 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002728}
2729
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002730static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002731{
2732 struct trace_seq *s = &iter->seq;
2733 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002734 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002735
2736 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002737
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002738 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002739 SEQ_PUT_FIELD(s, entry->pid);
2740 SEQ_PUT_FIELD(s, iter->cpu);
2741 SEQ_PUT_FIELD(s, iter->ts);
2742 if (trace_seq_has_overflowed(s))
2743 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002744 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002745
Steven Rostedtf633cef2008-12-23 23:24:13 -05002746 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002747 return event ? event->funcs->binary(iter, 0, event) :
2748 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002749}
2750
Jiri Olsa62b915f2010-04-02 19:01:22 +02002751int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002752{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002753 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002754 int cpu;
2755
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002756 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002757 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002758 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002759 buf_iter = trace_buffer_iter(iter, cpu);
2760 if (buf_iter) {
2761 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002762 return 0;
2763 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002764 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002765 return 0;
2766 }
2767 return 1;
2768 }
2769
Steven Rostedtab464282008-05-12 21:21:00 +02002770 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002771 buf_iter = trace_buffer_iter(iter, cpu);
2772 if (buf_iter) {
2773 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002774 return 0;
2775 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002776 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002777 return 0;
2778 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002779 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002780
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002781 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002782}
2783
Lai Jiangshan4f535962009-05-18 19:35:34 +08002784/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002785enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002786{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002787 enum print_line_t ret;
2788
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002789 if (iter->lost_events) {
2790 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2791 iter->cpu, iter->lost_events);
2792 if (trace_seq_has_overflowed(&iter->seq))
2793 return TRACE_TYPE_PARTIAL_LINE;
2794 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04002795
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002796 if (iter->trace && iter->trace->print_line) {
2797 ret = iter->trace->print_line(iter);
2798 if (ret != TRACE_TYPE_UNHANDLED)
2799 return ret;
2800 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002801
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002802 if (iter->ent->type == TRACE_BPUTS &&
2803 trace_flags & TRACE_ITER_PRINTK &&
2804 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2805 return trace_print_bputs_msg_only(iter);
2806
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002807 if (iter->ent->type == TRACE_BPRINT &&
2808 trace_flags & TRACE_ITER_PRINTK &&
2809 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002810 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002811
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002812 if (iter->ent->type == TRACE_PRINT &&
2813 trace_flags & TRACE_ITER_PRINTK &&
2814 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002815 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002816
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002817 if (trace_flags & TRACE_ITER_BIN)
2818 return print_bin_fmt(iter);
2819
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002820 if (trace_flags & TRACE_ITER_HEX)
2821 return print_hex_fmt(iter);
2822
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002823 if (trace_flags & TRACE_ITER_RAW)
2824 return print_raw_fmt(iter);
2825
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002826 return print_trace_fmt(iter);
2827}
2828
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002829void trace_latency_header(struct seq_file *m)
2830{
2831 struct trace_iterator *iter = m->private;
2832
2833 /* print nothing if the buffers are empty */
2834 if (trace_empty(iter))
2835 return;
2836
2837 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2838 print_trace_header(m, iter);
2839
2840 if (!(trace_flags & TRACE_ITER_VERBOSE))
2841 print_lat_help_header(m);
2842}
2843
Jiri Olsa62b915f2010-04-02 19:01:22 +02002844void trace_default_header(struct seq_file *m)
2845{
2846 struct trace_iterator *iter = m->private;
2847
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002848 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2849 return;
2850
Jiri Olsa62b915f2010-04-02 19:01:22 +02002851 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2852 /* print nothing if the buffers are empty */
2853 if (trace_empty(iter))
2854 return;
2855 print_trace_header(m, iter);
2856 if (!(trace_flags & TRACE_ITER_VERBOSE))
2857 print_lat_help_header(m);
2858 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002859 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2860 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002861 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002862 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002863 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002864 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002865 }
2866}
2867
Steven Rostedte0a413f2011-09-29 21:26:16 -04002868static void test_ftrace_alive(struct seq_file *m)
2869{
2870 if (!ftrace_is_dead())
2871 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002872 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2873 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002874}
2875
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002876#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002877static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002878{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002879 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2880 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2881 "# Takes a snapshot of the main buffer.\n"
2882 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2883 "# (Doesn't have to be '2' works with any number that\n"
2884 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002885}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002886
2887static void show_snapshot_percpu_help(struct seq_file *m)
2888{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002889 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002890#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002891 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2892 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002893#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002894 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2895 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002896#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002897 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2898 "# (Doesn't have to be '2' works with any number that\n"
2899 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002900}
2901
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002902static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2903{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002904 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002905 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002906 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002907 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002908
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002909 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002910 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2911 show_snapshot_main_help(m);
2912 else
2913 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002914}
2915#else
2916/* Should never be called */
2917static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2918#endif
2919
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002920static int s_show(struct seq_file *m, void *v)
2921{
2922 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002923 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002924
2925 if (iter->ent == NULL) {
2926 if (iter->tr) {
2927 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2928 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002929 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002930 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002931 if (iter->snapshot && trace_empty(iter))
2932 print_snapshot_help(m, iter);
2933 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002934 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002935 else
2936 trace_default_header(m);
2937
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002938 } else if (iter->leftover) {
2939 /*
2940 * If we filled the seq_file buffer earlier, we
2941 * want to just show it now.
2942 */
2943 ret = trace_print_seq(m, &iter->seq);
2944
2945 /* ret should this time be zero, but you never know */
2946 iter->leftover = ret;
2947
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002948 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002949 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002950 ret = trace_print_seq(m, &iter->seq);
2951 /*
2952 * If we overflow the seq_file buffer, then it will
2953 * ask us for this data again at start up.
2954 * Use that instead.
2955 * ret is 0 if seq_file write succeeded.
2956 * -1 otherwise.
2957 */
2958 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002959 }
2960
2961 return 0;
2962}
2963
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002964/*
2965 * Should be used after trace_array_get(), trace_types_lock
2966 * ensures that i_cdev was already initialized.
2967 */
2968static inline int tracing_get_cpu(struct inode *inode)
2969{
2970 if (inode->i_cdev) /* See trace_create_cpu_file() */
2971 return (long)inode->i_cdev - 1;
2972 return RING_BUFFER_ALL_CPUS;
2973}
2974
James Morris88e9d342009-09-22 16:43:43 -07002975static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002976 .start = s_start,
2977 .next = s_next,
2978 .stop = s_stop,
2979 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002980};
2981
Ingo Molnare309b412008-05-12 21:20:51 +02002982static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002983__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002984{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002985 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002986 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002987 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002988
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002989 if (tracing_disabled)
2990 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002991
Jiri Olsa50e18b92012-04-25 10:23:39 +02002992 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002993 if (!iter)
2994 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002995
Steven Rostedt6d158a82012-06-27 20:46:14 -04002996 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2997 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002998 if (!iter->buffer_iter)
2999 goto release;
3000
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003001 /*
3002 * We make a copy of the current tracer to avoid concurrent
3003 * changes on it while we are reading.
3004 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003005 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003006 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003007 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003008 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003009
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003010 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003011
Li Zefan79f55992009-06-15 14:58:26 +08003012 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003013 goto fail;
3014
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003015 iter->tr = tr;
3016
3017#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003018 /* Currently only the top directory has a snapshot */
3019 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003020 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003021 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003022#endif
3023 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003024 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003025 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003026 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003027 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003028
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003029 /* Notify the tracer early; before we stop tracing. */
3030 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003031 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003032
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003033 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003034 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003035 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3036
David Sharp8be07092012-11-13 12:18:22 -08003037 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003038 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003039 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3040
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003041 /* stop the trace while dumping if we are not opening "snapshot" */
3042 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003043 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003044
Steven Rostedtae3b5092013-01-23 15:22:59 -05003045 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003046 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003047 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003048 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003049 }
3050 ring_buffer_read_prepare_sync();
3051 for_each_tracing_cpu(cpu) {
3052 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003053 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003054 }
3055 } else {
3056 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003057 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003058 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003059 ring_buffer_read_prepare_sync();
3060 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003061 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003062 }
3063
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003064 mutex_unlock(&trace_types_lock);
3065
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003066 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003067
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003068 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003069 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003070 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003071 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003072release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003073 seq_release_private(inode, file);
3074 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003075}
3076
3077int tracing_open_generic(struct inode *inode, struct file *filp)
3078{
Steven Rostedt60a11772008-05-12 21:20:44 +02003079 if (tracing_disabled)
3080 return -ENODEV;
3081
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003082 filp->private_data = inode->i_private;
3083 return 0;
3084}
3085
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003086bool tracing_is_disabled(void)
3087{
3088 return (tracing_disabled) ? true: false;
3089}
3090
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003091/*
3092 * Open and update trace_array ref count.
3093 * Must have the current trace_array passed to it.
3094 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003095static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003096{
3097 struct trace_array *tr = inode->i_private;
3098
3099 if (tracing_disabled)
3100 return -ENODEV;
3101
3102 if (trace_array_get(tr) < 0)
3103 return -ENODEV;
3104
3105 filp->private_data = inode->i_private;
3106
3107 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003108}
3109
Hannes Eder4fd27352009-02-10 19:44:12 +01003110static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003111{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003112 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003113 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003114 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003115 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003116
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003117 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003118 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003119 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003120 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003121
Oleg Nesterov6484c712013-07-23 17:26:10 +02003122 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003123 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003124 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003125
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003126 for_each_tracing_cpu(cpu) {
3127 if (iter->buffer_iter[cpu])
3128 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3129 }
3130
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003131 if (iter->trace && iter->trace->close)
3132 iter->trace->close(iter);
3133
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003134 if (!iter->snapshot)
3135 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003136 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003137
3138 __trace_array_put(tr);
3139
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003140 mutex_unlock(&trace_types_lock);
3141
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003142 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003143 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003144 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003145 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003146 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003147
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003148 return 0;
3149}
3150
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003151static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3152{
3153 struct trace_array *tr = inode->i_private;
3154
3155 trace_array_put(tr);
3156 return 0;
3157}
3158
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003159static int tracing_single_release_tr(struct inode *inode, struct file *file)
3160{
3161 struct trace_array *tr = inode->i_private;
3162
3163 trace_array_put(tr);
3164
3165 return single_release(inode, file);
3166}
3167
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003168static int tracing_open(struct inode *inode, struct file *file)
3169{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003170 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003171 struct trace_iterator *iter;
3172 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003173
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003174 if (trace_array_get(tr) < 0)
3175 return -ENODEV;
3176
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003177 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003178 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3179 int cpu = tracing_get_cpu(inode);
3180
3181 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003182 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003183 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003184 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003185 }
3186
3187 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003188 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003189 if (IS_ERR(iter))
3190 ret = PTR_ERR(iter);
3191 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3192 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3193 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003194
3195 if (ret < 0)
3196 trace_array_put(tr);
3197
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003198 return ret;
3199}
3200
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003201/*
3202 * Some tracers are not suitable for instance buffers.
3203 * A tracer is always available for the global array (toplevel)
3204 * or if it explicitly states that it is.
3205 */
3206static bool
3207trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3208{
3209 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3210}
3211
3212/* Find the next tracer that this trace array may use */
3213static struct tracer *
3214get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3215{
3216 while (t && !trace_ok_for_array(t, tr))
3217 t = t->next;
3218
3219 return t;
3220}
3221
Ingo Molnare309b412008-05-12 21:20:51 +02003222static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003223t_next(struct seq_file *m, void *v, loff_t *pos)
3224{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003225 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003226 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003227
3228 (*pos)++;
3229
3230 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003231 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003232
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003233 return t;
3234}
3235
3236static void *t_start(struct seq_file *m, loff_t *pos)
3237{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003238 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003239 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003240 loff_t l = 0;
3241
3242 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003243
3244 t = get_tracer_for_array(tr, trace_types);
3245 for (; t && l < *pos; t = t_next(m, t, &l))
3246 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003247
3248 return t;
3249}
3250
3251static void t_stop(struct seq_file *m, void *p)
3252{
3253 mutex_unlock(&trace_types_lock);
3254}
3255
3256static int t_show(struct seq_file *m, void *v)
3257{
3258 struct tracer *t = v;
3259
3260 if (!t)
3261 return 0;
3262
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003263 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003264 if (t->next)
3265 seq_putc(m, ' ');
3266 else
3267 seq_putc(m, '\n');
3268
3269 return 0;
3270}
3271
James Morris88e9d342009-09-22 16:43:43 -07003272static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003273 .start = t_start,
3274 .next = t_next,
3275 .stop = t_stop,
3276 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003277};
3278
3279static int show_traces_open(struct inode *inode, struct file *file)
3280{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003281 struct trace_array *tr = inode->i_private;
3282 struct seq_file *m;
3283 int ret;
3284
Steven Rostedt60a11772008-05-12 21:20:44 +02003285 if (tracing_disabled)
3286 return -ENODEV;
3287
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003288 ret = seq_open(file, &show_traces_seq_ops);
3289 if (ret)
3290 return ret;
3291
3292 m = file->private_data;
3293 m->private = tr;
3294
3295 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003296}
3297
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003298static ssize_t
3299tracing_write_stub(struct file *filp, const char __user *ubuf,
3300 size_t count, loff_t *ppos)
3301{
3302 return count;
3303}
3304
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003305loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003306{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003307 int ret;
3308
Slava Pestov364829b2010-11-24 15:13:16 -08003309 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003310 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003311 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003312 file->f_pos = ret = 0;
3313
3314 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003315}
3316
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003317static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003318 .open = tracing_open,
3319 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003320 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003321 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003322 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003323};
3324
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003325static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003326 .open = show_traces_open,
3327 .read = seq_read,
3328 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003329 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003330};
3331
Ingo Molnar36dfe922008-05-12 21:20:52 +02003332/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003333 * The tracer itself will not take this lock, but still we want
3334 * to provide a consistent cpumask to user-space:
3335 */
3336static DEFINE_MUTEX(tracing_cpumask_update_lock);
3337
3338/*
3339 * Temporary storage for the character representation of the
3340 * CPU bitmask (and one more byte for the newline):
3341 */
3342static char mask_str[NR_CPUS + 1];
3343
Ingo Molnarc7078de2008-05-12 21:20:52 +02003344static ssize_t
3345tracing_cpumask_read(struct file *filp, char __user *ubuf,
3346 size_t count, loff_t *ppos)
3347{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003348 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003349 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003350
3351 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003352
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003353 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003354 if (count - len < 2) {
3355 count = -EINVAL;
3356 goto out_err;
3357 }
3358 len += sprintf(mask_str + len, "\n");
3359 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3360
3361out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003362 mutex_unlock(&tracing_cpumask_update_lock);
3363
3364 return count;
3365}
3366
3367static ssize_t
3368tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3369 size_t count, loff_t *ppos)
3370{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003371 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303372 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003373 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303374
3375 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3376 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003377
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303378 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003379 if (err)
3380 goto err_unlock;
3381
Li Zefan215368e2009-06-15 10:56:42 +08003382 mutex_lock(&tracing_cpumask_update_lock);
3383
Steven Rostedta5e25882008-12-02 15:34:05 -05003384 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003385 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003386 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003387 /*
3388 * Increase/decrease the disabled counter if we are
3389 * about to flip a bit in the cpumask:
3390 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003391 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303392 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003393 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3394 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003395 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003396 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303397 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003398 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3399 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003400 }
3401 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003402 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003403 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003404
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003405 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003406
Ingo Molnarc7078de2008-05-12 21:20:52 +02003407 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303408 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003409
Ingo Molnarc7078de2008-05-12 21:20:52 +02003410 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003411
3412err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003413 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003414
3415 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003416}
3417
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003418static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003419 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003420 .read = tracing_cpumask_read,
3421 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003422 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003423 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003424};
3425
Li Zefanfdb372e2009-12-08 11:15:59 +08003426static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003427{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003428 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003429 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003430 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003431 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003432
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003433 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003434 tracer_flags = tr->current_trace->flags->val;
3435 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003436
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003437 for (i = 0; trace_options[i]; i++) {
3438 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003439 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003440 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003441 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003442 }
3443
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003444 for (i = 0; trace_opts[i].name; i++) {
3445 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003446 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003447 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003448 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003449 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003450 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003451
Li Zefanfdb372e2009-12-08 11:15:59 +08003452 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003453}
3454
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003455static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003456 struct tracer_flags *tracer_flags,
3457 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003458{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003459 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003460 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003461
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003462 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003463 if (ret)
3464 return ret;
3465
3466 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003467 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003468 else
Zhaolei77708412009-08-07 18:53:21 +08003469 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003470 return 0;
3471}
3472
Li Zefan8d18eaa2009-12-08 11:17:06 +08003473/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003474static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003475{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003476 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003477 struct tracer_flags *tracer_flags = trace->flags;
3478 struct tracer_opt *opts = NULL;
3479 int i;
3480
3481 for (i = 0; tracer_flags->opts[i].name; i++) {
3482 opts = &tracer_flags->opts[i];
3483
3484 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003485 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003486 }
3487
3488 return -EINVAL;
3489}
3490
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003491/* Some tracers require overwrite to stay enabled */
3492int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3493{
3494 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3495 return -1;
3496
3497 return 0;
3498}
3499
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003500int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003501{
3502 /* do nothing if flag is already set */
3503 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003504 return 0;
3505
3506 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003507 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003508 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003509 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003510
3511 if (enabled)
3512 trace_flags |= mask;
3513 else
3514 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003515
3516 if (mask == TRACE_ITER_RECORD_CMD)
3517 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003518
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003519 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003520 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003521#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003522 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003523#endif
3524 }
Steven Rostedt81698832012-10-11 10:15:05 -04003525
3526 if (mask == TRACE_ITER_PRINTK)
3527 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003528
3529 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003530}
3531
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003532static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003533{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003534 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003535 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003536 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003537 int i;
3538
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003539 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003540
Li Zefan8d18eaa2009-12-08 11:17:06 +08003541 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003542 neg = 1;
3543 cmp += 2;
3544 }
3545
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003546 mutex_lock(&trace_types_lock);
3547
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003548 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003549 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003550 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003551 break;
3552 }
3553 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003554
3555 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003556 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003557 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003558
3559 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003560
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003561 return ret;
3562}
3563
3564static ssize_t
3565tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3566 size_t cnt, loff_t *ppos)
3567{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003568 struct seq_file *m = filp->private_data;
3569 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003570 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003571 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003572
3573 if (cnt >= sizeof(buf))
3574 return -EINVAL;
3575
3576 if (copy_from_user(&buf, ubuf, cnt))
3577 return -EFAULT;
3578
Steven Rostedta8dd2172013-01-09 20:54:17 -05003579 buf[cnt] = 0;
3580
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003581 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003582 if (ret < 0)
3583 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003584
Jiri Olsacf8517c2009-10-23 19:36:16 -04003585 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003586
3587 return cnt;
3588}
3589
Li Zefanfdb372e2009-12-08 11:15:59 +08003590static int tracing_trace_options_open(struct inode *inode, struct file *file)
3591{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003592 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003593 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003594
Li Zefanfdb372e2009-12-08 11:15:59 +08003595 if (tracing_disabled)
3596 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003597
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003598 if (trace_array_get(tr) < 0)
3599 return -ENODEV;
3600
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003601 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3602 if (ret < 0)
3603 trace_array_put(tr);
3604
3605 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003606}
3607
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003608static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003609 .open = tracing_trace_options_open,
3610 .read = seq_read,
3611 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003612 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003613 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003614};
3615
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003616static const char readme_msg[] =
3617 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003618 "# echo 0 > tracing_on : quick way to disable tracing\n"
3619 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3620 " Important files:\n"
3621 " trace\t\t\t- The static contents of the buffer\n"
3622 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3623 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3624 " current_tracer\t- function and latency tracers\n"
3625 " available_tracers\t- list of configured tracers for current_tracer\n"
3626 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3627 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3628 " trace_clock\t\t-change the clock used to order events\n"
3629 " local: Per cpu clock but may not be synced across CPUs\n"
3630 " global: Synced across CPUs but slows tracing down.\n"
3631 " counter: Not a clock, but just an increment\n"
3632 " uptime: Jiffy counter from time of boot\n"
3633 " perf: Same clock that perf events use\n"
3634#ifdef CONFIG_X86_64
3635 " x86-tsc: TSC cycle counter\n"
3636#endif
3637 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3638 " tracing_cpumask\t- Limit which CPUs to trace\n"
3639 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3640 "\t\t\t Remove sub-buffer with rmdir\n"
3641 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003642 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3643 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003644 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003645#ifdef CONFIG_DYNAMIC_FTRACE
3646 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003647 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3648 "\t\t\t functions\n"
3649 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3650 "\t modules: Can select a group via module\n"
3651 "\t Format: :mod:<module-name>\n"
3652 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3653 "\t triggers: a command to perform when function is hit\n"
3654 "\t Format: <function>:<trigger>[:count]\n"
3655 "\t trigger: traceon, traceoff\n"
3656 "\t\t enable_event:<system>:<event>\n"
3657 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003658#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003659 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003660#endif
3661#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003662 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003663#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003664 "\t\t dump\n"
3665 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003666 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3667 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3668 "\t The first one will disable tracing every time do_fault is hit\n"
3669 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3670 "\t The first time do trap is hit and it disables tracing, the\n"
3671 "\t counter will decrement to 2. If tracing is already disabled,\n"
3672 "\t the counter will not decrement. It only decrements when the\n"
3673 "\t trigger did work\n"
3674 "\t To remove trigger without count:\n"
3675 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3676 "\t To remove trigger with a count:\n"
3677 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003678 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003679 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3680 "\t modules: Can select a group via module command :mod:\n"
3681 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003682#endif /* CONFIG_DYNAMIC_FTRACE */
3683#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003684 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3685 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003686#endif
3687#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3688 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003689 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003690 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3691#endif
3692#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003693 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3694 "\t\t\t snapshot buffer. Read the contents for more\n"
3695 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003696#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003697#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003698 " stack_trace\t\t- Shows the max stack trace when active\n"
3699 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003700 "\t\t\t Write into this file to reset the max size (trigger a\n"
3701 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003702#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003703 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3704 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003705#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003706#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003707 " events/\t\t- Directory containing all trace event subsystems:\n"
3708 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3709 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003710 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3711 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003712 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003713 " events/<system>/<event>/\t- Directory containing control files for\n"
3714 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003715 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3716 " filter\t\t- If set, only events passing filter are traced\n"
3717 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003718 "\t Format: <trigger>[:count][if <filter>]\n"
3719 "\t trigger: traceon, traceoff\n"
3720 "\t enable_event:<system>:<event>\n"
3721 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003722#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003723 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003724#endif
3725#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003726 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003727#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003728 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3729 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3730 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3731 "\t events/block/block_unplug/trigger\n"
3732 "\t The first disables tracing every time block_unplug is hit.\n"
3733 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3734 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3735 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3736 "\t Like function triggers, the counter is only decremented if it\n"
3737 "\t enabled or disabled tracing.\n"
3738 "\t To remove a trigger without a count:\n"
3739 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3740 "\t To remove a trigger with a count:\n"
3741 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3742 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003743;
3744
3745static ssize_t
3746tracing_readme_read(struct file *filp, char __user *ubuf,
3747 size_t cnt, loff_t *ppos)
3748{
3749 return simple_read_from_buffer(ubuf, cnt, ppos,
3750 readme_msg, strlen(readme_msg));
3751}
3752
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003753static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003754 .open = tracing_open_generic,
3755 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003756 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003757};
3758
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003759static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003760{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003761 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003762
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003763 if (*pos || m->count)
3764 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003765
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003766 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003767
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003768 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3769 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003770 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003771 continue;
3772
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003773 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003774 }
3775
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003776 return NULL;
3777}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003778
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003779static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3780{
3781 void *v;
3782 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003783
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003784 preempt_disable();
3785 arch_spin_lock(&trace_cmdline_lock);
3786
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003787 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003788 while (l <= *pos) {
3789 v = saved_cmdlines_next(m, v, &l);
3790 if (!v)
3791 return NULL;
3792 }
3793
3794 return v;
3795}
3796
3797static void saved_cmdlines_stop(struct seq_file *m, void *v)
3798{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003799 arch_spin_unlock(&trace_cmdline_lock);
3800 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003801}
3802
3803static int saved_cmdlines_show(struct seq_file *m, void *v)
3804{
3805 char buf[TASK_COMM_LEN];
3806 unsigned int *pid = v;
3807
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003808 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003809 seq_printf(m, "%d %s\n", *pid, buf);
3810 return 0;
3811}
3812
3813static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3814 .start = saved_cmdlines_start,
3815 .next = saved_cmdlines_next,
3816 .stop = saved_cmdlines_stop,
3817 .show = saved_cmdlines_show,
3818};
3819
3820static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3821{
3822 if (tracing_disabled)
3823 return -ENODEV;
3824
3825 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003826}
3827
3828static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003829 .open = tracing_saved_cmdlines_open,
3830 .read = seq_read,
3831 .llseek = seq_lseek,
3832 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003833};
3834
3835static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003836tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3837 size_t cnt, loff_t *ppos)
3838{
3839 char buf[64];
3840 int r;
3841
3842 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003843 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003844 arch_spin_unlock(&trace_cmdline_lock);
3845
3846 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3847}
3848
3849static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3850{
3851 kfree(s->saved_cmdlines);
3852 kfree(s->map_cmdline_to_pid);
3853 kfree(s);
3854}
3855
3856static int tracing_resize_saved_cmdlines(unsigned int val)
3857{
3858 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3859
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003860 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003861 if (!s)
3862 return -ENOMEM;
3863
3864 if (allocate_cmdlines_buffer(val, s) < 0) {
3865 kfree(s);
3866 return -ENOMEM;
3867 }
3868
3869 arch_spin_lock(&trace_cmdline_lock);
3870 savedcmd_temp = savedcmd;
3871 savedcmd = s;
3872 arch_spin_unlock(&trace_cmdline_lock);
3873 free_saved_cmdlines_buffer(savedcmd_temp);
3874
3875 return 0;
3876}
3877
3878static ssize_t
3879tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3880 size_t cnt, loff_t *ppos)
3881{
3882 unsigned long val;
3883 int ret;
3884
3885 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3886 if (ret)
3887 return ret;
3888
3889 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3890 if (!val || val > PID_MAX_DEFAULT)
3891 return -EINVAL;
3892
3893 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3894 if (ret < 0)
3895 return ret;
3896
3897 *ppos += cnt;
3898
3899 return cnt;
3900}
3901
3902static const struct file_operations tracing_saved_cmdlines_size_fops = {
3903 .open = tracing_open_generic,
3904 .read = tracing_saved_cmdlines_size_read,
3905 .write = tracing_saved_cmdlines_size_write,
3906};
3907
3908static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003909tracing_set_trace_read(struct file *filp, char __user *ubuf,
3910 size_t cnt, loff_t *ppos)
3911{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003912 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003913 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003914 int r;
3915
3916 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003917 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003918 mutex_unlock(&trace_types_lock);
3919
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003920 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003921}
3922
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003923int tracer_init(struct tracer *t, struct trace_array *tr)
3924{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003925 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003926 return t->init(tr);
3927}
3928
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003929static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003930{
3931 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003932
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003933 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003934 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003935}
3936
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003937#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003938/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003939static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3940 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003941{
3942 int cpu, ret = 0;
3943
3944 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3945 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003946 ret = ring_buffer_resize(trace_buf->buffer,
3947 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003948 if (ret < 0)
3949 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003950 per_cpu_ptr(trace_buf->data, cpu)->entries =
3951 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003952 }
3953 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003954 ret = ring_buffer_resize(trace_buf->buffer,
3955 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003956 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003957 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3958 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003959 }
3960
3961 return ret;
3962}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003963#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003964
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003965static int __tracing_resize_ring_buffer(struct trace_array *tr,
3966 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003967{
3968 int ret;
3969
3970 /*
3971 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003972 * we use the size that was given, and we can forget about
3973 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003974 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003975 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003976
Steven Rostedtb382ede62012-10-10 21:44:34 -04003977 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003978 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003979 return 0;
3980
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003981 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003982 if (ret < 0)
3983 return ret;
3984
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003985#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003986 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3987 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003988 goto out;
3989
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003990 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003991 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003992 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3993 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003994 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003995 /*
3996 * AARGH! We are left with different
3997 * size max buffer!!!!
3998 * The max buffer is our "snapshot" buffer.
3999 * When a tracer needs a snapshot (one of the
4000 * latency tracers), it swaps the max buffer
4001 * with the saved snap shot. We succeeded to
4002 * update the size of the main buffer, but failed to
4003 * update the size of the max buffer. But when we tried
4004 * to reset the main buffer to the original size, we
4005 * failed there too. This is very unlikely to
4006 * happen, but if it does, warn and kill all
4007 * tracing.
4008 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004009 WARN_ON(1);
4010 tracing_disabled = 1;
4011 }
4012 return ret;
4013 }
4014
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004015 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004016 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004017 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004018 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004019
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004020 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004021#endif /* CONFIG_TRACER_MAX_TRACE */
4022
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004023 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004024 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004025 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004026 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004027
4028 return ret;
4029}
4030
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004031static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4032 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004033{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004034 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004035
4036 mutex_lock(&trace_types_lock);
4037
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004038 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4039 /* make sure, this cpu is enabled in the mask */
4040 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4041 ret = -EINVAL;
4042 goto out;
4043 }
4044 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004045
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004046 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004047 if (ret < 0)
4048 ret = -ENOMEM;
4049
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004050out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004051 mutex_unlock(&trace_types_lock);
4052
4053 return ret;
4054}
4055
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004056
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004057/**
4058 * tracing_update_buffers - used by tracing facility to expand ring buffers
4059 *
4060 * To save on memory when the tracing is never used on a system with it
4061 * configured in. The ring buffers are set to a minimum size. But once
4062 * a user starts to use the tracing facility, then they need to grow
4063 * to their default size.
4064 *
4065 * This function is to be called when a tracer is about to be used.
4066 */
4067int tracing_update_buffers(void)
4068{
4069 int ret = 0;
4070
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004071 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004072 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004073 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004074 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004075 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004076
4077 return ret;
4078}
4079
Steven Rostedt577b7852009-02-26 23:43:05 -05004080struct trace_option_dentry;
4081
4082static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004083create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004084
4085static void
4086destroy_trace_option_files(struct trace_option_dentry *topts);
4087
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004088/*
4089 * Used to clear out the tracer before deletion of an instance.
4090 * Must have trace_types_lock held.
4091 */
4092static void tracing_set_nop(struct trace_array *tr)
4093{
4094 if (tr->current_trace == &nop_trace)
4095 return;
4096
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004097 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004098
4099 if (tr->current_trace->reset)
4100 tr->current_trace->reset(tr);
4101
4102 tr->current_trace = &nop_trace;
4103}
4104
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004105static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004106{
Steven Rostedt577b7852009-02-26 23:43:05 -05004107 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004108 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004109#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004110 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004111#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004112 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004113
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004114 mutex_lock(&trace_types_lock);
4115
Steven Rostedt73c51622009-03-11 13:42:01 -04004116 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004117 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004118 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004119 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004120 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004121 ret = 0;
4122 }
4123
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004124 for (t = trace_types; t; t = t->next) {
4125 if (strcmp(t->name, buf) == 0)
4126 break;
4127 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004128 if (!t) {
4129 ret = -EINVAL;
4130 goto out;
4131 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004132 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004133 goto out;
4134
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004135 /* Some tracers are only allowed for the top level buffer */
4136 if (!trace_ok_for_array(t, tr)) {
4137 ret = -EINVAL;
4138 goto out;
4139 }
4140
Steven Rostedt9f029e82008-11-12 15:24:24 -05004141 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004142
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004143 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004144
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004145 if (tr->current_trace->reset)
4146 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004147
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004148 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004149 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004150
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004151#ifdef CONFIG_TRACER_MAX_TRACE
4152 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004153
4154 if (had_max_tr && !t->use_max_tr) {
4155 /*
4156 * We need to make sure that the update_max_tr sees that
4157 * current_trace changed to nop_trace to keep it from
4158 * swapping the buffers after we resize it.
4159 * The update_max_tr is called from interrupts disabled
4160 * so a synchronized_sched() is sufficient.
4161 */
4162 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004163 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004164 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004165#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004166 /* Currently, only the top instance has options */
4167 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4168 destroy_trace_option_files(topts);
4169 topts = create_trace_option_files(tr, t);
4170 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004171
4172#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004173 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004174 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004175 if (ret < 0)
4176 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004177 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004178#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004179
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004180 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004181 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004182 if (ret)
4183 goto out;
4184 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004185
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004186 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004187 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004188 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004189 out:
4190 mutex_unlock(&trace_types_lock);
4191
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004192 return ret;
4193}
4194
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004195static ssize_t
4196tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4197 size_t cnt, loff_t *ppos)
4198{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004199 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004200 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004201 int i;
4202 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004203 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004204
Steven Rostedt60063a62008-10-28 10:44:24 -04004205 ret = cnt;
4206
Li Zefanee6c2c12009-09-18 14:06:47 +08004207 if (cnt > MAX_TRACER_SIZE)
4208 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004209
4210 if (copy_from_user(&buf, ubuf, cnt))
4211 return -EFAULT;
4212
4213 buf[cnt] = 0;
4214
4215 /* strip ending whitespace. */
4216 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4217 buf[i] = 0;
4218
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004219 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004220 if (err)
4221 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004222
Jiri Olsacf8517c2009-10-23 19:36:16 -04004223 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004224
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004225 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004226}
4227
4228static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004229tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4230 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004231{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004232 char buf[64];
4233 int r;
4234
Steven Rostedtcffae432008-05-12 21:21:00 +02004235 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004236 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004237 if (r > sizeof(buf))
4238 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004239 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004240}
4241
4242static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004243tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4244 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004245{
Hannes Eder5e398412009-02-10 19:44:34 +01004246 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004247 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004248
Peter Huewe22fe9b52011-06-07 21:58:27 +02004249 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4250 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004251 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004252
4253 *ptr = val * 1000;
4254
4255 return cnt;
4256}
4257
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004258static ssize_t
4259tracing_thresh_read(struct file *filp, char __user *ubuf,
4260 size_t cnt, loff_t *ppos)
4261{
4262 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4263}
4264
4265static ssize_t
4266tracing_thresh_write(struct file *filp, const char __user *ubuf,
4267 size_t cnt, loff_t *ppos)
4268{
4269 struct trace_array *tr = filp->private_data;
4270 int ret;
4271
4272 mutex_lock(&trace_types_lock);
4273 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4274 if (ret < 0)
4275 goto out;
4276
4277 if (tr->current_trace->update_thresh) {
4278 ret = tr->current_trace->update_thresh(tr);
4279 if (ret < 0)
4280 goto out;
4281 }
4282
4283 ret = cnt;
4284out:
4285 mutex_unlock(&trace_types_lock);
4286
4287 return ret;
4288}
4289
4290static ssize_t
4291tracing_max_lat_read(struct file *filp, char __user *ubuf,
4292 size_t cnt, loff_t *ppos)
4293{
4294 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4295}
4296
4297static ssize_t
4298tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4299 size_t cnt, loff_t *ppos)
4300{
4301 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4302}
4303
Steven Rostedtb3806b42008-05-12 21:20:46 +02004304static int tracing_open_pipe(struct inode *inode, struct file *filp)
4305{
Oleg Nesterov15544202013-07-23 17:25:57 +02004306 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004307 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004308 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004309
4310 if (tracing_disabled)
4311 return -ENODEV;
4312
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004313 if (trace_array_get(tr) < 0)
4314 return -ENODEV;
4315
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004316 mutex_lock(&trace_types_lock);
4317
Steven Rostedtb3806b42008-05-12 21:20:46 +02004318 /* create a buffer to store the information to pass to userspace */
4319 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004320 if (!iter) {
4321 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004322 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004323 goto out;
4324 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004325
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004326 /*
4327 * We make a copy of the current tracer to avoid concurrent
4328 * changes on it while we are reading.
4329 */
4330 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4331 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004332 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004333 goto fail;
4334 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004335 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004336
4337 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4338 ret = -ENOMEM;
4339 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304340 }
4341
Steven Rostedta3097202008-11-07 22:36:02 -05004342 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304343 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004344
Steven Rostedt112f38a72009-06-01 15:16:05 -04004345 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4346 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4347
David Sharp8be07092012-11-13 12:18:22 -08004348 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004349 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004350 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4351
Oleg Nesterov15544202013-07-23 17:25:57 +02004352 iter->tr = tr;
4353 iter->trace_buffer = &tr->trace_buffer;
4354 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004355 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004356 filp->private_data = iter;
4357
Steven Rostedt107bad82008-05-12 21:21:01 +02004358 if (iter->trace->pipe_open)
4359 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004360
Arnd Bergmannb4447862010-07-07 23:40:11 +02004361 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004362out:
4363 mutex_unlock(&trace_types_lock);
4364 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004365
4366fail:
4367 kfree(iter->trace);
4368 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004369 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004370 mutex_unlock(&trace_types_lock);
4371 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004372}
4373
4374static int tracing_release_pipe(struct inode *inode, struct file *file)
4375{
4376 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004377 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004378
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004379 mutex_lock(&trace_types_lock);
4380
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004381 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004382 iter->trace->pipe_close(iter);
4383
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004384 mutex_unlock(&trace_types_lock);
4385
Rusty Russell44623442009-01-01 10:12:23 +10304386 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004387 mutex_destroy(&iter->mutex);
4388 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004389 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004390
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004391 trace_array_put(tr);
4392
Steven Rostedtb3806b42008-05-12 21:20:46 +02004393 return 0;
4394}
4395
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004396static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004397trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004398{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004399 /* Iterators are static, they should be filled or empty */
4400 if (trace_buffer_iter(iter, iter->cpu_file))
4401 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004402
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004403 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004404 /*
4405 * Always select as readable when in blocking mode
4406 */
4407 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004408 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004409 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004410 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004411}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004412
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004413static unsigned int
4414tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4415{
4416 struct trace_iterator *iter = filp->private_data;
4417
4418 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004419}
4420
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004421/* Must be called with trace_types_lock mutex held. */
4422static int tracing_wait_pipe(struct file *filp)
4423{
4424 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004425 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004426
4427 while (trace_empty(iter)) {
4428
4429 if ((filp->f_flags & O_NONBLOCK)) {
4430 return -EAGAIN;
4431 }
4432
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004433 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004434 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004435 * We still block if tracing is disabled, but we have never
4436 * read anything. This allows a user to cat this file, and
4437 * then enable tracing. But after we have read something,
4438 * we give an EOF when tracing is again disabled.
4439 *
4440 * iter->pos will be 0 if we haven't read anything.
4441 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004442 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004443 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004444
4445 mutex_unlock(&iter->mutex);
4446
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004447 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004448
4449 mutex_lock(&iter->mutex);
4450
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004451 if (ret)
4452 return ret;
4453
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004454 if (signal_pending(current))
4455 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004456 }
4457
4458 return 1;
4459}
4460
Steven Rostedtb3806b42008-05-12 21:20:46 +02004461/*
4462 * Consumer reader.
4463 */
4464static ssize_t
4465tracing_read_pipe(struct file *filp, char __user *ubuf,
4466 size_t cnt, loff_t *ppos)
4467{
4468 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004469 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004470 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004471
4472 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004473 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4474 if (sret != -EBUSY)
4475 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004476
Steven Rostedtf9520752009-03-02 14:04:40 -05004477 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004478
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004479 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004480 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004481 if (unlikely(iter->trace->name != tr->current_trace->name))
4482 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004483 mutex_unlock(&trace_types_lock);
4484
4485 /*
4486 * Avoid more than one consumer on a single file descriptor
4487 * This is just a matter of traces coherency, the ring buffer itself
4488 * is protected.
4489 */
4490 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004491 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004492 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4493 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004494 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004495 }
4496
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004497waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004498 sret = tracing_wait_pipe(filp);
4499 if (sret <= 0)
4500 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004501
4502 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004503 if (trace_empty(iter)) {
4504 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004505 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004506 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004507
4508 if (cnt >= PAGE_SIZE)
4509 cnt = PAGE_SIZE - 1;
4510
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004511 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004512 memset(&iter->seq, 0,
4513 sizeof(struct trace_iterator) -
4514 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004515 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004516 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004517
Lai Jiangshan4f535962009-05-18 19:35:34 +08004518 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004519 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004520 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004521 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004522 int len = iter->seq.len;
4523
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004524 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004525 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004526 /* don't print partial lines */
4527 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004528 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004529 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004530 if (ret != TRACE_TYPE_NO_CONSUME)
4531 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004532
4533 if (iter->seq.len >= cnt)
4534 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004535
4536 /*
4537 * Setting the full flag means we reached the trace_seq buffer
4538 * size and we should leave by partial output condition above.
4539 * One of the trace_seq_* functions is not used properly.
4540 */
4541 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4542 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004543 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004544 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004545 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004546
Steven Rostedtb3806b42008-05-12 21:20:46 +02004547 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004548 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4549 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004550 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004551
4552 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004553 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004554 * entries, go back to wait for more entries.
4555 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004556 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004557 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004558
Steven Rostedt107bad82008-05-12 21:21:01 +02004559out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004560 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004561
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004562 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004563}
4564
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004565static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4566 unsigned int idx)
4567{
4568 __free_page(spd->pages[idx]);
4569}
4570
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004571static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004572 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004573 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004574 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004575 .steal = generic_pipe_buf_steal,
4576 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004577};
4578
Steven Rostedt34cd4992009-02-09 12:06:29 -05004579static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004580tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004581{
4582 size_t count;
4583 int ret;
4584
4585 /* Seq buffer is page-sized, exactly what we need. */
4586 for (;;) {
4587 count = iter->seq.len;
4588 ret = print_trace_line(iter);
4589 count = iter->seq.len - count;
4590 if (rem < count) {
4591 rem = 0;
4592 iter->seq.len -= count;
4593 break;
4594 }
4595 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4596 iter->seq.len -= count;
4597 break;
4598 }
4599
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004600 if (ret != TRACE_TYPE_NO_CONSUME)
4601 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004602 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004603 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004604 rem = 0;
4605 iter->ent = NULL;
4606 break;
4607 }
4608 }
4609
4610 return rem;
4611}
4612
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004613static ssize_t tracing_splice_read_pipe(struct file *filp,
4614 loff_t *ppos,
4615 struct pipe_inode_info *pipe,
4616 size_t len,
4617 unsigned int flags)
4618{
Jens Axboe35f3d142010-05-20 10:43:18 +02004619 struct page *pages_def[PIPE_DEF_BUFFERS];
4620 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004621 struct trace_iterator *iter = filp->private_data;
4622 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004623 .pages = pages_def,
4624 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004625 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004626 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004627 .flags = flags,
4628 .ops = &tracing_pipe_buf_ops,
4629 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004630 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004631 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004632 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004633 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004634 unsigned int i;
4635
Jens Axboe35f3d142010-05-20 10:43:18 +02004636 if (splice_grow_spd(pipe, &spd))
4637 return -ENOMEM;
4638
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004639 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004640 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004641 if (unlikely(iter->trace->name != tr->current_trace->name))
4642 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004643 mutex_unlock(&trace_types_lock);
4644
4645 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004646
4647 if (iter->trace->splice_read) {
4648 ret = iter->trace->splice_read(iter, filp,
4649 ppos, pipe, len, flags);
4650 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004651 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004652 }
4653
4654 ret = tracing_wait_pipe(filp);
4655 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004656 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004657
Jason Wessel955b61e2010-08-05 09:22:23 -05004658 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004659 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004660 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004661 }
4662
Lai Jiangshan4f535962009-05-18 19:35:34 +08004663 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004664 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004665
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004666 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004667 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004668 spd.pages[i] = alloc_page(GFP_KERNEL);
4669 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004670 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004671
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004672 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004673
4674 /* Copy the data into the page, so we can start over. */
4675 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004676 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004677 iter->seq.len);
4678 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004679 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004680 break;
4681 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004682 spd.partial[i].offset = 0;
4683 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004684
Steven Rostedtf9520752009-03-02 14:04:40 -05004685 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004686 }
4687
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004688 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004689 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004690 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004691
4692 spd.nr_pages = i;
4693
Jens Axboe35f3d142010-05-20 10:43:18 +02004694 ret = splice_to_pipe(pipe, &spd);
4695out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004696 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004697 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004698
Steven Rostedt34cd4992009-02-09 12:06:29 -05004699out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004700 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004701 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004702}
4703
Steven Rostedta98a3c32008-05-12 21:20:59 +02004704static ssize_t
4705tracing_entries_read(struct file *filp, char __user *ubuf,
4706 size_t cnt, loff_t *ppos)
4707{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004708 struct inode *inode = file_inode(filp);
4709 struct trace_array *tr = inode->i_private;
4710 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004711 char buf[64];
4712 int r = 0;
4713 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004714
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004715 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004716
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004717 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004718 int cpu, buf_size_same;
4719 unsigned long size;
4720
4721 size = 0;
4722 buf_size_same = 1;
4723 /* check if all cpu sizes are same */
4724 for_each_tracing_cpu(cpu) {
4725 /* fill in the size from first enabled cpu */
4726 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004727 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4728 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004729 buf_size_same = 0;
4730 break;
4731 }
4732 }
4733
4734 if (buf_size_same) {
4735 if (!ring_buffer_expanded)
4736 r = sprintf(buf, "%lu (expanded: %lu)\n",
4737 size >> 10,
4738 trace_buf_size >> 10);
4739 else
4740 r = sprintf(buf, "%lu\n", size >> 10);
4741 } else
4742 r = sprintf(buf, "X\n");
4743 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004744 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004745
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004746 mutex_unlock(&trace_types_lock);
4747
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004748 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4749 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004750}
4751
4752static ssize_t
4753tracing_entries_write(struct file *filp, const char __user *ubuf,
4754 size_t cnt, loff_t *ppos)
4755{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004756 struct inode *inode = file_inode(filp);
4757 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004758 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004759 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004760
Peter Huewe22fe9b52011-06-07 21:58:27 +02004761 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4762 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004763 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004764
4765 /* must have at least 1 entry */
4766 if (!val)
4767 return -EINVAL;
4768
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004769 /* value is in KB */
4770 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004771 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004772 if (ret < 0)
4773 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004774
Jiri Olsacf8517c2009-10-23 19:36:16 -04004775 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004776
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004777 return cnt;
4778}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004779
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004780static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004781tracing_total_entries_read(struct file *filp, char __user *ubuf,
4782 size_t cnt, loff_t *ppos)
4783{
4784 struct trace_array *tr = filp->private_data;
4785 char buf[64];
4786 int r, cpu;
4787 unsigned long size = 0, expanded_size = 0;
4788
4789 mutex_lock(&trace_types_lock);
4790 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004791 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004792 if (!ring_buffer_expanded)
4793 expanded_size += trace_buf_size >> 10;
4794 }
4795 if (ring_buffer_expanded)
4796 r = sprintf(buf, "%lu\n", size);
4797 else
4798 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4799 mutex_unlock(&trace_types_lock);
4800
4801 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4802}
4803
4804static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004805tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4806 size_t cnt, loff_t *ppos)
4807{
4808 /*
4809 * There is no need to read what the user has written, this function
4810 * is just to make sure that there is no error when "echo" is used
4811 */
4812
4813 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004814
4815 return cnt;
4816}
4817
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004818static int
4819tracing_free_buffer_release(struct inode *inode, struct file *filp)
4820{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004821 struct trace_array *tr = inode->i_private;
4822
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004823 /* disable tracing ? */
4824 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004825 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004826 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004827 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004828
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004829 trace_array_put(tr);
4830
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004831 return 0;
4832}
4833
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004834static ssize_t
4835tracing_mark_write(struct file *filp, const char __user *ubuf,
4836 size_t cnt, loff_t *fpos)
4837{
Steven Rostedtd696b582011-09-22 11:50:27 -04004838 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004839 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004840 struct ring_buffer_event *event;
4841 struct ring_buffer *buffer;
4842 struct print_entry *entry;
4843 unsigned long irq_flags;
4844 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004845 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004846 int nr_pages = 1;
4847 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004848 int offset;
4849 int size;
4850 int len;
4851 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004852 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004853
Steven Rostedtc76f0692008-11-07 22:36:02 -05004854 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004855 return -EINVAL;
4856
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004857 if (!(trace_flags & TRACE_ITER_MARKERS))
4858 return -EINVAL;
4859
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004860 if (cnt > TRACE_BUF_SIZE)
4861 cnt = TRACE_BUF_SIZE;
4862
Steven Rostedtd696b582011-09-22 11:50:27 -04004863 /*
4864 * Userspace is injecting traces into the kernel trace buffer.
4865 * We want to be as non intrusive as possible.
4866 * To do so, we do not want to allocate any special buffers
4867 * or take any locks, but instead write the userspace data
4868 * straight into the ring buffer.
4869 *
4870 * First we need to pin the userspace buffer into memory,
4871 * which, most likely it is, because it just referenced it.
4872 * But there's no guarantee that it is. By using get_user_pages_fast()
4873 * and kmap_atomic/kunmap_atomic() we can get access to the
4874 * pages directly. We then write the data directly into the
4875 * ring buffer.
4876 */
4877 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004878
Steven Rostedtd696b582011-09-22 11:50:27 -04004879 /* check if we cross pages */
4880 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4881 nr_pages = 2;
4882
4883 offset = addr & (PAGE_SIZE - 1);
4884 addr &= PAGE_MASK;
4885
4886 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4887 if (ret < nr_pages) {
4888 while (--ret >= 0)
4889 put_page(pages[ret]);
4890 written = -EFAULT;
4891 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004892 }
4893
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004894 for (i = 0; i < nr_pages; i++)
4895 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004896
4897 local_save_flags(irq_flags);
4898 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004899 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004900 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4901 irq_flags, preempt_count());
4902 if (!event) {
4903 /* Ring buffer disabled, return as if not open for write */
4904 written = -EBADF;
4905 goto out_unlock;
4906 }
4907
4908 entry = ring_buffer_event_data(event);
4909 entry->ip = _THIS_IP_;
4910
4911 if (nr_pages == 2) {
4912 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004913 memcpy(&entry->buf, map_page[0] + offset, len);
4914 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004915 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004916 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004917
4918 if (entry->buf[cnt - 1] != '\n') {
4919 entry->buf[cnt] = '\n';
4920 entry->buf[cnt + 1] = '\0';
4921 } else
4922 entry->buf[cnt] = '\0';
4923
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004924 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004925
4926 written = cnt;
4927
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004928 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004929
Steven Rostedtd696b582011-09-22 11:50:27 -04004930 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004931 for (i = 0; i < nr_pages; i++){
4932 kunmap_atomic(map_page[i]);
4933 put_page(pages[i]);
4934 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004935 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004936 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004937}
4938
Li Zefan13f16d22009-12-08 11:16:11 +08004939static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004940{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004941 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004942 int i;
4943
4944 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004945 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004946 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004947 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4948 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004949 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004950
Li Zefan13f16d22009-12-08 11:16:11 +08004951 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004952}
4953
Steven Rostedte1e232c2014-02-10 23:38:46 -05004954static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004955{
Zhaolei5079f322009-08-25 16:12:56 +08004956 int i;
4957
Zhaolei5079f322009-08-25 16:12:56 +08004958 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4959 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4960 break;
4961 }
4962 if (i == ARRAY_SIZE(trace_clocks))
4963 return -EINVAL;
4964
Zhaolei5079f322009-08-25 16:12:56 +08004965 mutex_lock(&trace_types_lock);
4966
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004967 tr->clock_id = i;
4968
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004969 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004970
David Sharp60303ed2012-10-11 16:27:52 -07004971 /*
4972 * New clock may not be consistent with the previous clock.
4973 * Reset the buffer so that it doesn't have incomparable timestamps.
4974 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004975 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004976
4977#ifdef CONFIG_TRACER_MAX_TRACE
4978 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4979 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004980 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004981#endif
David Sharp60303ed2012-10-11 16:27:52 -07004982
Zhaolei5079f322009-08-25 16:12:56 +08004983 mutex_unlock(&trace_types_lock);
4984
Steven Rostedte1e232c2014-02-10 23:38:46 -05004985 return 0;
4986}
4987
4988static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4989 size_t cnt, loff_t *fpos)
4990{
4991 struct seq_file *m = filp->private_data;
4992 struct trace_array *tr = m->private;
4993 char buf[64];
4994 const char *clockstr;
4995 int ret;
4996
4997 if (cnt >= sizeof(buf))
4998 return -EINVAL;
4999
5000 if (copy_from_user(&buf, ubuf, cnt))
5001 return -EFAULT;
5002
5003 buf[cnt] = 0;
5004
5005 clockstr = strstrip(buf);
5006
5007 ret = tracing_set_clock(tr, clockstr);
5008 if (ret)
5009 return ret;
5010
Zhaolei5079f322009-08-25 16:12:56 +08005011 *fpos += cnt;
5012
5013 return cnt;
5014}
5015
Li Zefan13f16d22009-12-08 11:16:11 +08005016static int tracing_clock_open(struct inode *inode, struct file *file)
5017{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005018 struct trace_array *tr = inode->i_private;
5019 int ret;
5020
Li Zefan13f16d22009-12-08 11:16:11 +08005021 if (tracing_disabled)
5022 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005023
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005024 if (trace_array_get(tr))
5025 return -ENODEV;
5026
5027 ret = single_open(file, tracing_clock_show, inode->i_private);
5028 if (ret < 0)
5029 trace_array_put(tr);
5030
5031 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005032}
5033
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005034struct ftrace_buffer_info {
5035 struct trace_iterator iter;
5036 void *spare;
5037 unsigned int read;
5038};
5039
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005040#ifdef CONFIG_TRACER_SNAPSHOT
5041static int tracing_snapshot_open(struct inode *inode, struct file *file)
5042{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005043 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005044 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005045 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005046 int ret = 0;
5047
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005048 if (trace_array_get(tr) < 0)
5049 return -ENODEV;
5050
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005051 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005052 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005053 if (IS_ERR(iter))
5054 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005055 } else {
5056 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005057 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005058 m = kzalloc(sizeof(*m), GFP_KERNEL);
5059 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005060 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005061 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5062 if (!iter) {
5063 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005064 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005065 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005066 ret = 0;
5067
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005068 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005069 iter->trace_buffer = &tr->max_buffer;
5070 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005071 m->private = iter;
5072 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005073 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005074out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005075 if (ret < 0)
5076 trace_array_put(tr);
5077
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005078 return ret;
5079}
5080
5081static ssize_t
5082tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5083 loff_t *ppos)
5084{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005085 struct seq_file *m = filp->private_data;
5086 struct trace_iterator *iter = m->private;
5087 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005088 unsigned long val;
5089 int ret;
5090
5091 ret = tracing_update_buffers();
5092 if (ret < 0)
5093 return ret;
5094
5095 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5096 if (ret)
5097 return ret;
5098
5099 mutex_lock(&trace_types_lock);
5100
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005101 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005102 ret = -EBUSY;
5103 goto out;
5104 }
5105
5106 switch (val) {
5107 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005108 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5109 ret = -EINVAL;
5110 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005111 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005112 if (tr->allocated_snapshot)
5113 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005114 break;
5115 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005116/* Only allow per-cpu swap if the ring buffer supports it */
5117#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5118 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5119 ret = -EINVAL;
5120 break;
5121 }
5122#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005123 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005124 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005125 if (ret < 0)
5126 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005127 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005128 local_irq_disable();
5129 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005130 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005131 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005132 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005133 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005134 local_irq_enable();
5135 break;
5136 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005137 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005138 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5139 tracing_reset_online_cpus(&tr->max_buffer);
5140 else
5141 tracing_reset(&tr->max_buffer, iter->cpu_file);
5142 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005143 break;
5144 }
5145
5146 if (ret >= 0) {
5147 *ppos += cnt;
5148 ret = cnt;
5149 }
5150out:
5151 mutex_unlock(&trace_types_lock);
5152 return ret;
5153}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005154
5155static int tracing_snapshot_release(struct inode *inode, struct file *file)
5156{
5157 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005158 int ret;
5159
5160 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005161
5162 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005163 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005164
5165 /* If write only, the seq_file is just a stub */
5166 if (m)
5167 kfree(m->private);
5168 kfree(m);
5169
5170 return 0;
5171}
5172
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005173static int tracing_buffers_open(struct inode *inode, struct file *filp);
5174static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5175 size_t count, loff_t *ppos);
5176static int tracing_buffers_release(struct inode *inode, struct file *file);
5177static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5178 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5179
5180static int snapshot_raw_open(struct inode *inode, struct file *filp)
5181{
5182 struct ftrace_buffer_info *info;
5183 int ret;
5184
5185 ret = tracing_buffers_open(inode, filp);
5186 if (ret < 0)
5187 return ret;
5188
5189 info = filp->private_data;
5190
5191 if (info->iter.trace->use_max_tr) {
5192 tracing_buffers_release(inode, filp);
5193 return -EBUSY;
5194 }
5195
5196 info->iter.snapshot = true;
5197 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5198
5199 return ret;
5200}
5201
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005202#endif /* CONFIG_TRACER_SNAPSHOT */
5203
5204
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005205static const struct file_operations tracing_thresh_fops = {
5206 .open = tracing_open_generic,
5207 .read = tracing_thresh_read,
5208 .write = tracing_thresh_write,
5209 .llseek = generic_file_llseek,
5210};
5211
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005212static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005213 .open = tracing_open_generic,
5214 .read = tracing_max_lat_read,
5215 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005216 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005217};
5218
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005219static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005220 .open = tracing_open_generic,
5221 .read = tracing_set_trace_read,
5222 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005223 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005224};
5225
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005226static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005227 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005228 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005229 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005230 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005231 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005232 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005233};
5234
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005235static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005236 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005237 .read = tracing_entries_read,
5238 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005239 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005240 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005241};
5242
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005243static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005244 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005245 .read = tracing_total_entries_read,
5246 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005247 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005248};
5249
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005250static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005251 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005252 .write = tracing_free_buffer_write,
5253 .release = tracing_free_buffer_release,
5254};
5255
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005256static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005257 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005258 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005259 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005260 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005261};
5262
Zhaolei5079f322009-08-25 16:12:56 +08005263static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005264 .open = tracing_clock_open,
5265 .read = seq_read,
5266 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005267 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005268 .write = tracing_clock_write,
5269};
5270
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005271#ifdef CONFIG_TRACER_SNAPSHOT
5272static const struct file_operations snapshot_fops = {
5273 .open = tracing_snapshot_open,
5274 .read = seq_read,
5275 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005276 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005277 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005278};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005279
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005280static const struct file_operations snapshot_raw_fops = {
5281 .open = snapshot_raw_open,
5282 .read = tracing_buffers_read,
5283 .release = tracing_buffers_release,
5284 .splice_read = tracing_buffers_splice_read,
5285 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005286};
5287
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005288#endif /* CONFIG_TRACER_SNAPSHOT */
5289
Steven Rostedt2cadf912008-12-01 22:20:19 -05005290static int tracing_buffers_open(struct inode *inode, struct file *filp)
5291{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005292 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005293 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005294 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005295
5296 if (tracing_disabled)
5297 return -ENODEV;
5298
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005299 if (trace_array_get(tr) < 0)
5300 return -ENODEV;
5301
Steven Rostedt2cadf912008-12-01 22:20:19 -05005302 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005303 if (!info) {
5304 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005305 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005306 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005307
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005308 mutex_lock(&trace_types_lock);
5309
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005310 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005311 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005312 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005313 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005314 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005315 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005316 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005317
5318 filp->private_data = info;
5319
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005320 mutex_unlock(&trace_types_lock);
5321
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005322 ret = nonseekable_open(inode, filp);
5323 if (ret < 0)
5324 trace_array_put(tr);
5325
5326 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005327}
5328
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005329static unsigned int
5330tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5331{
5332 struct ftrace_buffer_info *info = filp->private_data;
5333 struct trace_iterator *iter = &info->iter;
5334
5335 return trace_poll(iter, filp, poll_table);
5336}
5337
Steven Rostedt2cadf912008-12-01 22:20:19 -05005338static ssize_t
5339tracing_buffers_read(struct file *filp, char __user *ubuf,
5340 size_t count, loff_t *ppos)
5341{
5342 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005343 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005344 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005345 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005346
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005347 if (!count)
5348 return 0;
5349
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005350 mutex_lock(&trace_types_lock);
5351
5352#ifdef CONFIG_TRACER_MAX_TRACE
5353 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5354 size = -EBUSY;
5355 goto out_unlock;
5356 }
5357#endif
5358
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005359 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005360 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5361 iter->cpu_file);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005362 size = -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005363 if (!info->spare)
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005364 goto out_unlock;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005365
Steven Rostedt2cadf912008-12-01 22:20:19 -05005366 /* Do we have previous read data to read? */
5367 if (info->read < PAGE_SIZE)
5368 goto read;
5369
Steven Rostedtb6273442013-02-28 13:44:11 -05005370 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005371 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005372 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005373 &info->spare,
5374 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005375 iter->cpu_file, 0);
5376 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005377
5378 if (ret < 0) {
5379 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005380 if ((filp->f_flags & O_NONBLOCK)) {
5381 size = -EAGAIN;
5382 goto out_unlock;
5383 }
5384 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005385 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005386 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005387 if (ret) {
5388 size = ret;
5389 goto out_unlock;
5390 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005391 if (signal_pending(current)) {
5392 size = -EINTR;
5393 goto out_unlock;
5394 }
Steven Rostedtb6273442013-02-28 13:44:11 -05005395 goto again;
5396 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005397 size = 0;
5398 goto out_unlock;
Steven Rostedtb6273442013-02-28 13:44:11 -05005399 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005400
Steven Rostedt436fc282011-10-14 10:44:25 -04005401 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005402 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005403 size = PAGE_SIZE - info->read;
5404 if (size > count)
5405 size = count;
5406
5407 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005408 if (ret == size) {
5409 size = -EFAULT;
5410 goto out_unlock;
5411 }
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005412 size -= ret;
5413
Steven Rostedt2cadf912008-12-01 22:20:19 -05005414 *ppos += size;
5415 info->read += size;
5416
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005417 out_unlock:
5418 mutex_unlock(&trace_types_lock);
5419
Steven Rostedt2cadf912008-12-01 22:20:19 -05005420 return size;
5421}
5422
5423static int tracing_buffers_release(struct inode *inode, struct file *file)
5424{
5425 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005426 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005427
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005428 mutex_lock(&trace_types_lock);
5429
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005430 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005431
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005432 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005433 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005434 kfree(info);
5435
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005436 mutex_unlock(&trace_types_lock);
5437
Steven Rostedt2cadf912008-12-01 22:20:19 -05005438 return 0;
5439}
5440
5441struct buffer_ref {
5442 struct ring_buffer *buffer;
5443 void *page;
5444 int ref;
5445};
5446
5447static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5448 struct pipe_buffer *buf)
5449{
5450 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5451
5452 if (--ref->ref)
5453 return;
5454
5455 ring_buffer_free_read_page(ref->buffer, ref->page);
5456 kfree(ref);
5457 buf->private = 0;
5458}
5459
Steven Rostedt2cadf912008-12-01 22:20:19 -05005460static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5461 struct pipe_buffer *buf)
5462{
5463 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5464
5465 ref->ref++;
5466}
5467
5468/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005469static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005470 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005471 .confirm = generic_pipe_buf_confirm,
5472 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005473 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005474 .get = buffer_pipe_buf_get,
5475};
5476
5477/*
5478 * Callback from splice_to_pipe(), if we need to release some pages
5479 * at the end of the spd in case we error'ed out in filling the pipe.
5480 */
5481static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5482{
5483 struct buffer_ref *ref =
5484 (struct buffer_ref *)spd->partial[i].private;
5485
5486 if (--ref->ref)
5487 return;
5488
5489 ring_buffer_free_read_page(ref->buffer, ref->page);
5490 kfree(ref);
5491 spd->partial[i].private = 0;
5492}
5493
5494static ssize_t
5495tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5496 struct pipe_inode_info *pipe, size_t len,
5497 unsigned int flags)
5498{
5499 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005500 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005501 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5502 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005503 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005504 .pages = pages_def,
5505 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005506 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005507 .flags = flags,
5508 .ops = &buffer_pipe_buf_ops,
5509 .spd_release = buffer_spd_release,
5510 };
5511 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005512 int entries, size, i;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005513 ssize_t ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005514
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005515 mutex_lock(&trace_types_lock);
5516
5517#ifdef CONFIG_TRACER_MAX_TRACE
5518 if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
5519 ret = -EBUSY;
5520 goto out;
5521 }
5522#endif
5523
5524 if (splice_grow_spd(pipe, &spd)) {
5525 ret = -ENOMEM;
5526 goto out;
5527 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005528
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005529 if (*ppos & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005530 ret = -EINVAL;
5531 goto out;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005532 }
5533
5534 if (len & (PAGE_SIZE - 1)) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005535 if (len < PAGE_SIZE) {
5536 ret = -EINVAL;
5537 goto out;
5538 }
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005539 len &= PAGE_MASK;
5540 }
5541
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005542 again:
5543 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005544 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005545
Al Viroa786c062014-04-11 12:01:03 -04005546 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005547 struct page *page;
5548 int r;
5549
5550 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5551 if (!ref)
5552 break;
5553
Steven Rostedt7267fa62009-04-29 00:16:21 -04005554 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005555 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005556 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005557 if (!ref->page) {
5558 kfree(ref);
5559 break;
5560 }
5561
5562 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005563 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005564 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005565 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005566 kfree(ref);
5567 break;
5568 }
5569
5570 /*
5571 * zero out any left over data, this is going to
5572 * user land.
5573 */
5574 size = ring_buffer_page_len(ref->page);
5575 if (size < PAGE_SIZE)
5576 memset(ref->page + size, 0, PAGE_SIZE - size);
5577
5578 page = virt_to_page(ref->page);
5579
5580 spd.pages[i] = page;
5581 spd.partial[i].len = PAGE_SIZE;
5582 spd.partial[i].offset = 0;
5583 spd.partial[i].private = (unsigned long)ref;
5584 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005585 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005586
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005587 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005588 }
5589
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005590 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005591 spd.nr_pages = i;
5592
5593 /* did we read anything? */
5594 if (!spd.nr_pages) {
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005595 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005596 ret = -EAGAIN;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005597 goto out;
5598 }
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005599 mutex_unlock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005600 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005601 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005602 if (ret)
5603 goto out;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005604 if (signal_pending(current)) {
5605 ret = -EINTR;
5606 goto out;
5607 }
5608 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005609 }
5610
5611 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005612 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005613out:
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005614 mutex_unlock(&trace_types_lock);
5615
Steven Rostedt2cadf912008-12-01 22:20:19 -05005616 return ret;
5617}
5618
5619static const struct file_operations tracing_buffers_fops = {
5620 .open = tracing_buffers_open,
5621 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005622 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005623 .release = tracing_buffers_release,
5624 .splice_read = tracing_buffers_splice_read,
5625 .llseek = no_llseek,
5626};
5627
Steven Rostedtc8d77182009-04-29 18:03:45 -04005628static ssize_t
5629tracing_stats_read(struct file *filp, char __user *ubuf,
5630 size_t count, loff_t *ppos)
5631{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005632 struct inode *inode = file_inode(filp);
5633 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005634 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005635 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005636 struct trace_seq *s;
5637 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005638 unsigned long long t;
5639 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005640
Li Zefane4f2d102009-06-15 10:57:28 +08005641 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005642 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005643 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005644
5645 trace_seq_init(s);
5646
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005647 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005648 trace_seq_printf(s, "entries: %ld\n", cnt);
5649
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005650 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005651 trace_seq_printf(s, "overrun: %ld\n", cnt);
5652
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005653 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005654 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5655
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005656 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005657 trace_seq_printf(s, "bytes: %ld\n", cnt);
5658
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005659 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005660 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005661 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005662 usec_rem = do_div(t, USEC_PER_SEC);
5663 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5664 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005665
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005666 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005667 usec_rem = do_div(t, USEC_PER_SEC);
5668 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5669 } else {
5670 /* counter or tsc mode for trace_clock */
5671 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005672 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005673
5674 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005675 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005676 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005677
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005678 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005679 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5680
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005681 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005682 trace_seq_printf(s, "read events: %ld\n", cnt);
5683
Steven Rostedtc8d77182009-04-29 18:03:45 -04005684 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
5685
5686 kfree(s);
5687
5688 return count;
5689}
5690
5691static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005692 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005693 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005694 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005695 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005696};
5697
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005698#ifdef CONFIG_DYNAMIC_FTRACE
5699
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005700int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005701{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005702 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005703}
5704
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005705static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005706tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005707 size_t cnt, loff_t *ppos)
5708{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005709 static char ftrace_dyn_info_buffer[1024];
5710 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005711 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005712 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005713 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005714 int r;
5715
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005716 mutex_lock(&dyn_info_mutex);
5717 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005718
Steven Rostedta26a2a22008-10-31 00:03:22 -04005719 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005720 buf[r++] = '\n';
5721
5722 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5723
5724 mutex_unlock(&dyn_info_mutex);
5725
5726 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005727}
5728
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005729static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005730 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005731 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005732 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005733};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005734#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005735
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005736#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5737static void
5738ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005739{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005740 tracing_snapshot();
5741}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005742
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005743static void
5744ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5745{
5746 unsigned long *count = (long *)data;
5747
5748 if (!*count)
5749 return;
5750
5751 if (*count != -1)
5752 (*count)--;
5753
5754 tracing_snapshot();
5755}
5756
5757static int
5758ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5759 struct ftrace_probe_ops *ops, void *data)
5760{
5761 long count = (long)data;
5762
5763 seq_printf(m, "%ps:", (void *)ip);
5764
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005765 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005766
5767 if (count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005768 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005769 else
5770 seq_printf(m, ":count=%ld\n", count);
5771
5772 return 0;
5773}
5774
5775static struct ftrace_probe_ops snapshot_probe_ops = {
5776 .func = ftrace_snapshot,
5777 .print = ftrace_snapshot_print,
5778};
5779
5780static struct ftrace_probe_ops snapshot_count_probe_ops = {
5781 .func = ftrace_count_snapshot,
5782 .print = ftrace_snapshot_print,
5783};
5784
5785static int
5786ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5787 char *glob, char *cmd, char *param, int enable)
5788{
5789 struct ftrace_probe_ops *ops;
5790 void *count = (void *)-1;
5791 char *number;
5792 int ret;
5793
5794 /* hash funcs only work with set_ftrace_filter */
5795 if (!enable)
5796 return -EINVAL;
5797
5798 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5799
5800 if (glob[0] == '!') {
5801 unregister_ftrace_function_probe_func(glob+1, ops);
5802 return 0;
5803 }
5804
5805 if (!param)
5806 goto out_reg;
5807
5808 number = strsep(&param, ":");
5809
5810 if (!strlen(number))
5811 goto out_reg;
5812
5813 /*
5814 * We use the callback data field (which is a pointer)
5815 * as our counter.
5816 */
5817 ret = kstrtoul(number, 0, (unsigned long *)&count);
5818 if (ret)
5819 return ret;
5820
5821 out_reg:
5822 ret = register_ftrace_function_probe(glob, ops, count);
5823
5824 if (ret >= 0)
5825 alloc_snapshot(&global_trace);
5826
5827 return ret < 0 ? ret : 0;
5828}
5829
5830static struct ftrace_func_command ftrace_snapshot_cmd = {
5831 .name = "snapshot",
5832 .func = ftrace_trace_snapshot_callback,
5833};
5834
Tom Zanussi38de93a2013-10-24 08:34:18 -05005835static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005836{
5837 return register_ftrace_command(&ftrace_snapshot_cmd);
5838}
5839#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005840static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005841#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005842
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005843struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005844{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005845 if (tr->dir)
5846 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005847
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01005848 if (!debugfs_initialized())
5849 return NULL;
5850
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005851 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5852 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005853
zhangwei(Jovi)687c8782013-03-11 15:13:29 +08005854 if (!tr->dir)
5855 pr_warn_once("Could not create debugfs directory 'tracing'\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005856
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005857 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005858}
5859
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005860struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005861{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005862 return tracing_init_dentry_tr(&global_trace);
5863}
5864
5865static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5866{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005867 struct dentry *d_tracer;
5868
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005869 if (tr->percpu_dir)
5870 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005871
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005872 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005873 if (!d_tracer)
5874 return NULL;
5875
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005876 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005877
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005878 WARN_ONCE(!tr->percpu_dir,
5879 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005880
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005881 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005882}
5883
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005884static struct dentry *
5885trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5886 void *data, long cpu, const struct file_operations *fops)
5887{
5888 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5889
5890 if (ret) /* See tracing_get_cpu() */
5891 ret->d_inode->i_cdev = (void *)(cpu + 1);
5892 return ret;
5893}
5894
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005895static void
5896tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005897{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005898 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005899 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005900 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005901
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005902 if (!d_percpu)
5903 return;
5904
Steven Rostedtdd49a382010-10-20 21:51:26 -04005905 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005906 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5907 if (!d_cpu) {
5908 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5909 return;
5910 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005911
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005912 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005913 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005914 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005915
5916 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005917 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005918 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005919
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005920 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005921 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005922
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005923 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005924 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005925
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005926 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005927 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005928
5929#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005930 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005931 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005932
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005933 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005934 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005935#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005936}
5937
Steven Rostedt60a11772008-05-12 21:20:44 +02005938#ifdef CONFIG_FTRACE_SELFTEST
5939/* Let selftest have access to static functions in this file */
5940#include "trace_selftest.c"
5941#endif
5942
Steven Rostedt577b7852009-02-26 23:43:05 -05005943struct trace_option_dentry {
5944 struct tracer_opt *opt;
5945 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005946 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005947 struct dentry *entry;
5948};
5949
5950static ssize_t
5951trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5952 loff_t *ppos)
5953{
5954 struct trace_option_dentry *topt = filp->private_data;
5955 char *buf;
5956
5957 if (topt->flags->val & topt->opt->bit)
5958 buf = "1\n";
5959 else
5960 buf = "0\n";
5961
5962 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5963}
5964
5965static ssize_t
5966trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5967 loff_t *ppos)
5968{
5969 struct trace_option_dentry *topt = filp->private_data;
5970 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005971 int ret;
5972
Peter Huewe22fe9b52011-06-07 21:58:27 +02005973 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5974 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005975 return ret;
5976
Li Zefan8d18eaa2009-12-08 11:17:06 +08005977 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005978 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005979
5980 if (!!(topt->flags->val & topt->opt->bit) != val) {
5981 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005982 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005983 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005984 mutex_unlock(&trace_types_lock);
5985 if (ret)
5986 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005987 }
5988
5989 *ppos += cnt;
5990
5991 return cnt;
5992}
5993
5994
5995static const struct file_operations trace_options_fops = {
5996 .open = tracing_open_generic,
5997 .read = trace_options_read,
5998 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005999 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05006000};
6001
Steven Rostedta8259072009-02-26 22:19:12 -05006002static ssize_t
6003trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6004 loff_t *ppos)
6005{
6006 long index = (long)filp->private_data;
6007 char *buf;
6008
6009 if (trace_flags & (1 << index))
6010 buf = "1\n";
6011 else
6012 buf = "0\n";
6013
6014 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6015}
6016
6017static ssize_t
6018trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6019 loff_t *ppos)
6020{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006021 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05006022 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05006023 unsigned long val;
6024 int ret;
6025
Peter Huewe22fe9b52011-06-07 21:58:27 +02006026 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6027 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05006028 return ret;
6029
Zhaoleif2d84b62009-08-07 18:55:48 +08006030 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05006031 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006032
6033 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006034 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006035 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05006036
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006037 if (ret < 0)
6038 return ret;
6039
Steven Rostedta8259072009-02-26 22:19:12 -05006040 *ppos += cnt;
6041
6042 return cnt;
6043}
6044
Steven Rostedta8259072009-02-26 22:19:12 -05006045static const struct file_operations trace_options_core_fops = {
6046 .open = tracing_open_generic,
6047 .read = trace_options_core_read,
6048 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006049 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05006050};
6051
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006052struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006053 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006054 struct dentry *parent,
6055 void *data,
6056 const struct file_operations *fops)
6057{
6058 struct dentry *ret;
6059
6060 ret = debugfs_create_file(name, mode, parent, data, fops);
6061 if (!ret)
6062 pr_warning("Could not create debugfs '%s' entry\n", name);
6063
6064 return ret;
6065}
6066
6067
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006068static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006069{
6070 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006071
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006072 if (tr->options)
6073 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006074
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006075 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006076 if (!d_tracer)
6077 return NULL;
6078
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006079 tr->options = debugfs_create_dir("options", d_tracer);
6080 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05006081 pr_warning("Could not create debugfs directory 'options'\n");
6082 return NULL;
6083 }
6084
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006085 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006086}
6087
Steven Rostedt577b7852009-02-26 23:43:05 -05006088static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006089create_trace_option_file(struct trace_array *tr,
6090 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006091 struct tracer_flags *flags,
6092 struct tracer_opt *opt)
6093{
6094 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006095
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006096 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006097 if (!t_options)
6098 return;
6099
6100 topt->flags = flags;
6101 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006102 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006103
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006104 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006105 &trace_options_fops);
6106
Steven Rostedt577b7852009-02-26 23:43:05 -05006107}
6108
6109static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006110create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006111{
6112 struct trace_option_dentry *topts;
6113 struct tracer_flags *flags;
6114 struct tracer_opt *opts;
6115 int cnt;
6116
6117 if (!tracer)
6118 return NULL;
6119
6120 flags = tracer->flags;
6121
6122 if (!flags || !flags->opts)
6123 return NULL;
6124
6125 opts = flags->opts;
6126
6127 for (cnt = 0; opts[cnt].name; cnt++)
6128 ;
6129
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006130 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006131 if (!topts)
6132 return NULL;
6133
6134 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006135 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006136 &opts[cnt]);
6137
6138 return topts;
6139}
6140
6141static void
6142destroy_trace_option_files(struct trace_option_dentry *topts)
6143{
6144 int cnt;
6145
6146 if (!topts)
6147 return;
6148
Fabian Frederick3f4d8f72014-06-26 19:14:31 +02006149 for (cnt = 0; topts[cnt].opt; cnt++)
6150 debugfs_remove(topts[cnt].entry);
Steven Rostedt577b7852009-02-26 23:43:05 -05006151
6152 kfree(topts);
6153}
6154
Steven Rostedta8259072009-02-26 22:19:12 -05006155static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006156create_trace_option_core_file(struct trace_array *tr,
6157 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006158{
6159 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006160
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006161 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006162 if (!t_options)
6163 return NULL;
6164
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006165 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05006166 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006167}
6168
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006169static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006170{
6171 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006172 int i;
6173
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006174 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006175 if (!t_options)
6176 return;
6177
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006178 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006179 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05006180}
6181
Steven Rostedt499e5472012-02-22 15:50:28 -05006182static ssize_t
6183rb_simple_read(struct file *filp, char __user *ubuf,
6184 size_t cnt, loff_t *ppos)
6185{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006186 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006187 char buf[64];
6188 int r;
6189
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006190 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006191 r = sprintf(buf, "%d\n", r);
6192
6193 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6194}
6195
6196static ssize_t
6197rb_simple_write(struct file *filp, const char __user *ubuf,
6198 size_t cnt, loff_t *ppos)
6199{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006200 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006201 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05006202 unsigned long val;
6203 int ret;
6204
6205 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6206 if (ret)
6207 return ret;
6208
6209 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006210 mutex_lock(&trace_types_lock);
6211 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006212 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006213 if (tr->current_trace->start)
6214 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006215 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006216 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006217 if (tr->current_trace->stop)
6218 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006219 }
6220 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05006221 }
6222
6223 (*ppos)++;
6224
6225 return cnt;
6226}
6227
6228static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006229 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006230 .read = rb_simple_read,
6231 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006232 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006233 .llseek = default_llseek,
6234};
6235
Steven Rostedt277ba042012-08-03 16:10:49 -04006236struct dentry *trace_instance_dir;
6237
6238static void
6239init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6240
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006241static int
6242allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006243{
6244 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006245
6246 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6247
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006248 buf->tr = tr;
6249
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006250 buf->buffer = ring_buffer_alloc(size, rb_flags);
6251 if (!buf->buffer)
6252 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006253
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006254 buf->data = alloc_percpu(struct trace_array_cpu);
6255 if (!buf->data) {
6256 ring_buffer_free(buf->buffer);
6257 return -ENOMEM;
6258 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006259
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006260 /* Allocate the first page for all buffers */
6261 set_buffer_entries(&tr->trace_buffer,
6262 ring_buffer_size(tr->trace_buffer.buffer, 0));
6263
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006264 return 0;
6265}
6266
6267static int allocate_trace_buffers(struct trace_array *tr, int size)
6268{
6269 int ret;
6270
6271 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6272 if (ret)
6273 return ret;
6274
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006275#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006276 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6277 allocate_snapshot ? size : 1);
6278 if (WARN_ON(ret)) {
6279 ring_buffer_free(tr->trace_buffer.buffer);
6280 free_percpu(tr->trace_buffer.data);
6281 return -ENOMEM;
6282 }
6283 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006284
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006285 /*
6286 * Only the top level trace array gets its snapshot allocated
6287 * from the kernel command line.
6288 */
6289 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006290#endif
6291 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006292}
6293
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006294static void free_trace_buffer(struct trace_buffer *buf)
6295{
6296 if (buf->buffer) {
6297 ring_buffer_free(buf->buffer);
6298 buf->buffer = NULL;
6299 free_percpu(buf->data);
6300 buf->data = NULL;
6301 }
6302}
6303
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006304static void free_trace_buffers(struct trace_array *tr)
6305{
6306 if (!tr)
6307 return;
6308
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006309 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006310
6311#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006312 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006313#endif
6314}
6315
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006316static int new_instance_create(const char *name)
6317{
Steven Rostedt277ba042012-08-03 16:10:49 -04006318 struct trace_array *tr;
6319 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04006320
6321 mutex_lock(&trace_types_lock);
6322
6323 ret = -EEXIST;
6324 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6325 if (tr->name && strcmp(tr->name, name) == 0)
6326 goto out_unlock;
6327 }
6328
6329 ret = -ENOMEM;
6330 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6331 if (!tr)
6332 goto out_unlock;
6333
6334 tr->name = kstrdup(name, GFP_KERNEL);
6335 if (!tr->name)
6336 goto out_free_tr;
6337
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006338 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6339 goto out_free_tr;
6340
6341 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6342
Steven Rostedt277ba042012-08-03 16:10:49 -04006343 raw_spin_lock_init(&tr->start_lock);
6344
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05006345 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6346
Steven Rostedt277ba042012-08-03 16:10:49 -04006347 tr->current_trace = &nop_trace;
6348
6349 INIT_LIST_HEAD(&tr->systems);
6350 INIT_LIST_HEAD(&tr->events);
6351
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006352 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04006353 goto out_free_tr;
6354
Steven Rostedt277ba042012-08-03 16:10:49 -04006355 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6356 if (!tr->dir)
6357 goto out_free_tr;
6358
6359 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006360 if (ret) {
6361 debugfs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04006362 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006363 }
Steven Rostedt277ba042012-08-03 16:10:49 -04006364
6365 init_tracer_debugfs(tr, tr->dir);
6366
6367 list_add(&tr->list, &ftrace_trace_arrays);
6368
6369 mutex_unlock(&trace_types_lock);
6370
6371 return 0;
6372
6373 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006374 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006375 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04006376 kfree(tr->name);
6377 kfree(tr);
6378
6379 out_unlock:
6380 mutex_unlock(&trace_types_lock);
6381
6382 return ret;
6383
6384}
6385
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006386static int instance_delete(const char *name)
6387{
6388 struct trace_array *tr;
6389 int found = 0;
6390 int ret;
6391
6392 mutex_lock(&trace_types_lock);
6393
6394 ret = -ENODEV;
6395 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6396 if (tr->name && strcmp(tr->name, name) == 0) {
6397 found = 1;
6398 break;
6399 }
6400 }
6401 if (!found)
6402 goto out_unlock;
6403
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006404 ret = -EBUSY;
6405 if (tr->ref)
6406 goto out_unlock;
6407
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006408 list_del(&tr->list);
6409
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006410 tracing_set_nop(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006411 event_trace_del_tracer(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05006412 ftrace_destroy_function_files(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006413 debugfs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04006414 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006415
6416 kfree(tr->name);
6417 kfree(tr);
6418
6419 ret = 0;
6420
6421 out_unlock:
6422 mutex_unlock(&trace_types_lock);
6423
6424 return ret;
6425}
6426
Steven Rostedt277ba042012-08-03 16:10:49 -04006427static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
6428{
6429 struct dentry *parent;
6430 int ret;
6431
6432 /* Paranoid: Make sure the parent is the "instances" directory */
6433 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6434 if (WARN_ON_ONCE(parent != trace_instance_dir))
6435 return -ENOENT;
6436
6437 /*
6438 * The inode mutex is locked, but debugfs_create_dir() will also
6439 * take the mutex. As the instances directory can not be destroyed
6440 * or changed in any other way, it is safe to unlock it, and
6441 * let the dentry try. If two users try to make the same dir at
6442 * the same time, then the new_instance_create() will determine the
6443 * winner.
6444 */
6445 mutex_unlock(&inode->i_mutex);
6446
6447 ret = new_instance_create(dentry->d_iname);
6448
6449 mutex_lock(&inode->i_mutex);
6450
6451 return ret;
6452}
6453
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006454static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6455{
6456 struct dentry *parent;
6457 int ret;
6458
6459 /* Paranoid: Make sure the parent is the "instances" directory */
6460 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6461 if (WARN_ON_ONCE(parent != trace_instance_dir))
6462 return -ENOENT;
6463
6464 /* The caller did a dget() on dentry */
6465 mutex_unlock(&dentry->d_inode->i_mutex);
6466
6467 /*
6468 * The inode mutex is locked, but debugfs_create_dir() will also
6469 * take the mutex. As the instances directory can not be destroyed
6470 * or changed in any other way, it is safe to unlock it, and
6471 * let the dentry try. If two users try to make the same dir at
6472 * the same time, then the instance_delete() will determine the
6473 * winner.
6474 */
6475 mutex_unlock(&inode->i_mutex);
6476
6477 ret = instance_delete(dentry->d_iname);
6478
6479 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6480 mutex_lock(&dentry->d_inode->i_mutex);
6481
6482 return ret;
6483}
6484
Steven Rostedt277ba042012-08-03 16:10:49 -04006485static const struct inode_operations instance_dir_inode_operations = {
6486 .lookup = simple_lookup,
6487 .mkdir = instance_mkdir,
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006488 .rmdir = instance_rmdir,
Steven Rostedt277ba042012-08-03 16:10:49 -04006489};
6490
6491static __init void create_trace_instances(struct dentry *d_tracer)
6492{
6493 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6494 if (WARN_ON(!trace_instance_dir))
6495 return;
6496
6497 /* Hijack the dir inode operations, to allow mkdir */
6498 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6499}
6500
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006501static void
6502init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6503{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05006504 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006505
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006506 trace_create_file("available_tracers", 0444, d_tracer,
6507 tr, &show_traces_fops);
6508
6509 trace_create_file("current_tracer", 0644, d_tracer,
6510 tr, &set_tracer_fops);
6511
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006512 trace_create_file("tracing_cpumask", 0644, d_tracer,
6513 tr, &tracing_cpumask_fops);
6514
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006515 trace_create_file("trace_options", 0644, d_tracer,
6516 tr, &tracing_iter_fops);
6517
6518 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006519 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006520
6521 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02006522 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006523
6524 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006525 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006526
6527 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6528 tr, &tracing_total_entries_fops);
6529
Wang YanQing238ae932013-05-26 16:52:01 +08006530 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006531 tr, &tracing_free_buffer_fops);
6532
6533 trace_create_file("trace_marker", 0220, d_tracer,
6534 tr, &tracing_mark_fops);
6535
6536 trace_create_file("trace_clock", 0644, d_tracer, tr,
6537 &trace_clock_fops);
6538
6539 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006540 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006541
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05006542#ifdef CONFIG_TRACER_MAX_TRACE
6543 trace_create_file("tracing_max_latency", 0644, d_tracer,
6544 &tr->max_latency, &tracing_max_lat_fops);
6545#endif
6546
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05006547 if (ftrace_create_function_files(tr, d_tracer))
6548 WARN(1, "Could not allocate function filter files");
6549
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006550#ifdef CONFIG_TRACER_SNAPSHOT
6551 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006552 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006553#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05006554
6555 for_each_tracing_cpu(cpu)
6556 tracing_init_debugfs_percpu(tr, cpu);
6557
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006558}
6559
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006560static __init int tracer_init_debugfs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006561{
6562 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006563
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006564 trace_access_lock_init();
6565
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006566 d_tracer = tracing_init_dentry();
Namhyung Kimed6f1c92013-04-10 09:18:12 +09006567 if (!d_tracer)
6568 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006569
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006570 init_tracer_debugfs(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006571
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006572 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006573 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006574
Li Zefan339ae5d2009-04-17 10:34:30 +08006575 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006576 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02006577
Avadh Patel69abe6a2009-04-10 16:04:48 -04006578 trace_create_file("saved_cmdlines", 0444, d_tracer,
6579 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006580
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006581 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6582 NULL, &tracing_saved_cmdlines_size_fops);
6583
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006584#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006585 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6586 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006587#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006588
Steven Rostedt277ba042012-08-03 16:10:49 -04006589 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006590
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006591 create_trace_options_dir(&global_trace);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006592
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006593 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006594}
6595
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006596static int trace_panic_handler(struct notifier_block *this,
6597 unsigned long event, void *unused)
6598{
Steven Rostedt944ac422008-10-23 19:26:08 -04006599 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006600 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006601 return NOTIFY_OK;
6602}
6603
6604static struct notifier_block trace_panic_notifier = {
6605 .notifier_call = trace_panic_handler,
6606 .next = NULL,
6607 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6608};
6609
6610static int trace_die_handler(struct notifier_block *self,
6611 unsigned long val,
6612 void *data)
6613{
6614 switch (val) {
6615 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04006616 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006617 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006618 break;
6619 default:
6620 break;
6621 }
6622 return NOTIFY_OK;
6623}
6624
6625static struct notifier_block trace_die_notifier = {
6626 .notifier_call = trace_die_handler,
6627 .priority = 200
6628};
6629
6630/*
6631 * printk is set to max of 1024, we really don't need it that big.
6632 * Nothing should be printing 1000 characters anyway.
6633 */
6634#define TRACE_MAX_PRINT 1000
6635
6636/*
6637 * Define here KERN_TRACE so that we have one place to modify
6638 * it if we decide to change what log level the ftrace dump
6639 * should be at.
6640 */
Steven Rostedt428aee12009-01-14 12:24:42 -05006641#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006642
Jason Wessel955b61e2010-08-05 09:22:23 -05006643void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006644trace_printk_seq(struct trace_seq *s)
6645{
6646 /* Probably should print a warning here. */
zhangwei(Jovi)bd6df182013-03-11 15:13:37 +08006647 if (s->len >= TRACE_MAX_PRINT)
6648 s->len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006649
6650 /* should be zero ended, but we are paranoid. */
6651 s->buffer[s->len] = 0;
6652
6653 printk(KERN_TRACE "%s", s->buffer);
6654
Steven Rostedtf9520752009-03-02 14:04:40 -05006655 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006656}
6657
Jason Wessel955b61e2010-08-05 09:22:23 -05006658void trace_init_global_iter(struct trace_iterator *iter)
6659{
6660 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006661 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05006662 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006663 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07006664
6665 if (iter->trace && iter->trace->open)
6666 iter->trace->open(iter);
6667
6668 /* Annotate start of buffers if we had overruns */
6669 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6670 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6671
6672 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6673 if (trace_clocks[iter->tr->clock_id].in_ns)
6674 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05006675}
6676
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006677void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006678{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006679 /* use static because iter can be a bit big for the stack */
6680 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006681 static atomic_t dump_running;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006682 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04006683 unsigned long flags;
6684 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006685
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006686 /* Only allow one dump user at a time. */
6687 if (atomic_inc_return(&dump_running) != 1) {
6688 atomic_dec(&dump_running);
6689 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04006690 }
6691
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006692 /*
6693 * Always turn off tracing when we dump.
6694 * We don't need to show trace output of what happens
6695 * between multiple crashes.
6696 *
6697 * If the user does a sysrq-z, then they can re-enable
6698 * tracing with echo 1 > tracing_on.
6699 */
6700 tracing_off();
6701
6702 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006703
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08006704 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05006705 trace_init_global_iter(&iter);
6706
Steven Rostedtd7690412008-10-01 00:29:53 -04006707 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006708 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04006709 }
6710
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006711 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6712
Török Edwinb54d3de2008-11-22 13:28:48 +02006713 /* don't look at user memory in panic mode */
6714 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6715
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006716 switch (oops_dump_mode) {
6717 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05006718 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006719 break;
6720 case DUMP_ORIG:
6721 iter.cpu_file = raw_smp_processor_id();
6722 break;
6723 case DUMP_NONE:
6724 goto out_enable;
6725 default:
6726 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05006727 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006728 }
6729
6730 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006731
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006732 /* Did function tracer already get disabled? */
6733 if (ftrace_is_dead()) {
6734 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6735 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6736 }
6737
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006738 /*
6739 * We need to stop all tracing on all CPUS to read the
6740 * the next buffer. This is a bit expensive, but is
6741 * not done often. We fill all what we can read,
6742 * and then release the locks again.
6743 */
6744
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006745 while (!trace_empty(&iter)) {
6746
6747 if (!cnt)
6748 printk(KERN_TRACE "---------------------------------\n");
6749
6750 cnt++;
6751
6752 /* reset all but tr, trace, and overruns */
6753 memset(&iter.seq, 0,
6754 sizeof(struct trace_iterator) -
6755 offsetof(struct trace_iterator, seq));
6756 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6757 iter.pos = -1;
6758
Jason Wessel955b61e2010-08-05 09:22:23 -05006759 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006760 int ret;
6761
6762 ret = print_trace_line(&iter);
6763 if (ret != TRACE_TYPE_NO_CONSUME)
6764 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006765 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05006766 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006767
6768 trace_printk_seq(&iter.seq);
6769 }
6770
6771 if (!cnt)
6772 printk(KERN_TRACE " (ftrace buffer empty)\n");
6773 else
6774 printk(KERN_TRACE "---------------------------------\n");
6775
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006776 out_enable:
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006777 trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006778
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006779 for_each_tracing_cpu(cpu) {
6780 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006781 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006782 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04006783 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006784}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07006785EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006786
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006787__init static int tracer_alloc_buffers(void)
6788{
Steven Rostedt73c51622009-03-11 13:42:01 -04006789 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306790 int ret = -ENOMEM;
6791
David Sharp750912f2010-12-08 13:46:47 -08006792
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306793 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6794 goto out;
6795
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006796 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306797 goto out_free_buffer_mask;
6798
Steven Rostedt07d777f2011-09-22 14:01:55 -04006799 /* Only allocate trace_printk buffers if a trace_printk exists */
6800 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04006801 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04006802 trace_printk_init_buffers();
6803
Steven Rostedt73c51622009-03-11 13:42:01 -04006804 /* To save memory, keep the ring buffer size to its minimum */
6805 if (ring_buffer_expanded)
6806 ring_buf_size = trace_buf_size;
6807 else
6808 ring_buf_size = 1;
6809
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306810 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006811 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006812
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006813 raw_spin_lock_init(&global_trace.start_lock);
6814
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04006815 /* Used for event triggers */
6816 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6817 if (!temp_buffer)
6818 goto out_free_cpumask;
6819
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006820 if (trace_create_savedcmd() < 0)
6821 goto out_free_temp_buffer;
6822
Steven Rostedtab464282008-05-12 21:21:00 +02006823 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006824 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006825 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6826 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006827 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006828 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04006829
Steven Rostedt499e5472012-02-22 15:50:28 -05006830 if (global_trace.buffer_disabled)
6831 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006832
Steven Rostedte1e232c2014-02-10 23:38:46 -05006833 if (trace_boot_clock) {
6834 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6835 if (ret < 0)
6836 pr_warning("Trace clock %s not defined, going back to default\n",
6837 trace_boot_clock);
6838 }
6839
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04006840 /*
6841 * register_tracer() might reference current_trace, so it
6842 * needs to be set before we register anything. This is
6843 * just a bootstrap of current_trace anyway.
6844 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006845 global_trace.current_trace = &nop_trace;
6846
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05006847 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6848
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006849 ftrace_init_global_array_ops(&global_trace);
6850
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04006851 register_tracer(&nop_trace);
6852
Steven Rostedt60a11772008-05-12 21:20:44 +02006853 /* All seems OK, enable tracing */
6854 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006855
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006856 atomic_notifier_chain_register(&panic_notifier_list,
6857 &trace_panic_notifier);
6858
6859 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01006860
Steven Rostedtae63b312012-05-03 23:09:03 -04006861 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6862
6863 INIT_LIST_HEAD(&global_trace.systems);
6864 INIT_LIST_HEAD(&global_trace.events);
6865 list_add(&global_trace.list, &ftrace_trace_arrays);
6866
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04006867 while (trace_boot_options) {
6868 char *option;
6869
6870 option = strsep(&trace_boot_options, ",");
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006871 trace_set_options(&global_trace, option);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04006872 }
6873
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006874 register_snapshot_cmd();
6875
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01006876 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006877
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006878out_free_savedcmd:
6879 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04006880out_free_temp_buffer:
6881 ring_buffer_free(temp_buffer);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306882out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006883 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306884out_free_buffer_mask:
6885 free_cpumask_var(tracing_buffer_mask);
6886out:
6887 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006888}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006889
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05006890void __init trace_init(void)
6891{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05006892 if (tracepoint_printk) {
6893 tracepoint_print_iter =
6894 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
6895 if (WARN_ON(!tracepoint_print_iter))
6896 tracepoint_printk = 0;
6897 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05006898 tracer_alloc_buffers();
6899 init_ftrace_syscalls();
6900 trace_event_init();
6901}
6902
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006903__init static int clear_boot_tracer(void)
6904{
6905 /*
6906 * The default tracer at boot buffer is an init section.
6907 * This function is called in lateinit. If we did not
6908 * find the boot tracer, then clear it out, to prevent
6909 * later registration from accessing the buffer that is
6910 * about to be freed.
6911 */
6912 if (!default_bootup_tracer)
6913 return 0;
6914
6915 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6916 default_bootup_tracer);
6917 default_bootup_tracer = NULL;
6918
6919 return 0;
6920}
6921
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006922fs_initcall(tracer_init_debugfs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006923late_initcall(clear_boot_tracer);