blob: acd27555dc5be84f937b365ff323f72bc2752cb4 [file] [log] [blame]
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020023#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020024#include <linux/hardirq.h>
25#include <linux/linkage.h>
26#include <linux/uaccess.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050027#include <linux/kprobes.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020028#include <linux/ftrace.h>
29#include <linux/module.h>
30#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050031#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040032#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010033#include <linux/string.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080034#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090035#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020036#include <linux/ctype.h>
37#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020038#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050039#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020040#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060041#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020042
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020043#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050044#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010046/*
Steven Rostedt73c51622009-03-11 13:42:01 -040047 * On boot up, the ring buffer is set to the minimum size, so that
48 * we do not waste memory on systems that are not using tracing.
49 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050050bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040051
52/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010053 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010054 * A selftest will lurk into the ring-buffer to count the
55 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010056 * insertions into the ring-buffer such as trace_printk could occurred
Frederic Weisbeckerff325042008-12-04 23:47:35 +010057 * at the same time, giving false positive or negative results.
58 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010059static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010060
Steven Rostedtb2821ae2009-02-02 21:38:32 -050061/*
62 * If a tracer is running, we do not want to run SELFTEST.
63 */
Li Zefan020e5f82009-07-01 10:47:05 +080064bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050065
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050066/* Pipe tracepoints to printk */
67struct trace_iterator *tracepoint_print_iter;
68int tracepoint_printk;
69
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010070/* For tracers that don't implement custom flags */
71static struct tracer_opt dummy_tracer_opt[] = {
72 { }
73};
74
75static struct tracer_flags dummy_tracer_flags = {
76 .val = 0,
77 .opts = dummy_tracer_opt
78};
79
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050080static int
81dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010082{
83 return 0;
84}
Steven Rostedt0f048702008-11-05 16:05:44 -050085
86/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040087 * To prevent the comm cache from being overwritten when no
88 * tracing is active, only save the comm when a trace event
89 * occurred.
90 */
91static DEFINE_PER_CPU(bool, trace_cmdline_save);
92
93/*
Steven Rostedt0f048702008-11-05 16:05:44 -050094 * Kill all tracing for good (never come back).
95 * It is initialized to 1 but will turn to zero if the initialization
96 * of the tracer is successful. But that is the only place that sets
97 * this back to zero.
98 */
Hannes Eder4fd27352009-02-10 19:44:12 +010099static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -0500100
Christoph Lameter9288f992009-10-07 19:17:45 -0400101DEFINE_PER_CPU(int, ftrace_cpu_disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -0400102
Jason Wessel955b61e2010-08-05 09:22:23 -0500103cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200104
Steven Rostedt944ac422008-10-23 19:26:08 -0400105/*
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 *
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputing it to a
112 * serial console.
113 *
114 * It is default off, but you can enable it with either specifying
115 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400119 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200120
121enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400122
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400123/* When set, tracing will stop when a WARN*() is hit */
124int __disable_trace_on_warning;
125
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500126static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500127
Li Zefanee6c2c12009-09-18 14:06:47 +0800128#define MAX_TRACER_SIZE 100
129static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500130static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100131
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500132static bool allocate_snapshot;
133
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200134static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100135{
Chen Gang67012ab2013-04-08 12:06:44 +0800136 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500137 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400138 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500139 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100140 return 1;
141}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200142__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100143
Steven Rostedt944ac422008-10-23 19:26:08 -0400144static int __init set_ftrace_dump_on_oops(char *str)
145{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200146 if (*str++ != '=' || !*str) {
147 ftrace_dump_on_oops = DUMP_ALL;
148 return 1;
149 }
150
151 if (!strcmp("orig_cpu", str)) {
152 ftrace_dump_on_oops = DUMP_ORIG;
153 return 1;
154 }
155
156 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400157}
158__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200159
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400160static int __init stop_trace_on_warning(char *str)
161{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200162 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
163 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400164 return 1;
165}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200166__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400167
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400168static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500169{
170 allocate_snapshot = true;
171 /* We also need the main ring buffer expanded */
172 ring_buffer_expanded = true;
173 return 1;
174}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400175__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500176
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400177
178static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
179static char *trace_boot_options __initdata;
180
181static int __init set_trace_boot_options(char *str)
182{
Chen Gang67012ab2013-04-08 12:06:44 +0800183 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400184 trace_boot_options = trace_boot_options_buf;
185 return 0;
186}
187__setup("trace_options=", set_trace_boot_options);
188
Steven Rostedte1e232c2014-02-10 23:38:46 -0500189static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
190static char *trace_boot_clock __initdata;
191
192static int __init set_trace_boot_clock(char *str)
193{
194 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
195 trace_boot_clock = trace_boot_clock_buf;
196 return 0;
197}
198__setup("trace_clock=", set_trace_boot_clock);
199
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500200static int __init set_tracepoint_printk(char *str)
201{
202 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
203 tracepoint_printk = 1;
204 return 1;
205}
206__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400207
Lai Jiangshancf8e3472009-03-30 13:48:00 +0800208unsigned long long ns2usecs(cycle_t nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200209{
210 nsec += 500;
211 do_div(nsec, 1000);
212 return nsec;
213}
214
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200215/*
216 * The global_trace is the descriptor that holds the tracing
217 * buffers for the live tracing. For each CPU, it contains
218 * a link list of pages that will store trace entries. The
219 * page descriptor of the pages in the memory is used to hold
220 * the link list by linking the lru item in the page descriptor
221 * to each of the pages in the buffer per CPU.
222 *
223 * For each active CPU there is a data field that holds the
224 * pages for the buffer for that CPU. Each CPU has the same number
225 * of pages allocated for its buffer.
226 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200227static struct trace_array global_trace;
228
Steven Rostedtae63b312012-05-03 23:09:03 -0400229LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200230
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400231int trace_array_get(struct trace_array *this_tr)
232{
233 struct trace_array *tr;
234 int ret = -ENODEV;
235
236 mutex_lock(&trace_types_lock);
237 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
238 if (tr == this_tr) {
239 tr->ref++;
240 ret = 0;
241 break;
242 }
243 }
244 mutex_unlock(&trace_types_lock);
245
246 return ret;
247}
248
249static void __trace_array_put(struct trace_array *this_tr)
250{
251 WARN_ON(!this_tr->ref);
252 this_tr->ref--;
253}
254
255void trace_array_put(struct trace_array *this_tr)
256{
257 mutex_lock(&trace_types_lock);
258 __trace_array_put(this_tr);
259 mutex_unlock(&trace_types_lock);
260}
261
Tom Zanussif306cc82013-10-24 08:34:17 -0500262int filter_check_discard(struct ftrace_event_file *file, void *rec,
263 struct ring_buffer *buffer,
264 struct ring_buffer_event *event)
Tom Zanussieb02ce02009-04-08 03:15:54 -0500265{
Tom Zanussif306cc82013-10-24 08:34:17 -0500266 if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
267 !filter_match_preds(file->filter, rec)) {
268 ring_buffer_discard_commit(buffer, event);
269 return 1;
270 }
271
272 return 0;
Tom Zanussieb02ce02009-04-08 03:15:54 -0500273}
Tom Zanussif306cc82013-10-24 08:34:17 -0500274EXPORT_SYMBOL_GPL(filter_check_discard);
275
276int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
277 struct ring_buffer *buffer,
278 struct ring_buffer_event *event)
279{
280 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
281 !filter_match_preds(call->filter, rec)) {
282 ring_buffer_discard_commit(buffer, event);
283 return 1;
284 }
285
286 return 0;
287}
288EXPORT_SYMBOL_GPL(call_filter_check_discard);
Tom Zanussieb02ce02009-04-08 03:15:54 -0500289
Fabian Frederickad1438a2014-04-17 21:44:42 +0200290static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400291{
292 u64 ts;
293
294 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700295 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400296 return trace_clock_local();
297
Alexander Z Lam94571582013-08-02 18:36:16 -0700298 ts = ring_buffer_time_stamp(buf->buffer, cpu);
299 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400300
301 return ts;
302}
303
Alexander Z Lam94571582013-08-02 18:36:16 -0700304cycle_t ftrace_now(int cpu)
305{
306 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
307}
308
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400309/**
310 * tracing_is_enabled - Show if global_trace has been disabled
311 *
312 * Shows if the global trace has been enabled or not. It uses the
313 * mirror flag "buffer_disabled" to be used in fast paths such as for
314 * the irqsoff tracer. But it may be inaccurate due to races. If you
315 * need to know the accurate state, use tracing_is_on() which is a little
316 * slower, but accurate.
317 */
Steven Rostedt90369902008-11-05 16:05:44 -0500318int tracing_is_enabled(void)
319{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400320 /*
321 * For quick access (irqsoff uses this in fast path), just
322 * return the mirror variable of the state of the ring buffer.
323 * It's a little racy, but we don't really care.
324 */
325 smp_rmb();
326 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500327}
328
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200329/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400330 * trace_buf_size is the size in bytes that is allocated
331 * for a buffer. Note, the number of bytes is always rounded
332 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400333 *
334 * This number is purposely set to a low number of 16384.
335 * If the dump on oops happens, it will be much appreciated
336 * to not have to wait for all that output. Anyway this can be
337 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200338 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400339#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400340
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400341static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200342
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200343/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200344static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200345
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200346/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200347 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200348 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700349DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200350
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800351/*
352 * serialize the access of the ring buffer
353 *
354 * ring buffer serializes readers, but it is low level protection.
355 * The validity of the events (which returns by ring_buffer_peek() ..etc)
356 * are not protected by ring buffer.
357 *
358 * The content of events may become garbage if we allow other process consumes
359 * these events concurrently:
360 * A) the page of the consumed events may become a normal page
361 * (not reader page) in ring buffer, and this page will be rewrited
362 * by events producer.
363 * B) The page of the consumed events may become a page for splice_read,
364 * and this page will be returned to system.
365 *
366 * These primitives allow multi process access to different cpu ring buffer
367 * concurrently.
368 *
369 * These primitives don't distinguish read-only and read-consume access.
370 * Multi read-only access are also serialized.
371 */
372
373#ifdef CONFIG_SMP
374static DECLARE_RWSEM(all_cpu_access_lock);
375static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
376
377static inline void trace_access_lock(int cpu)
378{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500379 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800380 /* gain it for accessing the whole ring buffer. */
381 down_write(&all_cpu_access_lock);
382 } else {
383 /* gain it for accessing a cpu ring buffer. */
384
Steven Rostedtae3b5092013-01-23 15:22:59 -0500385 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800386 down_read(&all_cpu_access_lock);
387
388 /* Secondly block other access to this @cpu ring buffer. */
389 mutex_lock(&per_cpu(cpu_access_lock, cpu));
390 }
391}
392
393static inline void trace_access_unlock(int cpu)
394{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500395 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800396 up_write(&all_cpu_access_lock);
397 } else {
398 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
399 up_read(&all_cpu_access_lock);
400 }
401}
402
403static inline void trace_access_lock_init(void)
404{
405 int cpu;
406
407 for_each_possible_cpu(cpu)
408 mutex_init(&per_cpu(cpu_access_lock, cpu));
409}
410
411#else
412
413static DEFINE_MUTEX(access_lock);
414
415static inline void trace_access_lock(int cpu)
416{
417 (void)cpu;
418 mutex_lock(&access_lock);
419}
420
421static inline void trace_access_unlock(int cpu)
422{
423 (void)cpu;
424 mutex_unlock(&access_lock);
425}
426
427static inline void trace_access_lock_init(void)
428{
429}
430
431#endif
432
Steven Rostedtee6bce52008-11-12 17:52:37 -0500433/* trace_flags holds trace_options default values */
Steven Rostedt12ef7d42008-11-12 17:52:38 -0500434unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
Steven Rostedta2a16d62009-03-24 23:17:58 -0400435 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
Steven Rostedt77271ce2011-11-17 09:34:33 -0500436 TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
Steven Rostedt (Red Hat)328df472013-03-14 12:10:40 -0400437 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
Vaibhav Nagarnaike7e2ee82011-05-10 13:27:21 -0700438
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400439static void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400440{
441 if (tr->trace_buffer.buffer)
442 ring_buffer_record_on(tr->trace_buffer.buffer);
443 /*
444 * This flag is looked at when buffers haven't been allocated
445 * yet, or by some tracers (like irqsoff), that just want to
446 * know if the ring buffer has been disabled, but it can handle
447 * races of where it gets disabled but we still do a record.
448 * As the check is in the fast path of the tracers, it is more
449 * important to be fast than accurate.
450 */
451 tr->buffer_disabled = 0;
452 /* Make the flag seen by readers */
453 smp_wmb();
454}
455
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200456/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500457 * tracing_on - enable tracing buffers
458 *
459 * This function enables tracing buffers that may have been
460 * disabled with tracing_off.
461 */
462void tracing_on(void)
463{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400464 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500465}
466EXPORT_SYMBOL_GPL(tracing_on);
467
468/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500469 * __trace_puts - write a constant string into the trace buffer.
470 * @ip: The address of the caller
471 * @str: The constant string to write
472 * @size: The size of the string.
473 */
474int __trace_puts(unsigned long ip, const char *str, int size)
475{
476 struct ring_buffer_event *event;
477 struct ring_buffer *buffer;
478 struct print_entry *entry;
479 unsigned long irq_flags;
480 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800481 int pc;
482
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800483 if (!(trace_flags & TRACE_ITER_PRINTK))
484 return 0;
485
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800486 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500487
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500488 if (unlikely(tracing_selftest_running || tracing_disabled))
489 return 0;
490
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500491 alloc = sizeof(*entry) + size + 2; /* possible \n added */
492
493 local_save_flags(irq_flags);
494 buffer = global_trace.trace_buffer.buffer;
495 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800496 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500497 if (!event)
498 return 0;
499
500 entry = ring_buffer_event_data(event);
501 entry->ip = ip;
502
503 memcpy(&entry->buf, str, size);
504
505 /* Add a newline if necessary */
506 if (entry->buf[size - 1] != '\n') {
507 entry->buf[size] = '\n';
508 entry->buf[size + 1] = '\0';
509 } else
510 entry->buf[size] = '\0';
511
512 __buffer_unlock_commit(buffer, event);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800513 ftrace_trace_stack(buffer, irq_flags, 4, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500514
515 return size;
516}
517EXPORT_SYMBOL_GPL(__trace_puts);
518
519/**
520 * __trace_bputs - write the pointer to a constant string into trace buffer
521 * @ip: The address of the caller
522 * @str: The constant string to write to the buffer to
523 */
524int __trace_bputs(unsigned long ip, const char *str)
525{
526 struct ring_buffer_event *event;
527 struct ring_buffer *buffer;
528 struct bputs_entry *entry;
529 unsigned long irq_flags;
530 int size = sizeof(struct bputs_entry);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800531 int pc;
532
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800533 if (!(trace_flags & TRACE_ITER_PRINTK))
534 return 0;
535
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800536 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500537
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500538 if (unlikely(tracing_selftest_running || tracing_disabled))
539 return 0;
540
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500541 local_save_flags(irq_flags);
542 buffer = global_trace.trace_buffer.buffer;
543 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800544 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500545 if (!event)
546 return 0;
547
548 entry = ring_buffer_event_data(event);
549 entry->ip = ip;
550 entry->str = str;
551
552 __buffer_unlock_commit(buffer, event);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800553 ftrace_trace_stack(buffer, irq_flags, 4, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500554
555 return 1;
556}
557EXPORT_SYMBOL_GPL(__trace_bputs);
558
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500559#ifdef CONFIG_TRACER_SNAPSHOT
560/**
561 * trace_snapshot - take a snapshot of the current buffer.
562 *
563 * This causes a swap between the snapshot buffer and the current live
564 * tracing buffer. You can use this to take snapshots of the live
565 * trace when some condition is triggered, but continue to trace.
566 *
567 * Note, make sure to allocate the snapshot with either
568 * a tracing_snapshot_alloc(), or by doing it manually
569 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
570 *
571 * If the snapshot buffer is not allocated, it will stop tracing.
572 * Basically making a permanent snapshot.
573 */
574void tracing_snapshot(void)
575{
576 struct trace_array *tr = &global_trace;
577 struct tracer *tracer = tr->current_trace;
578 unsigned long flags;
579
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500580 if (in_nmi()) {
581 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
582 internal_trace_puts("*** snapshot is being ignored ***\n");
583 return;
584 }
585
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500586 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500587 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
588 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500589 tracing_off();
590 return;
591 }
592
593 /* Note, snapshot can not be used when the tracer uses it */
594 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500595 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
596 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500597 return;
598 }
599
600 local_irq_save(flags);
601 update_max_tr(tr, current, smp_processor_id());
602 local_irq_restore(flags);
603}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500604EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500605
606static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
607 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400608static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
609
610static int alloc_snapshot(struct trace_array *tr)
611{
612 int ret;
613
614 if (!tr->allocated_snapshot) {
615
616 /* allocate spare buffer */
617 ret = resize_buffer_duplicate_size(&tr->max_buffer,
618 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
619 if (ret < 0)
620 return ret;
621
622 tr->allocated_snapshot = true;
623 }
624
625 return 0;
626}
627
Fabian Frederickad1438a2014-04-17 21:44:42 +0200628static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400629{
630 /*
631 * We don't free the ring buffer. instead, resize it because
632 * The max_tr ring buffer has some state (e.g. ring->clock) and
633 * we want preserve it.
634 */
635 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
636 set_buffer_entries(&tr->max_buffer, 1);
637 tracing_reset_online_cpus(&tr->max_buffer);
638 tr->allocated_snapshot = false;
639}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500640
641/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500642 * tracing_alloc_snapshot - allocate snapshot buffer.
643 *
644 * This only allocates the snapshot buffer if it isn't already
645 * allocated - it doesn't also take a snapshot.
646 *
647 * This is meant to be used in cases where the snapshot buffer needs
648 * to be set up for events that can't sleep but need to be able to
649 * trigger a snapshot.
650 */
651int tracing_alloc_snapshot(void)
652{
653 struct trace_array *tr = &global_trace;
654 int ret;
655
656 ret = alloc_snapshot(tr);
657 WARN_ON(ret < 0);
658
659 return ret;
660}
661EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
662
/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500685#else
686void tracing_snapshot(void)
687{
688 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
689}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500690EXPORT_SYMBOL_GPL(tracing_snapshot);
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500691int tracing_alloc_snapshot(void)
692{
693 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
694 return -ENODEV;
695}
696EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500697void tracing_snapshot_alloc(void)
698{
699 /* Give warning */
700 tracing_snapshot();
701}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500702EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500703#endif /* CONFIG_TRACER_SNAPSHOT */
704
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400705static void tracer_tracing_off(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400706{
707 if (tr->trace_buffer.buffer)
708 ring_buffer_record_off(tr->trace_buffer.buffer);
709 /*
710 * This flag is looked at when buffers haven't been allocated
711 * yet, or by some tracers (like irqsoff), that just want to
712 * know if the ring buffer has been disabled, but it can handle
713 * races of where it gets disabled but we still do a record.
714 * As the check is in the fast path of the tracers, it is more
715 * important to be fast than accurate.
716 */
717 tr->buffer_disabled = 1;
718 /* Make the flag seen by readers */
719 smp_wmb();
720}
721
Steven Rostedt499e5472012-02-22 15:50:28 -0500722/**
723 * tracing_off - turn off tracing buffers
724 *
725 * This function stops the tracing buffers from recording data.
726 * It does not disable any overhead the tracers themselves may
727 * be causing. This function simply causes all recording to
728 * the ring buffers to fail.
729 */
730void tracing_off(void)
731{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400732 tracer_tracing_off(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500733}
734EXPORT_SYMBOL_GPL(tracing_off);
735
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400736void disable_trace_on_warning(void)
737{
738 if (__disable_trace_on_warning)
739 tracing_off();
740}
741
Steven Rostedt499e5472012-02-22 15:50:28 -0500742/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400743 * tracer_tracing_is_on - show real state of ring buffer enabled
744 * @tr : the trace array to know if ring buffer is enabled
745 *
746 * Shows real state of the ring buffer if it is enabled or not.
747 */
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400748static int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400749{
750 if (tr->trace_buffer.buffer)
751 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
752 return !tr->buffer_disabled;
753}
754
/**
 * tracing_is_on - show state of ring buffers enabled
 *
 * Returns nonzero when the global trace array's ring buffer is
 * currently recording, zero when it has been turned off.
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
763
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400764static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200765{
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400766 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200767
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200768 if (!str)
769 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +0800770 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200771 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +0800772 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200773 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400774 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200775 return 1;
776}
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400777__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200778
Tim Bird0e950172010-02-25 15:36:43 -0800779static int __init set_tracing_thresh(char *str)
780{
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800781 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -0800782 int ret;
783
784 if (!str)
785 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200786 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -0800787 if (ret < 0)
788 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800789 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -0800790 return 1;
791}
792__setup("tracing_thresh=", set_tracing_thresh);
793
/*
 * nsecs_to_usecs - convert a nanosecond count to microseconds
 *
 * Plain integer division; any sub-microsecond remainder is truncated.
 */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	unsigned long usecs = nsecs / 1000;

	return usecs;
}
798
/*
 * These must match the bit positions in trace_iterator_flags:
 * entry N here names the option controlled by bit N.  Do NOT
 * reorder or insert entries without updating that enum.
 */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL			/* sentinel terminates iteration */
};
829
/*
 * Table of selectable trace clocks: the callback producing timestamps,
 * the user-visible name, and whether its values are in nanoseconds
 * (in_ns affects how timestamps are presented — presumably via the
 * trace_clock interface; confirm against the readers of this table).
 */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS	/* optional arch-specific extra clocks */
};
843
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200844/*
845 * trace_parser_get_init - gets the buffer for trace parser
846 */
847int trace_parser_get_init(struct trace_parser *parser, int size)
848{
849 memset(parser, 0, sizeof(*parser));
850
851 parser->buffer = kmalloc(size, GFP_KERNEL);
852 if (!parser->buffer)
853 return 1;
854
855 parser->size = size;
856 return 0;
857}
858
/*
 * trace_parser_put - frees the buffer for trace parser
 *
 * NOTE: parser->buffer is left dangling after the kfree(); the parser
 * must be re-initialized (trace_parser_get_init) before reuse.
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
866
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* Offset zero means a fresh parse, not a continuation. */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		/* Start of a new token: rewind the accumulation index. */
		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		/* Keep one byte free for the terminating NUL below. */
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		/* Token complete: terminate it and clear the cont flag. */
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		/* User data ended mid-token: stash ch, resume next call. */
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
952
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -0400953/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +0200954static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200955{
956 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200957
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -0500958 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200959 return -EBUSY;
960
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -0500961 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200962 if (cnt > len)
963 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -0400964 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200965
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -0400966 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +0200967 return cnt;
968}
969
/* Latency threshold in nsecs; set_tracing_thresh() stores usecs * 1000. */
unsigned long __read_mostly tracing_thresh;
971
Steven Rostedt5d4a9db2009-08-27 16:52:21 -0400972#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 *
 * Both callers in this file (update_max_tr, update_max_tr_single)
 * invoke this with tr->max_lock held and interrupts disabled.
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	/* Remember which CPU and time window produced this maximum. */
	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	/* Snapshot the offending task's identity and scheduling info. */
	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}
1011
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 *
 * Must be called with interrupts disabled (WARN_ON_ONCE enforces it);
 * a no-op while tracing is stopped or no snapshot buffer is allocated.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Swap the live buffer with the max buffer (pointer exchange). */
	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1046
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 *
 * Must be called with interrupts disabled; a no-op while tracing is
 * stopped or no snapshot buffer is allocated.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	/* -EAGAIN and -EBUSY are expected transient failures; warn otherwise. */
	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001090#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001091
/*
 * wait_on_pipe - block until the iterator's ring buffer has data
 * @iter: the trace iterator to wait on
 * @full: when true, wait until a full page of data is available
 *
 * Returns 0 immediately for static (snapshot-style) iterators;
 * otherwise returns the result of ring_buffer_wait().
 */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}
1101
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * run_tracer_selftest - run a tracer's startup self-test
 * @type: the tracer being registered
 *
 * Temporarily installs @type as the current tracer on the global
 * trace array and runs its selftest callback.  Returns 0 on pass
 * (or when no selftest exists / selftests are disabled), -1 on fail.
 * Caller (register_tracer) holds trace_types_lock.
 */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
/* Stub when startup selftests are compiled out: always "passes". */
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001167
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 *
 * Returns 0 on success, -1 on a missing/over-long/duplicate name,
 * or the failure code of the startup selftest.  May also start the
 * tracer immediately if it matches the boot-requested tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject duplicates: scan the existing singly-linked tracer list. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Fill in no-op defaults so callers can always dereference these. */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	/* Link the new tracer at the head of the list. */
	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1242
/*
 * tracing_reset - clear a single CPU's ring buffer in @buf
 *
 * Recording is disabled around the reset so no writer can race
 * with the buffer being cleared.
 */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	/* Nothing to do if the ring buffer was never allocated. */
	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1258
/*
 * tracing_reset_online_cpus - clear @buf's ring buffer on every online CPU
 *
 * Also restarts the buffer's time_start stamp so subsequent output
 * is measured from the reset point.
 */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	/* Nothing to do if the ring buffer was never allocated. */
	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1279
/* Must have trace_types_lock held */
/*
 * Reset every trace array's buffers (and its max buffer, when
 * CONFIG_TRACER_MAX_TRACE is enabled) on all online CPUs.
 */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1292
/* Default number of cached pid->comm entries. */
#define SAVED_CMDLINES_DEFAULT 128
/* Marker for "no entry": UINT_MAX in both direction maps. */
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * Cache of task command names, mapped both ways:
 * pid -> cmdline slot and cmdline slot -> pid.
 */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;	/* number of slots allocated */
	int cmdline_idx;	/* next slot to (re)use */
	char *saved_cmdlines;	/* cmdline_num * TASK_COMM_LEN bytes */
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001307
/* Return the TASK_COMM_LEN-sized cmdline slot at index @idx. */
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}
1312
/* Copy @cmdline (TASK_COMM_LEN bytes) into cmdline slot @idx. */
static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
1317
/*
 * allocate_cmdlines_buffer - allocate and initialize the maps in @s
 * @val: number of cmdline slots to allocate
 * @s: the buffer structure to fill in
 *
 * Returns 0 on success, -ENOMEM on allocation failure (any partial
 * allocation is freed before returning).
 */
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	/*
	 * memset only uses the low byte, but NO_CMDLINE_MAP (UINT_MAX)
	 * is all-0xff bytes, so every unsigned entry ends up UINT_MAX.
	 */
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
1341
1342static int trace_create_savedcmd(void)
1343{
1344 int ret;
1345
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001346 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001347 if (!savedcmd)
1348 return -ENOMEM;
1349
1350 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1351 if (ret < 0) {
1352 kfree(savedcmd);
1353 savedcmd = NULL;
1354 return -ENOMEM;
1355 }
1356
1357 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001358}
1359
/* Nonzero while tracing_stop() calls outnumber tracing_start() calls. */
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
1364
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 *
 * Start/stop calls nest: recording resumes only when stop_count
 * drops back to zero.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1407
/*
 * tracing_start_tr - per-instance counterpart of tracing_start()
 * @tr: the trace array to restart
 *
 * Decrements @tr's nested stop count and re-enables its ring buffer
 * when the count reaches zero.  The global array is delegated to
 * tracing_start(), which also handles the max buffer.
 */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1438
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 *
 * Calls nest: only the first stop (count going 0 -> 1) actually
 * disables the buffers.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1472
/*
 * tracing_stop_tr - stop recording for a single trace array.
 *
 * Like tracing_stop() but scoped to @tr. Stops are reference counted
 * via tr->stop_count, so nested stop/start pairs balance out.
 */
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	/* Already stopped: only the nesting count changes. */
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1493
Ingo Molnare309b412008-05-12 21:20:51 +02001494void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001495
/*
 * trace_save_cmdline - record @tsk's comm in the saved cmdline map.
 *
 * Returns 1 if the comm was saved, 0 if the pid is out of range or
 * the cmdline lock could not be taken (best effort, no spinning).
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* pid 0 (idle) and pids beyond the map size are not recorded. */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot yet for this pid: take the next one round-robin. */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
1538
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001539static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001540{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001541 unsigned map;
1542
Steven Rostedt4ca53082009-03-16 19:20:15 -04001543 if (!pid) {
1544 strcpy(comm, "<idle>");
1545 return;
1546 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001547
Steven Rostedt74bf4072010-01-25 15:11:53 -05001548 if (WARN_ON_ONCE(pid < 0)) {
1549 strcpy(comm, "<XXX>");
1550 return;
1551 }
1552
Steven Rostedt4ca53082009-03-16 19:20:15 -04001553 if (pid > PID_MAX_DEFAULT) {
1554 strcpy(comm, "<...>");
1555 return;
1556 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001558 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001559 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001560 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001561 else
1562 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001563}
1564
/*
 * trace_find_cmdline - locked wrapper around __trace_find_cmdline().
 *
 * Disables preemption and takes trace_cmdline_lock so the lookup does
 * not race with trace_save_cmdline().
 */
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
1575
/*
 * tracing_record_cmdline - opportunistically save @tsk's comm.
 *
 * Does nothing when cmdline recording is disabled, tracing is off, or
 * this CPU has no pending save request. On a successful save the
 * per-cpu trace_cmdline_save flag is cleared until the next event
 * re-arms it (see __buffer_unlock_commit()).
 */
void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}
1587
/*
 * tracing_generic_entry_update - fill in the common fields of a trace entry.
 *
 * @entry: the entry header to populate
 * @flags: saved irq flags of the context being traced
 * @pc:    preempt count at the time of the event
 *
 * Records the current pid, the low byte of the preempt count, and a
 * bitmask describing irq/softirq/hardirq and resched state.
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		/* Arch cannot report irq state; flag it as unsupported. */
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001608
Steven Rostedte77405a2009-09-02 14:17:06 -04001609struct ring_buffer_event *
1610trace_buffer_lock_reserve(struct ring_buffer *buffer,
1611 int type,
1612 unsigned long len,
1613 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001614{
1615 struct ring_buffer_event *event;
1616
Steven Rostedte77405a2009-09-02 14:17:06 -04001617 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001618 if (event != NULL) {
1619 struct trace_entry *ent = ring_buffer_event_data(event);
1620
1621 tracing_generic_entry_update(ent, flags, pc);
1622 ent->type = type;
1623 }
1624
1625 return event;
1626}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001627
/*
 * __buffer_unlock_commit - commit a reserved event and re-arm the
 * per-cpu cmdline-save request so the next tracing_record_cmdline()
 * call will save the current task's comm.
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
1634
/*
 * __trace_buffer_unlock_commit - commit an event and, depending on the
 * current trace flags, append the kernel and user stack traces.
 */
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	/* skip=6 hops over the tracing internals in the backtrace. */
	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}
1645
/*
 * trace_buffer_unlock_commit - exported wrapper around
 * __trace_buffer_unlock_commit() for event callers.
 */
void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001653
/* Scratch ring buffer used when tracing is off but triggers still need data. */
static struct ring_buffer *temp_buffer;

/*
 * trace_event_buffer_lock_reserve - reserve an event in the buffer of
 * the trace array that @ftrace_file belongs to.
 *
 * On success *@current_rb is set to the buffer the event was reserved
 * in, which the caller must use for the matching commit/discard.
 */
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct ftrace_event_file *ftrace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1681
/*
 * trace_current_buffer_lock_reserve - reserve an event in the global
 * trace buffer and report that buffer back through *@current_rb.
 */
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001692
/*
 * trace_current_buffer_unlock_commit - commit an event reserved via
 * trace_current_buffer_lock_reserve(), including stack traces when
 * the corresponding trace flags are set.
 */
void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001700
/*
 * trace_buffer_unlock_commit_regs - commit an event and record stack
 * traces using the supplied pt_regs instead of the current frame.
 */
void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001712
/*
 * trace_current_buffer_discard_commit - throw away a reserved event
 * instead of committing it (e.g. when a filter rejected the entry).
 */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001719
/*
 * trace_function - record a function-entry event (ip and parent ip)
 * into @tr's ring buffer, subject to event filtering.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	/* Commit only if the event filter did not discard the entry. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
1745
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001746#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001747
/* Max stack entries that fit in one page worth of unsigned longs. */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
/* Per-cpu scratch area for collecting deep kernel stack traces. */
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
/* Nesting counter guarding ftrace_stack against NMI/irq reuse. */
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1755
/*
 * __ftrace_trace_stack - record a kernel stack trace into @buffer.
 *
 * @skip: number of innermost frames to leave out
 * @regs: if non-NULL, unwind from these registers instead of here
 *
 * Uses a per-cpu scratch stack when it is the first user on this CPU
 * (so deep traces are not truncated); nested users (irq/NMI) fall back
 * to writing FTRACE_STACK_ENTRIES directly into the reserved event.
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		/* First user on this CPU: trace into the big scratch area. */
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		/* Nested path: unwind straight into the event's buffer. */
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
1836
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001837void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1838 int skip, int pc, struct pt_regs *regs)
1839{
1840 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1841 return;
1842
1843 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1844}
1845
Steven Rostedte77405a2009-09-02 14:17:06 -04001846void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1847 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001848{
1849 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1850 return;
1851
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001852 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001853}
1854
/*
 * __trace_stack - unconditionally record a kernel stack trace into
 * @tr's buffer (no trace_flags check).
 */
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
1860
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	/* Nothing to record while tracing is disabled or self-testing. */
	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
1882
/* Per-cpu recursion guard for user stack tracing. */
static DEFINE_PER_CPU(int, user_stack_count);

/*
 * ftrace_trace_userstack - record the current task's user-space stack
 * trace into @buffer when the userstacktrace option is enabled.
 *
 * Skipped in NMI context (user unwinding may fault) and when already
 * running on this CPU (user stack tracing can trigger other events).
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
1936
#ifdef UNUSED
/* Dead code kept for reference; compiled out unless UNUSED is defined. */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001943
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001944#endif /* CONFIG_STACKTRACE */
1945
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

/* One per-cpu scratch buffer per context, allowing lockless trace_printk. */
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1955
/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
1985
/*
 * alloc_percpu_trace_buffer - allocate the four per-context percpu
 * trace_printk buffers.
 *
 * Returns 0 on success; on any failure, frees whatever was already
 * allocated (goto cleanup chain), warns, and returns -ENOMEM.
 */
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	/* Publish the buffers only after all allocations succeeded. */
	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}
2026
/* Set once the percpu trace_printk buffers exist. */
static int buffers_allocated;

/*
 * trace_printk_init_buffers - one-time setup for trace_printk().
 *
 * Allocates the percpu buffers, loudly warns that a debug facility is
 * in use, expands the trace buffers, and starts cmdline recording if
 * called from module code (global buffer already allocated).
 */
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("\n**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
2067
2068void trace_printk_start_comm(void)
2069{
2070 /* Start tracing comms if trace printk is set */
2071 if (!buffers_allocated)
2072 return;
2073 tracing_start_cmdline_record();
2074}
2075
2076static void trace_printk_start_stop_comm(int enabled)
2077{
2078 if (!buffers_allocated)
2079 return;
2080
2081 if (enabled)
2082 tracing_start_cmdline_record();
2083 else
2084 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002085}
2086
/**
 * trace_vbprintk - write binary msg to tracing buffer
 *
 * Encodes @fmt/@args with vbin_printf() into a per-cpu scratch buffer,
 * then copies the binary blob into a TRACE_BPRINT event. Returns the
 * number of u32 words written, or 0 when tracing is unavailable.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* Overflow or encoding error: drop the message. */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2146
/*
 * __trace_array_vprintk - write a formatted message into a ring buffer
 * @buffer: ring buffer that receives the TRACE_PRINT entry
 * @ip:     instruction pointer recorded in the entry
 * @fmt:    printf-style format string
 * @args:   arguments for @fmt
 *
 * Formats the message into a scratch buffer, then reserves and commits
 * a print_entry in @buffer.  Returns the number of bytes formatted, or
 * 0 when tracing is disabled or no scratch buffer is available.  Note
 * that on a failed ring-buffer reserve the formatted length is still
 * returned even though nothing was committed.
 */
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	/* Capture the preempt count before we disable preemption below. */
	pc = preempt_count();
	preempt_disable_notrace();


	/*
	 * NOTE(review): get_trace_buf() presumably hands out a per-cpu
	 * scratch buffer (hence preemption stays disabled while it is in
	 * use) -- confirm against its definition.
	 */
	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	/* vscnprintf() bounds the write and returns the length actually
	 * stored, excluding the terminating NUL. */
	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;	/* +1 for the trailing NUL */
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	/* Copy the message including its NUL terminator. */
	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		/* 6: stack-skip count passed through to the stack dump --
		 * see ftrace_trace_stack() for its exact meaning. */
		ftrace_trace_stack(buffer, flags, 6, pc);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
Steven Rostedt659372d2009-09-03 19:11:07 -04002196
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002197int trace_array_vprintk(struct trace_array *tr,
2198 unsigned long ip, const char *fmt, va_list args)
2199{
2200 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2201}
2202
2203int trace_array_printk(struct trace_array *tr,
2204 unsigned long ip, const char *fmt, ...)
2205{
2206 int ret;
2207 va_list ap;
2208
2209 if (!(trace_flags & TRACE_ITER_PRINTK))
2210 return 0;
2211
2212 va_start(ap, fmt);
2213 ret = trace_array_vprintk(tr, ip, fmt, ap);
2214 va_end(ap);
2215 return ret;
2216}
2217
2218int trace_array_printk_buf(struct ring_buffer *buffer,
2219 unsigned long ip, const char *fmt, ...)
2220{
2221 int ret;
2222 va_list ap;
2223
2224 if (!(trace_flags & TRACE_ITER_PRINTK))
2225 return 0;
2226
2227 va_start(ap, fmt);
2228 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2229 va_end(ap);
2230 return ret;
2231}
2232
Steven Rostedt659372d2009-09-03 19:11:07 -04002233int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2234{
Steven Rostedta813a152009-10-09 01:41:35 -04002235 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002236}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002237EXPORT_SYMBOL_GPL(trace_vprintk);
2238
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002239static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002240{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002241 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2242
Steven Rostedt5a90f572008-09-03 17:42:51 -04002243 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002244 if (buf_iter)
2245 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002246}
2247
Ingo Molnare309b412008-05-12 21:20:51 +02002248static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002249peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2250 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002251{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002252 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002253 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002254
Steven Rostedtd7690412008-10-01 00:29:53 -04002255 if (buf_iter)
2256 event = ring_buffer_iter_peek(buf_iter, ts);
2257 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002258 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002259 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002260
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002261 if (event) {
2262 iter->ent_size = ring_buffer_event_length(event);
2263 return ring_buffer_event_data(event);
2264 }
2265 iter->ent_size = 0;
2266 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002267}
Steven Rostedtd7690412008-10-01 00:29:53 -04002268
/*
 * __find_next_entry - find the oldest pending entry across cpu buffers
 * @iter:           the trace iterator
 * @ent_cpu:        out, optional: cpu the returned entry came from
 * @missing_events: out, optional: events lost before the returned entry
 * @ent_ts:         out, optional: timestamp of the returned entry
 *
 * For a per-cpu iterator this peeks just that buffer; otherwise every
 * tracing cpu is scanned and the entry with the smallest timestamp
 * wins.  Also leaves iter->ent_size describing the returned entry
 * (0 when there is none).  Returns NULL when all buffers are empty.
 */
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			/* peek_next_entry() set ent_size for this cpu's
			 * candidate; remember it for the winner. */
			next_size = iter->ent_size;
		}
	}

	/* Restore the size of the winning entry (peeks above clobbered it). */
	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
2328
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002329/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002330struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2331 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002332{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002333 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002334}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002335
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002336/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002337void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002338{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002339 iter->ent = __find_next_entry(iter, &iter->cpu,
2340 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002341
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002342 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002343 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002344
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002345 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002346}
2347
Ingo Molnare309b412008-05-12 21:20:51 +02002348static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002349{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002350 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002351 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002352}
2353
Ingo Molnare309b412008-05-12 21:20:51 +02002354static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355{
2356 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002357 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002358 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002359
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002360 WARN_ON_ONCE(iter->leftover);
2361
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002362 (*pos)++;
2363
2364 /* can't go backwards */
2365 if (iter->idx > i)
2366 return NULL;
2367
2368 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002369 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002370 else
2371 ent = iter;
2372
2373 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002374 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002375
2376 iter->pos = *pos;
2377
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002378 return ent;
2379}
2380
Jason Wessel955b61e2010-08-05 09:22:23 -05002381void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002382{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002383 struct ring_buffer_event *event;
2384 struct ring_buffer_iter *buf_iter;
2385 unsigned long entries = 0;
2386 u64 ts;
2387
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002388 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002389
Steven Rostedt6d158a82012-06-27 20:46:14 -04002390 buf_iter = trace_buffer_iter(iter, cpu);
2391 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002392 return;
2393
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002394 ring_buffer_iter_reset(buf_iter);
2395
2396 /*
2397 * We could have the case with the max latency tracers
2398 * that a reset never took place on a cpu. This is evident
2399 * by the timestamp being before the start of the buffer.
2400 */
2401 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002402 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002403 break;
2404 entries++;
2405 ring_buffer_read(buf_iter, NULL);
2406 }
2407
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002408 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002409}
2410
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 *
 * seq_file ->start() callback for the trace file.  Synchronizes the
 * iterator's tracer copy, positions the iterator at *pos, and takes
 * the read-side locks that s_stop() releases.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Snapshot reads can't coexist with a tracer that swaps max_tr. */
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	/* New position: rewind every cpu iterator and walk forward to *pos. */
	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	/* Both released in s_stop(). */
	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2475
/*
 * seq_file ->stop() callback: undo what s_start() set up -- drop the
 * access and event-read locks and re-enable cmdline recording (unless
 * this is a snapshot iterator).
 */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* s_start() bailed with -EBUSY for this case; nothing to undo. */
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2491
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002492static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002493get_total_entries(struct trace_buffer *buf,
2494 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002495{
2496 unsigned long count;
2497 int cpu;
2498
2499 *total = 0;
2500 *entries = 0;
2501
2502 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002503 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002504 /*
2505 * If this buffer has skipped entries, then we hold all
2506 * entries for the trace and we need to ignore the
2507 * ones before the time stamp.
2508 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002509 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2510 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002511 /* total is the same as the entries */
2512 *total += count;
2513 } else
2514 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002515 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002516 *entries += count;
2517 }
2518}
2519
/*
 * Emit the column legend for the latency output format (the flag
 * columns: irqs-off, need-resched, hardirq/softirq, preempt-depth).
 */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n"
		    "# / _-----=> irqs-off \n"
		    "# | / _----=> need-resched \n"
		    "# || / _---=> hardirq/softirq \n"
		    "# ||| / _--=> preempt-depth \n"
		    "# |||| / delay \n"
		    "# cmd pid ||||| time | caller \n"
		    "# \\ / ||||| \\ | / \n");
}
2531
/* Emit the buffer-occupancy summary line shared by the default headers. */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long written;
	unsigned long in_buffer;

	get_total_entries(buf, &written, &in_buffer);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
		   in_buffer, written, num_online_cpus());
	seq_puts(m, "#\n");
}
2542
/* Default (non-irq-info) header: occupancy summary plus column titles. */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
		    "# | | | | |\n");
}
2549
/*
 * Header variant used when irq-info columns are shown: occupancy
 * summary plus the flag-column legend and column titles.
 */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n"
		    "# / _----=> need-resched\n"
		    "# | / _---=> hardirq/softirq\n"
		    "# || / _--=> preempt-depth\n"
		    "# ||| / delay\n"
		    "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
		    "# | | | |||| | |\n");
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002561
/*
 * print_trace_header - emit the verbose latency-trace banner
 * @m:    seq_file to write into
 * @iter: iterator describing the trace being printed
 *
 * Prints tracer name, kernel release, latency figures, preemption
 * model, task identity, and (when recorded) the critical-section
 * start/end locations.
 */
void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
/* M: -- the preemption model the kernel was built with */
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "# -----------------\n");
	seq_printf(m, "# | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "# -----------------\n");

	/* Only latency tracers record a critical section to report. */
	if (data->critical_start) {
		seq_puts(m, "# => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n# => ended at: ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}
2623
Steven Rostedta3097202008-11-07 22:36:02 -05002624static void test_cpu_buff_start(struct trace_iterator *iter)
2625{
2626 struct trace_seq *s = &iter->seq;
2627
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002628 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2629 return;
2630
2631 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2632 return;
2633
Rusty Russell44623442009-01-01 10:12:23 +10302634 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002635 return;
2636
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002637 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002638 return;
2639
Rusty Russell44623442009-01-01 10:12:23 +10302640 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002641
2642 /* Don't print started cpu buffer for the first entry of the trace */
2643 if (iter->idx > 1)
2644 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2645 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002646}
2647
/*
 * Render the current entry in the default human-readable format.
 * Prints the context columns (latency or standard), then dispatches to
 * the entry type's registered trace() handler; unknown types get a
 * fallback line.  Returns PARTIAL_LINE when the trace_seq overflowed.
 */
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	/* The context printing above may have filled the seq buffer. */
	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
2678
/*
 * Render the current entry in the "raw" format: numeric pid/cpu/ts
 * prefix followed by the event's raw() handler output; unknown types
 * print "<type> ?".
 */
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}
2702
/*
 * Render the current entry as hex fields: pid/cpu/ts context, then the
 * event's hex() handler, then a newline.  Handlers returning anything
 * other than TRACE_TYPE_HANDLED short-circuit the newline.
 */
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}
2731
/*
 * Render the current entry in binary form: pid/cpu/ts fields followed
 * by the event's binary() handler.  Entries without a registered event
 * are silently treated as handled.
 */
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
2752
/*
 * trace_empty - check whether there is unread trace data
 * @iter: the trace iterator (its cpu_file selects one CPU or all)
 *
 * Returns 1 when every buffer under consideration is empty, 0 as soon
 * as any non-empty buffer is found.  A per-CPU reader iterator is
 * consulted when one exists; otherwise the ring buffer itself is asked.
 */
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			/* No reader iterator set up: query the buffer directly */
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	/* All CPUs: empty only if every per-CPU buffer is empty */
	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
2785
/* Called with trace_event_read_lock() held. */
/*
 * print_trace_line - format one trace entry into iter->seq
 *
 * Dispatch order: lost-event annotation first, then the tracer's own
 * print_line hook, then the printk-msg-only shortcuts, and finally the
 * generic bin/hex/raw/default formatters selected by trace_flags.
 */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	/* Note dropped events before printing the entry that follows them */
	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Let the current tracer try first; fall through if unhandled */
	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
2830
/*
 * trace_latency_header - print the latency-format banner for a seq_file
 * @m: the seq_file whose private data is the trace iterator
 *
 * Prints the full trace header only when the iterator is in latency
 * format; the short help header is added unless verbose output is on.
 */
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}
2845
/*
 * trace_default_header - print the default header for the trace file
 * @m: the seq_file whose private data is the trace iterator
 *
 * Skipped entirely when context info is disabled.  Latency-format
 * iterators get the full latency header; otherwise one of the generic
 * function-trace column headers is chosen based on TRACE_ITER_IRQ_INFO.
 */
void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}
2869
/*
 * Warn in the trace output when the function tracer has shut itself
 * down (ftrace_is_dead()), since function events may then be missing.
 */
static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		"# MAY BE MISSING FUNCTION EVENTS\n");
}
2877
#ifdef CONFIG_TRACER_MAX_TRACE
/* Usage text for the snapshot file that controls the whole buffer. */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		"# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		"# Takes a snapshot of the main buffer.\n"
		"# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		"# (Doesn't have to be '2' works with any number that\n"
		"# is not a '0' or '1')\n");
}

/* Usage text for a per-cpu snapshot file; depends on swap support. */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		"# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		"# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		"# (Doesn't have to be '2' works with any number that\n"
		"# is not a '0' or '1')\n");
}

/*
 * Print the snapshot state banner plus the command help appropriate
 * for the file being read (whole buffer vs. a single cpu).
 */
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
2921
/*
 * seq_file .show callback for the trace file.  With no current entry it
 * prints the header block; otherwise it flushes either leftover output
 * from a previous overflowed iteration or a freshly formatted line.
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		/* Header pass: no entry selected yet */
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
2965
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002966/*
2967 * Should be used after trace_array_get(), trace_types_lock
2968 * ensures that i_cdev was already initialized.
2969 */
2970static inline int tracing_get_cpu(struct inode *inode)
2971{
2972 if (inode->i_cdev) /* See trace_create_cpu_file() */
2973 return (long)inode->i_cdev - 1;
2974 return RING_BUFFER_ALL_CPUS;
2975}
2976
/* seq_file operations backing the trace file (see __tracing_open()). */
static const struct seq_operations tracer_seq_ops = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};
2983
/*
 * __tracing_open - set up a trace_iterator for reading a trace file
 * @inode:    inode of the opened file; i_private is the trace_array
 * @file:     the file being opened (gains seq_file private data)
 * @snapshot: true when opening the snapshot file
 *
 * Allocates the iterator (via __seq_open_private), per-cpu ring buffer
 * iterators, and a private copy of the current tracer, all under
 * trace_types_lock.  Unless opening "snapshot", tracing is stopped for
 * the duration of the read (restarted in tracing_release()).
 *
 * Returns the iterator, or ERR_PTR(-ENODEV/-ENOMEM) on failure; the
 * error paths unwind everything allocated so far.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/* One reader iterator slot per possible CPU */
	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		/* prepare all, sync once, then start all readers */
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
3078
/*
 * Generic open: stash the inode's private data for the read/write
 * handlers.  Fails with -ENODEV once tracing has been disabled.
 */
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}
3087
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003088bool tracing_is_disabled(void)
3089{
3090 return (tracing_disabled) ? true: false;
3091}
3092
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003093/*
3094 * Open and update trace_array ref count.
3095 * Must have the current trace_array passed to it.
3096 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003097static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003098{
3099 struct trace_array *tr = inode->i_private;
3100
3101 if (tracing_disabled)
3102 return -ENODEV;
3103
3104 if (trace_array_get(tr) < 0)
3105 return -ENODEV;
3106
3107 filp->private_data = inode->i_private;
3108
3109 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003110}
3111
/*
 * Release callback for the trace file: tear down everything built in
 * __tracing_open() and drop the trace_array reference.  Write-only
 * opens never created an iterator, so they only drop the reference.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	/* Finish every per-cpu reader iterator that was started */
	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	/* __trace_array_put: trace_types_lock is already held here */
	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3152
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003153static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3154{
3155 struct trace_array *tr = inode->i_private;
3156
3157 trace_array_put(tr);
3158 return 0;
3159}
3160
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003161static int tracing_single_release_tr(struct inode *inode, struct file *file)
3162{
3163 struct trace_array *tr = inode->i_private;
3164
3165 trace_array_put(tr);
3166
3167 return single_release(inode, file);
3168}
3169
/*
 * Open callback for the trace file.  Takes a trace_array reference,
 * clears the buffer on O_TRUNC writes, and builds the seq_file
 * iterator for reads.  The reference is dropped again on failure.
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3202
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003203/*
3204 * Some tracers are not suitable for instance buffers.
3205 * A tracer is always available for the global array (toplevel)
3206 * or if it explicitly states that it is.
3207 */
3208static bool
3209trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3210{
3211 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3212}
3213
3214/* Find the next tracer that this trace array may use */
3215static struct tracer *
3216get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3217{
3218 while (t && !trace_ok_for_array(t, tr))
3219 t = t->next;
3220
3221 return t;
3222}
3223
Ingo Molnare309b412008-05-12 21:20:51 +02003224static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003225t_next(struct seq_file *m, void *v, loff_t *pos)
3226{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003227 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003228 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003229
3230 (*pos)++;
3231
3232 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003233 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003234
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003235 return t;
3236}
3237
3238static void *t_start(struct seq_file *m, loff_t *pos)
3239{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003240 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003241 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003242 loff_t l = 0;
3243
3244 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003245
3246 t = get_tracer_for_array(tr, trace_types);
3247 for (; t && l < *pos; t = t_next(m, t, &l))
3248 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003249
3250 return t;
3251}
3252
/* seq_file .stop for the tracer list: releases the lock from t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
3257
3258static int t_show(struct seq_file *m, void *v)
3259{
3260 struct tracer *t = v;
3261
3262 if (!t)
3263 return 0;
3264
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003265 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003266 if (t->next)
3267 seq_putc(m, ' ');
3268 else
3269 seq_putc(m, '\n');
3270
3271 return 0;
3272}
3273
/* seq_file operations that list the registered tracers (t_* callbacks). */
static const struct seq_operations show_traces_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
3280
3281static int show_traces_open(struct inode *inode, struct file *file)
3282{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003283 struct trace_array *tr = inode->i_private;
3284 struct seq_file *m;
3285 int ret;
3286
Steven Rostedt60a11772008-05-12 21:20:44 +02003287 if (tracing_disabled)
3288 return -ENODEV;
3289
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003290 ret = seq_open(file, &show_traces_seq_ops);
3291 if (ret)
3292 return ret;
3293
3294 m = file->private_data;
3295 m->private = tr;
3296
3297 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003298}
3299
/*
 * Writes to the trace file are accepted and discarded; opening with
 * O_TRUNC is what actually clears the buffer (see tracing_open()).
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
3306
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003307loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003308{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003309 int ret;
3310
Slava Pestov364829b2010-11-24 15:13:16 -08003311 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003312 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003313 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003314 file->f_pos = ret = 0;
3315
3316 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003317}
3318
/* File operations for the trace file (seq_file read, stub write). */
static const struct file_operations tracing_fops = {
	.open = tracing_open,
	.read = seq_read,
	.write = tracing_write_stub,
	.llseek = tracing_lseek,
	.release = tracing_release,
};
3326
/* File operations for the tracer-list file. */
static const struct file_operations show_traces_fops = {
	.open = show_traces_open,
	.read = seq_read,
	.release = seq_release,
	.llseek = seq_lseek,
};
3333
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 * shared by readers/writers under tracing_cpumask_update_lock.
 */
static char mask_str[NR_CPUS + 1];
3345
Ingo Molnarc7078de2008-05-12 21:20:52 +02003346static ssize_t
3347tracing_cpumask_read(struct file *filp, char __user *ubuf,
3348 size_t count, loff_t *ppos)
3349{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003350 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003351 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003352
3353 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003354
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003355 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003356 if (count - len < 2) {
3357 count = -EINVAL;
3358 goto out_err;
3359 }
3360 len += sprintf(mask_str + len, "\n");
3361 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3362
3363out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003364 mutex_unlock(&tracing_cpumask_update_lock);
3365
3366 return count;
3367}
3368
/*
 * Write handler for the tracing cpumask file: parses a cpumask from
 * user-space and applies it, adjusting per-cpu disabled counters and
 * ring buffer recording for every CPU whose bit flips.  The update is
 * done with IRQs off under tr->max_lock so the mask changes atomically
 * with respect to the tracer.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			/* bit cleared: stop recording on this CPU */
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			/* bit set: resume recording on this CPU */
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3419
/*
 * File operations for the per-instance "tracing_cpumask" file, which
 * limits which CPUs are traced.  Reads use tracing_cpumask_read() and
 * writes use tracing_cpumask_write() (both defined above); open/release
 * go through the *_generic_tr helpers, which presumably pin the
 * trace_array while the file is open -- see their definitions.
 */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
3427
Li Zefanfdb372e2009-12-08 11:15:59 +08003428static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003429{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003430 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003431 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003432 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003433 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003434
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003435 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003436 tracer_flags = tr->current_trace->flags->val;
3437 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003438
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003439 for (i = 0; trace_options[i]; i++) {
3440 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003441 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003442 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003443 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003444 }
3445
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003446 for (i = 0; trace_opts[i].name; i++) {
3447 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003448 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003449 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003450 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003451 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003452 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003453
Li Zefanfdb372e2009-12-08 11:15:59 +08003454 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003455}
3456
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003457static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003458 struct tracer_flags *tracer_flags,
3459 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003460{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003461 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003462 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003463
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003464 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003465 if (ret)
3466 return ret;
3467
3468 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003469 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003470 else
Zhaolei77708412009-08-07 18:53:21 +08003471 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003472 return 0;
3473}
3474
Li Zefan8d18eaa2009-12-08 11:17:06 +08003475/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003476static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003477{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003478 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003479 struct tracer_flags *tracer_flags = trace->flags;
3480 struct tracer_opt *opts = NULL;
3481 int i;
3482
3483 for (i = 0; tracer_flags->opts[i].name; i++) {
3484 opts = &tracer_flags->opts[i];
3485
3486 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003487 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003488 }
3489
3490 return -EINVAL;
3491}
3492
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003493/* Some tracers require overwrite to stay enabled */
3494int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3495{
3496 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3497 return -1;
3498
3499 return 0;
3500}
3501
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003502int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003503{
3504 /* do nothing if flag is already set */
3505 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003506 return 0;
3507
3508 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003509 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003510 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003511 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003512
3513 if (enabled)
3514 trace_flags |= mask;
3515 else
3516 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003517
3518 if (mask == TRACE_ITER_RECORD_CMD)
3519 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003520
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003521 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003522 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003523#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003524 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003525#endif
3526 }
Steven Rostedt81698832012-10-11 10:15:05 -04003527
3528 if (mask == TRACE_ITER_PRINTK)
3529 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003530
3531 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003532}
3533
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003534static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003535{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003536 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003537 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003538 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003539 int i;
3540
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003541 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003542
Li Zefan8d18eaa2009-12-08 11:17:06 +08003543 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003544 neg = 1;
3545 cmp += 2;
3546 }
3547
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003548 mutex_lock(&trace_types_lock);
3549
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003550 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003551 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003552 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003553 break;
3554 }
3555 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003556
3557 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003558 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003559 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003560
3561 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003562
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003563 return ret;
3564}
3565
3566static ssize_t
3567tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3568 size_t cnt, loff_t *ppos)
3569{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003570 struct seq_file *m = filp->private_data;
3571 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003572 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003573 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003574
3575 if (cnt >= sizeof(buf))
3576 return -EINVAL;
3577
3578 if (copy_from_user(&buf, ubuf, cnt))
3579 return -EFAULT;
3580
Steven Rostedta8dd2172013-01-09 20:54:17 -05003581 buf[cnt] = 0;
3582
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003583 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003584 if (ret < 0)
3585 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003586
Jiri Olsacf8517c2009-10-23 19:36:16 -04003587 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003588
3589 return cnt;
3590}
3591
Li Zefanfdb372e2009-12-08 11:15:59 +08003592static int tracing_trace_options_open(struct inode *inode, struct file *file)
3593{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003594 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003595 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003596
Li Zefanfdb372e2009-12-08 11:15:59 +08003597 if (tracing_disabled)
3598 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003599
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003600 if (trace_array_get(tr) < 0)
3601 return -ENODEV;
3602
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003603 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3604 if (ret < 0)
3605 trace_array_put(tr);
3606
3607 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003608}
3609
/*
 * File operations for "trace_options".  Reads go through the seq_file
 * machinery (single_open() in tracing_trace_options_open()); release
 * uses tracing_single_release_tr, which presumably also drops the
 * trace_array reference taken at open -- see its definition.
 */
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
3617
/*
 * Contents of the "README" debugfs file: a mini-HOWTO describing the
 * tracing control files.  Sections are compiled in only when the
 * corresponding feature (dynamic ftrace, stack tracer, snapshot, ...)
 * is configured.  Served verbatim by tracing_readme_read() below.
 */
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t-change the clock used to order events\n"
	" local: Per cpu clock but may not be synced across CPUs\n"
	" global: Synced across CPUs but slows tracing down.\n"
	" counter: Not a clock, but just an increment\n"
	" uptime: Jiffy counter from time of boot\n"
	" perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	" x86-tsc: TSC cycle counter\n"
#endif
	"\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do trap is hit and it disables tracing, the\n"
	"\t counter will decrement to 2. If tracing is already disabled,\n"
	"\t the counter will not decrement. It only decrements when the\n"
	"\t trigger did work\n"
	"\t To remove trigger without count:\n"
	"\t echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t To remove trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<trigger> > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t Filters can be ignored when removing a trigger.\n"
;
3746
3747static ssize_t
3748tracing_readme_read(struct file *filp, char __user *ubuf,
3749 size_t cnt, loff_t *ppos)
3750{
3751 return simple_read_from_buffer(ubuf, cnt, ppos,
3752 readme_msg, strlen(readme_msg));
3753}
3754
/* File operations for the read-only "README" file. */
static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
3760
/*
 * seq_file ->next for "saved_cmdlines": advance to the next slot of
 * savedcmd->map_cmdline_to_pid[] that holds a recorded pid, or return
 * NULL when the table is exhausted.
 */
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	/*
	 * saved_cmdlines_start() also calls this helper to find the
	 * first valid entry; only step past the current slot when this
	 * is a genuine "next" call (nonzero position or buffered output).
	 */
	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		/* Skip slots that never had a pid recorded. */
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003780
/*
 * seq_file ->start for "saved_cmdlines": take the cmdline lock and seek
 * to the entry at *pos.  The lock is held across the whole iteration
 * and released in saved_cmdlines_stop().
 */
static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	/*
	 * trace_cmdline_lock is a raw arch spinlock, so preemption must
	 * be disabled for as long as it is held.
	 */
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	/* Walk forward until we reach the requested position. */
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}
3798
/*
 * seq_file ->stop: release the cmdline lock taken in
 * saved_cmdlines_start() and re-enable preemption, in that order.
 */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
3804
3805static int saved_cmdlines_show(struct seq_file *m, void *v)
3806{
3807 char buf[TASK_COMM_LEN];
3808 unsigned int *pid = v;
3809
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003810 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003811 seq_printf(m, "%d %s\n", *pid, buf);
3812 return 0;
3813}
3814
/* Iterator callbacks wiring the saved_cmdlines_* functions together. */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
3821
3822static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3823{
3824 if (tracing_disabled)
3825 return -ENODEV;
3826
3827 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003828}
3829
/* File operations for the read-only, seq_file-backed "saved_cmdlines". */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
3836
3837static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003838tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3839 size_t cnt, loff_t *ppos)
3840{
3841 char buf[64];
3842 int r;
3843
3844 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003845 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003846 arch_spin_unlock(&trace_cmdline_lock);
3847
3848 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3849}
3850
3851static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3852{
3853 kfree(s->saved_cmdlines);
3854 kfree(s->map_cmdline_to_pid);
3855 kfree(s);
3856}
3857
/*
 * Replace the global saved-cmdlines buffer with a freshly allocated one
 * holding @val entries.  The old buffer is freed after the pointer swap.
 * Returns 0 on success or -ENOMEM.
 */
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	/*
	 * Swap the buffer pointer under trace_cmdline_lock so concurrent
	 * readers/recorders see either the old or the new buffer.
	 * NOTE(review): unlike saved_cmdlines_start(), preemption is not
	 * disabled around this arch_spin_lock -- confirm that is safe here.
	 */
	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}
3879
3880static ssize_t
3881tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3882 size_t cnt, loff_t *ppos)
3883{
3884 unsigned long val;
3885 int ret;
3886
3887 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3888 if (ret)
3889 return ret;
3890
3891 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
3892 if (!val || val > PID_MAX_DEFAULT)
3893 return -EINVAL;
3894
3895 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3896 if (ret < 0)
3897 return ret;
3898
3899 *ppos += cnt;
3900
3901 return cnt;
3902}
3903
3904static const struct file_operations tracing_saved_cmdlines_size_fops = {
3905 .open = tracing_open_generic,
3906 .read = tracing_saved_cmdlines_size_read,
3907 .write = tracing_saved_cmdlines_size_write,
3908};
3909
3910static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003911tracing_set_trace_read(struct file *filp, char __user *ubuf,
3912 size_t cnt, loff_t *ppos)
3913{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003914 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003915 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003916 int r;
3917
3918 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003919 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003920 mutex_unlock(&trace_types_lock);
3921
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003922 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003923}
3924
/*
 * Initialize tracer @t on @tr: reset the per-cpu trace buffer first so
 * the tracer starts from a clean buffer, then run its ->init() callback.
 * Returns whatever ->init() returns.
 */
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}
3930
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003931static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003932{
3933 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003934
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003935 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003936 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003937}
3938
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Resize @trace_buf so each CPU buffer matches the entry counts recorded
 * in @size_buf -- either one CPU, or every traced CPU when @cpu_id is
 * RING_BUFFER_ALL_CPUS.  On a mid-loop failure the CPUs already resized
 * keep their new size; the first error code is returned.
 */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			/* Mirror the entry count we just applied. */
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
			 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003966
/*
 * Resize @tr's ring buffer(s) for @cpu (or all CPUs when @cpu is
 * RING_BUFFER_ALL_CPUS).  When the current tracer uses the max
 * (snapshot) buffer, that buffer is resized in lock-step; if resizing
 * it fails, the main buffer is rolled back to its previously recorded
 * per-cpu entry counts.
 */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Only the global array's max buffer is kept in sync, and only
	 * when the current tracer actually uses it. */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		/*
		 * Roll the main buffer back to its old size -- the
		 * recorded ->entries values have not been updated yet,
		 * so they still describe the pre-resize sizes.
		 */
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	/* Record the new size so later rollbacks/reads see it. */
	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
4032
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004033static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4034 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004035{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004036 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004037
4038 mutex_lock(&trace_types_lock);
4039
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004040 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4041 /* make sure, this cpu is enabled in the mask */
4042 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4043 ret = -EINVAL;
4044 goto out;
4045 }
4046 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004047
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004048 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004049 if (ret < 0)
4050 ret = -ENOMEM;
4051
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004052out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004053 mutex_unlock(&trace_types_lock);
4054
4055 return ret;
4056}
4057
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004058
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004059/**
4060 * tracing_update_buffers - used by tracing facility to expand ring buffers
4061 *
4062 * To save on memory when the tracing is never used on a system with it
4063 * configured in. The ring buffers are set to a minimum size. But once
4064 * a user starts to use the tracing facility, then they need to grow
4065 * to their default size.
4066 *
4067 * This function is to be called when a tracer is about to be used.
4068 */
4069int tracing_update_buffers(void)
4070{
4071 int ret = 0;
4072
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004073 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004074 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004075 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004076 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004077 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004078
4079 return ret;
4080}
4081
Steven Rostedt577b7852009-02-26 23:43:05 -05004082struct trace_option_dentry;
4083
4084static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004085create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004086
4087static void
4088destroy_trace_option_files(struct trace_option_dentry *topts);
4089
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004090/*
4091 * Used to clear out the tracer before deletion of an instance.
4092 * Must have trace_types_lock held.
4093 */
4094static void tracing_set_nop(struct trace_array *tr)
4095{
4096 if (tr->current_trace == &nop_trace)
4097 return;
4098
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004099 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004100
4101 if (tr->current_trace->reset)
4102 tr->current_trace->reset(tr);
4103
4104 tr->current_trace = &nop_trace;
4105}
4106
/*
 * Switch the trace array @tr to the tracer named @buf.
 *
 * Expands the ring buffers on first use, looks the tracer up in the
 * registered trace_types list, tears down the current tracer, handles
 * the snapshot ("max") buffer transitions, and initializes the new one.
 *
 * Returns 0 on success; -EINVAL for an unknown tracer or one not allowed
 * for this array; -EBUSY while trace_pipe readers hold a reference;
 * or a negative error from buffer/snapshot allocation or tracer init.
 */
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	/* Option files for the previously selected tracer (top instance only). */
	static struct trace_option_dentry *topts;
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	/* First real use of tracing: grow buffers from their minimal size. */
	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	/* Find the requested tracer by name in the registered list. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	/* Currently, only the top instance has options */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
		destroy_trace_option_files(topts);
		topts = create_trace_option_files(tr, t);
	}

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The new tracer needs a snapshot buffer that does not exist yet. */
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4202
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004203static ssize_t
4204tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4205 size_t cnt, loff_t *ppos)
4206{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004207 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004208 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004209 int i;
4210 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004211 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004212
Steven Rostedt60063a62008-10-28 10:44:24 -04004213 ret = cnt;
4214
Li Zefanee6c2c12009-09-18 14:06:47 +08004215 if (cnt > MAX_TRACER_SIZE)
4216 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004217
4218 if (copy_from_user(&buf, ubuf, cnt))
4219 return -EFAULT;
4220
4221 buf[cnt] = 0;
4222
4223 /* strip ending whitespace. */
4224 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4225 buf[i] = 0;
4226
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004227 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004228 if (err)
4229 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004230
Jiri Olsacf8517c2009-10-23 19:36:16 -04004231 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004232
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004233 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004234}
4235
4236static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004237tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4238 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004239{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004240 char buf[64];
4241 int r;
4242
Steven Rostedtcffae432008-05-12 21:21:00 +02004243 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004244 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004245 if (r > sizeof(buf))
4246 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004247 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004248}
4249
4250static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004251tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4252 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004253{
Hannes Eder5e398412009-02-10 19:44:34 +01004254 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004255 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004256
Peter Huewe22fe9b52011-06-07 21:58:27 +02004257 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4258 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004259 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004260
4261 *ptr = val * 1000;
4262
4263 return cnt;
4264}
4265
/* Read handler for "tracing_thresh": show the threshold in microseconds. */
static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}
4272
4273static ssize_t
4274tracing_thresh_write(struct file *filp, const char __user *ubuf,
4275 size_t cnt, loff_t *ppos)
4276{
4277 struct trace_array *tr = filp->private_data;
4278 int ret;
4279
4280 mutex_lock(&trace_types_lock);
4281 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4282 if (ret < 0)
4283 goto out;
4284
4285 if (tr->current_trace->update_thresh) {
4286 ret = tr->current_trace->update_thresh(tr);
4287 if (ret < 0)
4288 goto out;
4289 }
4290
4291 ret = cnt;
4292out:
4293 mutex_unlock(&trace_types_lock);
4294
4295 return ret;
4296}
4297
/*
 * Read handler for the max-latency file: private_data points at the
 * nanosecond latency value wired up when the file was created; it is
 * shown in microseconds.
 */
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}
4304
/*
 * Write handler for the max-latency file: user supplies microseconds,
 * stored in nanoseconds into the value private_data points at.
 */
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}
4311
Steven Rostedtb3806b42008-05-12 21:20:46 +02004312static int tracing_open_pipe(struct inode *inode, struct file *filp)
4313{
Oleg Nesterov15544202013-07-23 17:25:57 +02004314 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004315 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004316 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004317
4318 if (tracing_disabled)
4319 return -ENODEV;
4320
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004321 if (trace_array_get(tr) < 0)
4322 return -ENODEV;
4323
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004324 mutex_lock(&trace_types_lock);
4325
Steven Rostedtb3806b42008-05-12 21:20:46 +02004326 /* create a buffer to store the information to pass to userspace */
4327 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004328 if (!iter) {
4329 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004330 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004331 goto out;
4332 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004333
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004334 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004335 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004336
4337 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4338 ret = -ENOMEM;
4339 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304340 }
4341
Steven Rostedta3097202008-11-07 22:36:02 -05004342 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304343 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004344
Steven Rostedt112f38a72009-06-01 15:16:05 -04004345 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4346 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4347
David Sharp8be07092012-11-13 12:18:22 -08004348 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004349 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004350 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4351
Oleg Nesterov15544202013-07-23 17:25:57 +02004352 iter->tr = tr;
4353 iter->trace_buffer = &tr->trace_buffer;
4354 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004355 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004356 filp->private_data = iter;
4357
Steven Rostedt107bad82008-05-12 21:21:01 +02004358 if (iter->trace->pipe_open)
4359 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004360
Arnd Bergmannb4447862010-07-07 23:40:11 +02004361 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004362
4363 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004364out:
4365 mutex_unlock(&trace_types_lock);
4366 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004367
4368fail:
4369 kfree(iter->trace);
4370 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004371 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004372 mutex_unlock(&trace_types_lock);
4373 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004374}
4375
/*
 * Release handler for "trace_pipe": drop the pipe reference that blocks
 * tracer switching, give the tracer a chance to clean up, and free the
 * iterator and its resources.
 */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	/* Undo the ref taken in tracing_open_pipe(). */
	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	/* Drop the trace_array reference taken at open time. */
	trace_array_put(tr);

	return 0;
}
4398
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004399static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004400trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004401{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004402 /* Iterators are static, they should be filled or empty */
4403 if (trace_buffer_iter(iter, iter->cpu_file))
4404 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004405
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004406 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004407 /*
4408 * Always select as readable when in blocking mode
4409 */
4410 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004411 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004412 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004413 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004414}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004415
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004416static unsigned int
4417tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4418{
4419 struct trace_iterator *iter = filp->private_data;
4420
4421 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004422}
4423
/* Must be called with iter->mutex held. */
/*
 * Block until the pipe has data to read.  Returns 1 when data is (or may
 * be) available, -EAGAIN for a non-blocking reader with an empty buffer,
 * or a negative error propagated from wait_on_pipe().
 */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		/* Drop the iterator lock while sleeping so readers/writers
		 * of the pipe are not blocked behind us. */
		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
4460
/*
 * Consumer reader.
 */
/*
 * read() handler for "trace_pipe".  Unlike the "trace" file, reading
 * here consumes events from the ring buffer.  Serialized on iter->mutex
 * so only one consumer runs per file descriptor.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	/* A tracer may supply its own read implementation. */
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	/* One page at a time: the seq buffer holds at most a page. */
	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	/* Format events into iter->seq until it is full or cnt is reached. */
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
4557
/* splice callback: free a page that tracing_splice_read_pipe() allocated. */
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}
4563
/*
 * Pipe buffer operations for trace_pipe splice reads: no merging of
 * buffers, everything else uses the generic pipe helpers.
 */
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
4571
/*
 * Fill iter->seq with as many formatted trace lines as fit, consuming
 * events as they are rendered.  @rem is the number of bytes still wanted
 * by the splice caller; the updated remainder is returned (0 means the
 * caller should stop).  On overflow, the partially-written line is
 * rolled back by restoring the saved seq length.
 */
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		/* Remember the length so a partial line can be undone. */
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			/* The caller doesn't want this much; undo the line. */
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			/* No more events. */
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
4618
/*
 * splice_read() handler for "trace_pipe": render consumed trace events
 * into freshly allocated pages and hand them to the pipe via
 * splice_to_pipe().  Serialized on iter->mutex like tracing_read_pipe().
 */
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages = pages_def,
		.partial = partial_def,
		.nr_pages = 0, /* This gets updated below. */
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &tracing_pipe_buf_ops,
		.spd_release = tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	/* A tracer may provide its own splice implementation. */
	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
4702
Steven Rostedta98a3c32008-05-12 21:20:59 +02004703static ssize_t
4704tracing_entries_read(struct file *filp, char __user *ubuf,
4705 size_t cnt, loff_t *ppos)
4706{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004707 struct inode *inode = file_inode(filp);
4708 struct trace_array *tr = inode->i_private;
4709 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004710 char buf[64];
4711 int r = 0;
4712 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004713
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004714 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004715
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004716 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004717 int cpu, buf_size_same;
4718 unsigned long size;
4719
4720 size = 0;
4721 buf_size_same = 1;
4722 /* check if all cpu sizes are same */
4723 for_each_tracing_cpu(cpu) {
4724 /* fill in the size from first enabled cpu */
4725 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004726 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4727 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004728 buf_size_same = 0;
4729 break;
4730 }
4731 }
4732
4733 if (buf_size_same) {
4734 if (!ring_buffer_expanded)
4735 r = sprintf(buf, "%lu (expanded: %lu)\n",
4736 size >> 10,
4737 trace_buf_size >> 10);
4738 else
4739 r = sprintf(buf, "%lu\n", size >> 10);
4740 } else
4741 r = sprintf(buf, "X\n");
4742 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004743 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004744
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004745 mutex_unlock(&trace_types_lock);
4746
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004747 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4748 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004749}
4750
4751static ssize_t
4752tracing_entries_write(struct file *filp, const char __user *ubuf,
4753 size_t cnt, loff_t *ppos)
4754{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004755 struct inode *inode = file_inode(filp);
4756 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004757 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004758 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004759
Peter Huewe22fe9b52011-06-07 21:58:27 +02004760 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4761 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004762 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004763
4764 /* must have at least 1 entry */
4765 if (!val)
4766 return -EINVAL;
4767
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004768 /* value is in KB */
4769 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004770 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004771 if (ret < 0)
4772 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004773
Jiri Olsacf8517c2009-10-23 19:36:16 -04004774 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004775
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004776 return cnt;
4777}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004778
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004779static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004780tracing_total_entries_read(struct file *filp, char __user *ubuf,
4781 size_t cnt, loff_t *ppos)
4782{
4783 struct trace_array *tr = filp->private_data;
4784 char buf[64];
4785 int r, cpu;
4786 unsigned long size = 0, expanded_size = 0;
4787
4788 mutex_lock(&trace_types_lock);
4789 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004790 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004791 if (!ring_buffer_expanded)
4792 expanded_size += trace_buf_size >> 10;
4793 }
4794 if (ring_buffer_expanded)
4795 r = sprintf(buf, "%lu\n", size);
4796 else
4797 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4798 mutex_unlock(&trace_types_lock);
4799
4800 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4801}
4802
4803static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004804tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4805 size_t cnt, loff_t *ppos)
4806{
4807 /*
4808 * There is no need to read what the user has written, this function
4809 * is just to make sure that there is no error when "echo" is used
4810 */
4811
4812 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004813
4814 return cnt;
4815}
4816
/*
 * Release handler for "free_buffer": shrink the instance's ring buffer
 * to zero entries.  Optionally stops tracing first (STOP_ON_FREE), and
 * drops the trace_array reference taken at open time last, so "tr"
 * stays valid across the resize.
 */
static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
4832
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004833static ssize_t
4834tracing_mark_write(struct file *filp, const char __user *ubuf,
4835 size_t cnt, loff_t *fpos)
4836{
Steven Rostedtd696b582011-09-22 11:50:27 -04004837 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004838 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004839 struct ring_buffer_event *event;
4840 struct ring_buffer *buffer;
4841 struct print_entry *entry;
4842 unsigned long irq_flags;
4843 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004844 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004845 int nr_pages = 1;
4846 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004847 int offset;
4848 int size;
4849 int len;
4850 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004851 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004852
Steven Rostedtc76f0692008-11-07 22:36:02 -05004853 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004854 return -EINVAL;
4855
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004856 if (!(trace_flags & TRACE_ITER_MARKERS))
4857 return -EINVAL;
4858
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004859 if (cnt > TRACE_BUF_SIZE)
4860 cnt = TRACE_BUF_SIZE;
4861
Steven Rostedtd696b582011-09-22 11:50:27 -04004862 /*
4863 * Userspace is injecting traces into the kernel trace buffer.
4864 * We want to be as non intrusive as possible.
4865 * To do so, we do not want to allocate any special buffers
4866 * or take any locks, but instead write the userspace data
4867 * straight into the ring buffer.
4868 *
4869 * First we need to pin the userspace buffer into memory,
4870 * which, most likely it is, because it just referenced it.
4871 * But there's no guarantee that it is. By using get_user_pages_fast()
4872 * and kmap_atomic/kunmap_atomic() we can get access to the
4873 * pages directly. We then write the data directly into the
4874 * ring buffer.
4875 */
4876 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004877
Steven Rostedtd696b582011-09-22 11:50:27 -04004878 /* check if we cross pages */
4879 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4880 nr_pages = 2;
4881
4882 offset = addr & (PAGE_SIZE - 1);
4883 addr &= PAGE_MASK;
4884
4885 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4886 if (ret < nr_pages) {
4887 while (--ret >= 0)
4888 put_page(pages[ret]);
4889 written = -EFAULT;
4890 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004891 }
4892
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004893 for (i = 0; i < nr_pages; i++)
4894 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004895
4896 local_save_flags(irq_flags);
4897 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004898 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004899 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4900 irq_flags, preempt_count());
4901 if (!event) {
4902 /* Ring buffer disabled, return as if not open for write */
4903 written = -EBADF;
4904 goto out_unlock;
4905 }
4906
4907 entry = ring_buffer_event_data(event);
4908 entry->ip = _THIS_IP_;
4909
4910 if (nr_pages == 2) {
4911 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004912 memcpy(&entry->buf, map_page[0] + offset, len);
4913 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004914 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004915 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004916
4917 if (entry->buf[cnt - 1] != '\n') {
4918 entry->buf[cnt] = '\n';
4919 entry->buf[cnt + 1] = '\0';
4920 } else
4921 entry->buf[cnt] = '\0';
4922
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004923 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004924
4925 written = cnt;
4926
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004927 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004928
Steven Rostedtd696b582011-09-22 11:50:27 -04004929 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004930 for (i = 0; i < nr_pages; i++){
4931 kunmap_atomic(map_page[i]);
4932 put_page(pages[i]);
4933 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004934 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004935 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004936}
4937
Li Zefan13f16d22009-12-08 11:16:11 +08004938static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004939{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004940 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004941 int i;
4942
4943 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004944 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004945 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004946 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4947 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004948 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004949
Li Zefan13f16d22009-12-08 11:16:11 +08004950 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004951}
4952
/*
 * Switch the trace clock of @tr to the clock named @clockstr.
 *
 * Returns 0 on success, -EINVAL if the name matches no entry in
 * trace_clocks[].  Under trace_types_lock, installs the new clock on
 * the main buffer (and, for the global array, the max/snapshot buffer)
 * and then resets the buffers: timestamps taken with the old clock are
 * not comparable with the new one, so stale events must be dropped.
 */
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	/* linear search of the known clock names */
	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* only the global trace array owns a max buffer clock */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
4986
4987static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
4988 size_t cnt, loff_t *fpos)
4989{
4990 struct seq_file *m = filp->private_data;
4991 struct trace_array *tr = m->private;
4992 char buf[64];
4993 const char *clockstr;
4994 int ret;
4995
4996 if (cnt >= sizeof(buf))
4997 return -EINVAL;
4998
4999 if (copy_from_user(&buf, ubuf, cnt))
5000 return -EFAULT;
5001
5002 buf[cnt] = 0;
5003
5004 clockstr = strstrip(buf);
5005
5006 ret = tracing_set_clock(tr, clockstr);
5007 if (ret)
5008 return ret;
5009
Zhaolei5079f322009-08-25 16:12:56 +08005010 *fpos += cnt;
5011
5012 return cnt;
5013}
5014
Li Zefan13f16d22009-12-08 11:16:11 +08005015static int tracing_clock_open(struct inode *inode, struct file *file)
5016{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005017 struct trace_array *tr = inode->i_private;
5018 int ret;
5019
Li Zefan13f16d22009-12-08 11:16:11 +08005020 if (tracing_disabled)
5021 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005022
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005023 if (trace_array_get(tr))
5024 return -ENODEV;
5025
5026 ret = single_open(file, tracing_clock_show, inode->i_private);
5027 if (ret < 0)
5028 trace_array_put(tr);
5029
5030 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005031}
5032
/* Per-open-file state for the raw buffer files (trace_pipe_raw etc.). */
struct ftrace_buffer_info {
	struct trace_iterator	iter;	/* which buffer/cpu this fd reads */
	void			*spare;	/* spare ring buffer page for reads */
	unsigned int		read;	/* bytes of ->spare already consumed */
};
5038
#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open handler for the "snapshot" file.
 *
 * Read opens get a full iterator over the max (snapshot) buffer via
 * __tracing_open(..., snapshot=true).  Write-only opens still need a
 * seq_file to carry the private iterator, so a stub seq_file plus a
 * minimal iterator are allocated by hand.  The trace_array reference
 * taken up front is dropped again on any failure path.
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5079
/*
 * Write handler for the "snapshot" file.  The written number selects an
 * action:
 *   0 - free the snapshot buffer (all-cpus file only),
 *   1 - allocate it if needed and swap it with the live buffer
 *       (per-cpu swap only if the ring buffer supports it),
 *   >1 - clear the snapshot buffer contents.
 * Not allowed while a latency tracer owns the max buffer (use_max_tr).
 * The swap itself runs with IRQs off, mirroring what the latency
 * tracers do when they take a max snapshot.
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	/* make sure the ring buffer is actually allocated/expanded */
	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* the current tracer already uses the max buffer for itself */
	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		/* any other value: just erase the snapshot contents */
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005153
/*
 * Release handler for the "snapshot" file.  tracing_release() tears
 * down a real read iterator (and drops the trace_array ref); for a
 * write-only open the seq_file was just a stub allocated in
 * tracing_snapshot_open(), so free the stub iterator and seq_file here.
 */
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}
5171
/* Forward declarations: snapshot_raw reuses the buffer-file handlers. */
static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

/*
 * Open handler for "snapshot_raw": a raw (page-level) reader like
 * trace_pipe_raw, but redirected at the max/snapshot buffer.  Refused
 * while a latency tracer owns the max buffer.
 */
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	/* sets up filp->private_data and takes the refs we need */
	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	/* retarget the iterator at the snapshot buffer */
	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */
5202
5203
/* File operations for the tracefs control files defined above. */

/* "tracing_thresh": latency threshold in usecs */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

/* "tracing_max_latency": max latency recorded by latency tracers */
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

/* "current_tracer": select the active tracer */
static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

/* "trace_pipe": consuming, blocking reader */
static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

/* "buffer_size_kb": per-cpu or global buffer size */
static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* "buffer_total_size_kb": read-only sum over all cpus */
static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* "free_buffer": shrinks the buffer to zero on close */
static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

/* "trace_marker": userspace event injection */
static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* "trace_clock": list/select the trace timestamp clock */
static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};
5269
#ifdef CONFIG_TRACER_SNAPSHOT
/* "snapshot": seq_file reader plus the action-number write handler */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

/* "snapshot_raw": page-level reader over the snapshot buffer */
static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
5288
/*
 * Open handler for "trace_pipe_raw": set up an ftrace_buffer_info
 * iterator over the live buffer for one cpu (or all).
 *
 * Holds a trace_array reference for the lifetime of the fd and bumps
 * the current tracer's ref under trace_types_lock so the tracer cannot
 * be switched away while raw readers exist.  On any failure the array
 * reference is dropped again.
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	/* pin the current tracer while this reader is open */
	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5329
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005330static unsigned int
5331tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5332{
5333 struct ftrace_buffer_info *info = filp->private_data;
5334 struct trace_iterator *iter = &info->iter;
5335
5336 return trace_poll(iter, filp, poll_table);
5337}
5338
/*
 * Read handler for "trace_pipe_raw": hand out whole ring buffer pages.
 *
 * A spare page is lazily allocated and refilled from the ring buffer
 * under trace_access_lock.  Leftover bytes from the previous refill
 * (info->read < PAGE_SIZE) are served first.  When the buffer is empty
 * the read blocks via wait_on_pipe() unless O_NONBLOCK is set.
 * Returns the number of bytes copied, 0 on clean end of data, or a
 * negative error.
 */
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* a latency tracer is using the snapshot buffer we'd read */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			/* block until data arrives, then retry the refill */
			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	/* copy_to_user() returns the number of bytes NOT copied */
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
5405
/*
 * Release for a per-cpu "trace_pipe_raw" file: drop the tracer reference
 * taken at open time, put the trace_array, and free the spare read page
 * plus the per-open info structure.
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	/* Balances the ref presumably taken in tracing_buffers_open() — verify there */
	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	/* Spare page is lazily allocated by the read path; may be NULL */
	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
5425
/*
 * Reference-counted handle to a ring-buffer read page handed to the
 * splice machinery; freed when the last pipe buffer drops its ref.
 */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* ring buffer the page came from */
	void			*page;		/* page from ring_buffer_alloc_read_page() */
	int			ref;		/* reference count */
};
5431
5432static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5433 struct pipe_buffer *buf)
5434{
5435 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5436
5437 if (--ref->ref)
5438 return;
5439
5440 ring_buffer_free_read_page(ref->buffer, ref->page);
5441 kfree(ref);
5442 buf->private = 0;
5443}
5444
Steven Rostedt2cadf912008-12-01 22:20:19 -05005445static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5446 struct pipe_buffer *buf)
5447{
5448 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5449
5450 ref->ref++;
5451}
5452
/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,	/* pages are handed over whole; never merged */
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
5461
5462/*
5463 * Callback from splice_to_pipe(), if we need to release some pages
5464 * at the end of the spd in case we error'ed out in filling the pipe.
5465 */
5466static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5467{
5468 struct buffer_ref *ref =
5469 (struct buffer_ref *)spd->partial[i].private;
5470
5471 if (--ref->ref)
5472 return;
5473
5474 ring_buffer_free_read_page(ref->buffer, ref->page);
5475 kfree(ref);
5476 spd->partial[i].private = 0;
5477}
5478
5479static ssize_t
5480tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5481 struct pipe_inode_info *pipe, size_t len,
5482 unsigned int flags)
5483{
5484 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005485 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005486 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5487 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005488 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005489 .pages = pages_def,
5490 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005491 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005492 .flags = flags,
5493 .ops = &buffer_pipe_buf_ops,
5494 .spd_release = buffer_spd_release,
5495 };
5496 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005497 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01005498 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005499
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005500#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005501 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5502 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005503#endif
5504
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005505 if (splice_grow_spd(pipe, &spd))
5506 return -ENOMEM;
Jens Axboe35f3d142010-05-20 10:43:18 +02005507
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005508 if (*ppos & (PAGE_SIZE - 1))
5509 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005510
5511 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005512 if (len < PAGE_SIZE)
5513 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005514 len &= PAGE_MASK;
5515 }
5516
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005517 again:
5518 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005519 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005520
Al Viroa786c062014-04-11 12:01:03 -04005521 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005522 struct page *page;
5523 int r;
5524
5525 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01005526 if (!ref) {
5527 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005528 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01005529 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005530
Steven Rostedt7267fa62009-04-29 00:16:21 -04005531 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005532 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005533 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005534 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005535 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005536 kfree(ref);
5537 break;
5538 }
5539
5540 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005541 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005542 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005543 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005544 kfree(ref);
5545 break;
5546 }
5547
5548 /*
5549 * zero out any left over data, this is going to
5550 * user land.
5551 */
5552 size = ring_buffer_page_len(ref->page);
5553 if (size < PAGE_SIZE)
5554 memset(ref->page + size, 0, PAGE_SIZE - size);
5555
5556 page = virt_to_page(ref->page);
5557
5558 spd.pages[i] = page;
5559 spd.partial[i].len = PAGE_SIZE;
5560 spd.partial[i].offset = 0;
5561 spd.partial[i].private = (unsigned long)ref;
5562 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005563 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005564
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005565 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005566 }
5567
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005568 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005569 spd.nr_pages = i;
5570
5571 /* did we read anything? */
5572 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005573 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005574 return ret;
Rabin Vincent07906da2014-11-06 22:26:07 +01005575
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005576 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5577 return -EAGAIN;
5578
Rabin Vincente30f53a2014-11-10 19:46:34 +01005579 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005580 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005581 return ret;
Rabin Vincente30f53a2014-11-10 19:46:34 +01005582
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005583 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005584 }
5585
5586 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005587 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005588
Steven Rostedt2cadf912008-12-01 22:20:19 -05005589 return ret;
5590}
5591
/* File operations for the per-cpu "trace_pipe_raw" binary interface */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
5600
/*
 * Read handler for the per-cpu "stats" file: formats ring-buffer
 * counters (entries, overruns, byte count, timestamps, dropped and read
 * events) for one CPU into a trace_seq and copies it to user space.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	/* Timestamps only convert to seconds for clocks counting in ns */
	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
5664
/* File operations for the per-cpu "stats" file */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5671
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005672#ifdef CONFIG_DYNAMIC_FTRACE
5673
/*
 * Weak default: architectures may override to append arch-specific
 * dynamic-ftrace info to @buf.  Returns the number of bytes written
 * (none here).
 */
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}
5678
/*
 * Read handler for "dyn_ftrace_total_info": prints the counter passed
 * via private_data followed by optional arch-specific text.  The static
 * format buffer is serialized by a local mutex.
 */
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	/* Leave room for the trailing newline appended below */
	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}
5702
/* File operations for "dyn_ftrace_total_info" (CONFIG_DYNAMIC_FTRACE) */
static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005708#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005709
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005710#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/* Function probe: take an unconditional tracing snapshot on every hit. */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005716
/*
 * Function probe with a hit budget: snapshot while *data is non-zero,
 * decrementing it each hit unless it is -1 (unlimited).
 */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *remaining = (long *)data;

	if (*remaining == 0)
		return;

	if (*remaining != -1)
		--*remaining;

	tracing_snapshot();
}
5730
/*
 * Show one registered snapshot probe, e.g.
 *   "<func>:snapshot:unlimited" or "<func>:snapshot:count=<n>".
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:snapshot", (void *)ip);

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
5748
/* Probe ops for "snapshot" with no count parameter (always fires) */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};
5753
/* Probe ops for "snapshot:<count>" (fires a limited number of times) */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
5758
/*
 * set_ftrace_filter command parser for "<func>:snapshot[:count]".
 * Registers a function probe that snapshots the trace when <func> is
 * hit; a leading '!' unregisters it.  The optional count limits the
 * number of snapshots taken.
 */
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;	/* -1 == unlimited hits */
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	/* '!' prefix removes a previously registered probe */
	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	/* Empty count (e.g. trailing ':') behaves like no count at all */
	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	/* Ensure a snapshot buffer exists before the probe can fire */
	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}
5803
/* The "snapshot" command usable in set_ftrace_filter */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};
5808
/* Register the "snapshot" ftrace command at boot. */
static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
5813#else
/* No snapshot command without both TRACER_SNAPSHOT and DYNAMIC_FTRACE */
static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005815#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005816
/*
 * Return (creating on first use) the debugfs directory for @tr.
 * Only the global trace array gets the top-level "tracing" directory
 * here; returns ERR_PTR(-ENODEV) if debugfs is not available, or NULL
 * (after a one-time warning) if directory creation failed.
 */
struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return ERR_PTR(-ENODEV);

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}
5833
/* Convenience wrapper: debugfs directory of the global trace array. */
struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}
5838
/*
 * Return (creating on first use) the "per_cpu" debugfs directory for
 * @tr.  Returns NULL on any failure; a missing directory only triggers
 * a one-time warning.
 */
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}
5857
/*
 * Like trace_create_file() but additionally stashes cpu+1 in the
 * inode's i_cdev so the read/open paths can recover which CPU the file
 * belongs to (the +1 keeps CPU 0 distinguishable from "unset").
 */
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}
5868
/*
 * Populate per_cpu/cpu<N> in debugfs for @tr with the per-cpu trace
 * interfaces (trace, trace_pipe, raw buffers, stats, sizing and, when
 * configured, snapshot files).  Failures are logged and ignored.
 */
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	/* raw binary ring-buffer pages for this cpu */
	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	/* per-cpu ring-buffer statistics */
	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	/* per-cpu buffer size (read-only view here) */
	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
5911
Steven Rostedt60a11772008-05-12 21:20:44 +02005912#ifdef CONFIG_FTRACE_SELFTEST
5913/* Let selftest have access to static functions in this file */
5914#include "trace_selftest.c"
5915#endif
5916
/* Book-keeping for one tracer-specific option file under "options/" */
struct trace_option_dentry {
	struct tracer_opt		*opt;	/* the option this file toggles */
	struct tracer_flags		*flags;	/* flag word the option lives in */
	struct trace_array		*tr;	/* owning trace array */
	struct dentry			*entry;	/* the debugfs file itself */
};
5923
5924static ssize_t
5925trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5926 loff_t *ppos)
5927{
5928 struct trace_option_dentry *topt = filp->private_data;
5929 char *buf;
5930
5931 if (topt->flags->val & topt->opt->bit)
5932 buf = "1\n";
5933 else
5934 buf = "0\n";
5935
5936 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5937}
5938
5939static ssize_t
5940trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5941 loff_t *ppos)
5942{
5943 struct trace_option_dentry *topt = filp->private_data;
5944 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005945 int ret;
5946
Peter Huewe22fe9b52011-06-07 21:58:27 +02005947 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5948 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005949 return ret;
5950
Li Zefan8d18eaa2009-12-08 11:17:06 +08005951 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005952 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005953
5954 if (!!(topt->flags->val & topt->opt->bit) != val) {
5955 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005956 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005957 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005958 mutex_unlock(&trace_types_lock);
5959 if (ret)
5960 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005961 }
5962
5963 *ppos += cnt;
5964
5965 return cnt;
5966}
5967
5968
5969static const struct file_operations trace_options_fops = {
5970 .open = tracing_open_generic,
5971 .read = trace_options_read,
5972 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005973 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005974};
5975
Steven Rostedta8259072009-02-26 22:19:12 -05005976static ssize_t
5977trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5978 loff_t *ppos)
5979{
5980 long index = (long)filp->private_data;
5981 char *buf;
5982
5983 if (trace_flags & (1 << index))
5984 buf = "1\n";
5985 else
5986 buf = "0\n";
5987
5988 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5989}
5990
5991static ssize_t
5992trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5993 loff_t *ppos)
5994{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005995 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005996 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005997 unsigned long val;
5998 int ret;
5999
Peter Huewe22fe9b52011-06-07 21:58:27 +02006000 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6001 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05006002 return ret;
6003
Zhaoleif2d84b62009-08-07 18:55:48 +08006004 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05006005 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006006
6007 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006008 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006009 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05006010
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006011 if (ret < 0)
6012 return ret;
6013
Steven Rostedta8259072009-02-26 22:19:12 -05006014 *ppos += cnt;
6015
6016 return cnt;
6017}
6018
/* File operations for the core (tracer-independent) option files */
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
6025
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006026struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006027 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006028 struct dentry *parent,
6029 void *data,
6030 const struct file_operations *fops)
6031{
6032 struct dentry *ret;
6033
6034 ret = debugfs_create_file(name, mode, parent, data, fops);
6035 if (!ret)
6036 pr_warning("Could not create debugfs '%s' entry\n", name);
6037
6038 return ret;
6039}
6040
6041
/*
 * Return (creating on first use) the "options" debugfs directory for
 * @tr; NULL on failure (with a warning when the mkdir itself failed).
 */
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
6061
/*
 * Create one tracer-option file under "options/", filling in @topt so
 * the read/write handlers can find the flag word and bit.  Failures
 * are silently tolerated (topt->entry stays NULL).
 */
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

}
6082
6083static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006084create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006085{
6086 struct trace_option_dentry *topts;
6087 struct tracer_flags *flags;
6088 struct tracer_opt *opts;
6089 int cnt;
6090
6091 if (!tracer)
6092 return NULL;
6093
6094 flags = tracer->flags;
6095
6096 if (!flags || !flags->opts)
6097 return NULL;
6098
6099 opts = flags->opts;
6100
6101 for (cnt = 0; opts[cnt].name; cnt++)
6102 ;
6103
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006104 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006105 if (!topts)
6106 return NULL;
6107
6108 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006109 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006110 &opts[cnt]);
6111
6112 return topts;
6113}
6114
6115static void
6116destroy_trace_option_files(struct trace_option_dentry *topts)
6117{
6118 int cnt;
6119
6120 if (!topts)
6121 return;
6122
Fabian Frederick3f4d8f72014-06-26 19:14:31 +02006123 for (cnt = 0; topts[cnt].opt; cnt++)
6124 debugfs_remove(topts[cnt].entry);
Steven Rostedt577b7852009-02-26 23:43:05 -05006125
6126 kfree(topts);
6127}
6128
Steven Rostedta8259072009-02-26 22:19:12 -05006129static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006130create_trace_option_core_file(struct trace_array *tr,
6131 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006132{
6133 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006134
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006135 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006136 if (!t_options)
6137 return NULL;
6138
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006139 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05006140 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006141}
6142
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006143static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006144{
6145 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006146 int i;
6147
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006148 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006149 if (!t_options)
6150 return;
6151
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006152 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006153 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05006154}
6155
Steven Rostedt499e5472012-02-22 15:50:28 -05006156static ssize_t
6157rb_simple_read(struct file *filp, char __user *ubuf,
6158 size_t cnt, loff_t *ppos)
6159{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006160 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006161 char buf[64];
6162 int r;
6163
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006164 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006165 r = sprintf(buf, "%d\n", r);
6166
6167 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6168}
6169
6170static ssize_t
6171rb_simple_write(struct file *filp, const char __user *ubuf,
6172 size_t cnt, loff_t *ppos)
6173{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006174 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006175 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05006176 unsigned long val;
6177 int ret;
6178
6179 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6180 if (ret)
6181 return ret;
6182
6183 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006184 mutex_lock(&trace_types_lock);
6185 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006186 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006187 if (tr->current_trace->start)
6188 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006189 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006190 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006191 if (tr->current_trace->stop)
6192 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006193 }
6194 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05006195 }
6196
6197 (*ppos)++;
6198
6199 return cnt;
6200}
6201
/* File operations for the per-instance "tracing_on" control file. */
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
6209
/* debugfs "instances" directory; mkdir/rmdir inside it manage instances. */
struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6214
/*
 * Allocate the ring buffer and per-CPU data for one trace_buffer that
 * belongs to @tr.  Returns 0 on success, -ENOMEM on failure (nothing
 * is left allocated in @buf on failure).
 */
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	/* Honor the overwrite-vs-stop-writing trace option. */
	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	/*
	 * NOTE(review): this always reads tr->trace_buffer, even when @buf
	 * is a different buffer (e.g. max_buffer) — confirm it shouldn't
	 * operate on @buf instead.
	 */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
6240
6241static int allocate_trace_buffers(struct trace_array *tr, int size)
6242{
6243 int ret;
6244
6245 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6246 if (ret)
6247 return ret;
6248
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006249#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006250 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6251 allocate_snapshot ? size : 1);
6252 if (WARN_ON(ret)) {
6253 ring_buffer_free(tr->trace_buffer.buffer);
6254 free_percpu(tr->trace_buffer.data);
6255 return -ENOMEM;
6256 }
6257 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006258
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006259 /*
6260 * Only the top level trace array gets its snapshot allocated
6261 * from the kernel command line.
6262 */
6263 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006264#endif
6265 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006266}
6267
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006268static void free_trace_buffer(struct trace_buffer *buf)
6269{
6270 if (buf->buffer) {
6271 ring_buffer_free(buf->buffer);
6272 buf->buffer = NULL;
6273 free_percpu(buf->data);
6274 buf->data = NULL;
6275 }
6276}
6277
/* Free all trace buffers owned by @tr (main and, if built, max buffer). */
static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
6289
/*
 * Create a new named trace instance: allocate its trace_array, buffers
 * and debugfs directory, then publish it on ftrace_trace_arrays.
 * Returns 0 on success, -EEXIST if the name is taken, or -ENOMEM.
 * Called with the instances-directory inode unlocked (see instance_mkdir).
 */
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	/* Instance names must be unique. */
	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	/* New instances trace all CPUs by default. */
	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	/* Start with no tracer attached. */
	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	/* Publish; the list is protected by trace_types_lock. */
	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	/* All of these tolerate partially-initialized (NULL) members. */
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
6359
/*
 * Tear down the named trace instance.  Returns 0 on success, -ENODEV
 * if no instance with that name exists, or -EBUSY while the instance
 * (or its current tracer) still holds references from open files.
 */
static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	/* Refuse to delete while readers hold references. */
	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Detach tracer, events and function files before freeing. */
	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
6400
/*
 * ->mkdir handler for the "instances" directory: creates a trace
 * instance named after the new directory entry.
 */
static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then the new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}
6427
/*
 * ->rmdir handler for the "instances" directory: deletes the trace
 * instance named by the directory entry being removed.
 */
static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but the debugfs removal done from
	 * instance_delete() will also take mutexes.  (NOTE(review): this
	 * comment was copied from instance_mkdir() and referred to
	 * debugfs_create_dir().)  As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	/* Reacquire in parent-then-child order to match VFS lock rules. */
	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}
6458
/*
 * Inode operations for the "instances" directory: mkdir/rmdir there
 * create and destroy trace instances.
 */
static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};
6464
/* Create the "instances" debugfs directory and hook its mkdir/rmdir. */
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
6474
/*
 * Populate @d_tracer with the control and output files for trace array
 * @tr.  Used both for the top-level tracing directory and for each
 * instance directory.
 */
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	/* Per-CPU trace files live under a per_cpu subdirectory. */
	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);

}
6533
/*
 * Boot-time creation of the global debugfs tracing files: the top-level
 * trace array's files plus global informational files, the "instances"
 * directory and the core "options" directory.
 */
static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}
6569
/* Panic notifier: dump the ftrace buffers if ftrace_dump_on_oops is set. */
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}
6577
/* Registered on panic_notifier_list at boot (see tracer_alloc_buffers). */
static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};
6583
6584static int trace_die_handler(struct notifier_block *self,
6585 unsigned long val,
6586 void *data)
6587{
6588 switch (val) {
6589 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04006590 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006591 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006592 break;
6593 default:
6594 break;
6595 }
6596 return NOTIFY_OK;
6597}
6598
/* Registered via register_die_notifier() at boot (see tracer_alloc_buffers). */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
6603
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006616
/*
 * Emit the contents of @s to the console at KERN_TRACE level (capped at
 * TRACE_MAX_PRINT characters) and reset the sequence for reuse.  Used
 * by ftrace_dump() and the kdb trace dumper.
 */
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
6639
/*
 * Initialize @iter to walk the global trace array across all CPUs.
 * Shared by ftrace_dump() and external dumpers (e.g. kdb).
 */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	/* Give the current tracer a chance to set up its iterator state. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
6658
/*
 * Dump the trace buffers to the console at KERN_TRACE level.  Intended
 * for oops/panic/sysrq paths: disables tracing, runs with IRQs off, and
 * only permits one concurrent dumper.  @oops_dump_mode selects all CPUs,
 * only the current CPU, or nothing (DUMP_NONE).
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	/* Keep new events from being recorded while we read. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read the
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		/* Dumping can take a while; don't trip the NMI watchdog. */
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	/* Re-enable recording on all CPUs and release the dump slot. */
	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006768
/*
 * Boot-time allocation of the global trace buffers, registration of the
 * nop tracer, and installation of the panic/die notifiers.  Called from
 * trace_init().  Returns 0 on success or a negative errno, unwinding
 * all allocations on failure.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;


	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	/* Honor a trace_clock= boot parameter if one was given. */
	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	/* Apply any trace_options= boot parameters (comma separated). */
	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006871
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05006872void __init trace_init(void)
6873{
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -05006874 if (tracepoint_printk) {
6875 tracepoint_print_iter =
6876 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
6877 if (WARN_ON(!tracepoint_print_iter))
6878 tracepoint_printk = 0;
6879 }
Steven Rostedt (Red Hat)5f893b22014-12-12 20:05:10 -05006880 tracer_alloc_buffers();
6881 init_ftrace_syscalls();
6882 trace_event_init();
6883}
6884
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006885__init static int clear_boot_tracer(void)
6886{
6887 /*
6888 * The default tracer at boot buffer is an init section.
6889 * This function is called in lateinit. If we did not
6890 * find the boot tracer, then clear it out, to prevent
6891 * later registration from accessing the buffer that is
6892 * about to be freed.
6893 */
6894 if (!default_bootup_tracer)
6895 return 0;
6896
6897 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6898 default_bootup_tracer);
6899 default_bootup_tracer = NULL;
6900
6901 return 0;
6902}
6903
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006904fs_initcall(tracer_init_debugfs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006905late_initcall(clear_boot_tracer);