/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 * Copyright (C) 2004-2006 Ingo Molnar
 * Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions
 * into the ring buffer, such as trace_printk, could occur at the
 * same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set it to 1 if you want to dump the buffers of all CPUs
 * Set it to 2 if you want to dump only the buffer of the CPU that
 * triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
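
/*
 * Illustration (not part of the original source): booting with
 * "ftrace_dump_on_oops" on the kernel command line selects DUMP_ALL,
 * while "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG, dumping only
 * the buffer of the CPU that triggered the oops.
 */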

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
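
/*
 * Illustration (not in the original source): the "nsec += 500" above
 * rounds to the nearest microsecond, e.g. ns2usecs(1499) == 1 and
 * ns2usecs(1500) == 2.
 */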

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
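
/*
 * Typical reference-counting pattern (an illustrative sketch, not code
 * from this file): a successful trace_array_get() pins the trace_array
 * until the matching trace_array_put() releases it.
 *
 *	if (trace_array_get(tr) == 0) {
 *		...			use tr; it cannot go away here
 *		trace_array_put(tr);
 *	}
 */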

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If a dump on oops happens, not having to wait for all that
 * output is much appreciated. In any case, this is configurable
 * at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek(), etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different CPU
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
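
/*
 * Illustrative reader pattern (a sketch, not code from this file): a
 * per-cpu consumer takes the read side plus the per-cpu mutex, while an
 * all-cpu consumer takes the write side, so the two exclude each other.
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu);
 */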

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
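
/*
 * Note (added for clarity, not from the original source): __trace_puts()
 * copies the string bytes into the ring buffer, while __trace_bputs()
 * records only the pointer, so the latter is cheaper but is safe only
 * for strings that live forever (e.g. string literals). Callers such as
 * the trace_puts() macro are expected to choose between them based on
 * whether the argument is a built-in constant.
 */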

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
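
/*
 * Illustrative use of the snapshot API (a sketch, not from this file):
 *
 *	tracing_alloc_snapshot();	somewhere that may sleep
 *	...
 *	tracing_snapshot();		later, from a context that must
 *					not sleep
 *
 * The user-space equivalent of allocating and taking a snapshot is:
 *	echo 1 > /sys/kernel/debug/tracing/snapshot
 */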

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races where it gets disabled while we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show the real state of the ring buffer
 * @tr: the trace array whose ring buffer state is wanted
 *
 * Shows the real state of the ring buffer: whether it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries cannot be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
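
/*
 * Illustration (not part of the original source): memparse() accepts
 * K/M/G suffixes, so booting with "trace_buf_size=4m" requests a 4 MiB
 * per-cpu buffer, while "trace_buf_size=16384" requests 16 KiB.
 */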

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS
};
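
/*
 * Illustration (not part of the original source): the "name" column is
 * what user space writes to pick a clock, e.g.:
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 * "local" is fastest but is not guaranteed monotonic across CPUs,
 * "global" is ordered across CPUs at some extra cost, and "counter"
 * is a strict ordering counter rather than a timestamp.
 */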

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
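
/*
 * Worked example (illustrative, not from the original source): if user
 * space writes "foo\n", trace_get_user() returns 4 with parser->buffer
 * containing "foo" and parser->cont false, since whitespace finished
 * the token. If a write ends in the middle of a token, parser->cont is
 * set and the next call appends to the same buffer instead of starting
 * a new token.
 */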

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since running this tracer will break them */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
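
/*
 * Minimal registration sketch (a hypothetical tracer, not code from this
 * file): a tracer fills in a struct tracer and registers it at init
 * time; mytrace_init and mytrace_reset are assumed helpers.
 *
 *	static struct tracer mytrace __read_mostly = {
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *		.reset	= mytrace_reset,
 *	};
 *
 *	static int __init init_mytrace(void)
 *	{
 *		return register_tracer(&mytrace);
 *	}
 *	core_initcall(init_mytrace);
 */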

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1294
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001295#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001296#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001297static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001298struct saved_cmdlines_buffer {
1299 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1300 unsigned *map_cmdline_to_pid;
1301 unsigned cmdline_num;
1302 int cmdline_idx;
1303 char *saved_cmdlines;
1304};
1305static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001306
Steven Rostedt25b0b442008-05-12 21:21:00 +02001307/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001308static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001309
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001310static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001311{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001312 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1313}
1314
1315static inline void set_cmdline(int idx, const char *cmdline)
1316{
1317 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1318}
1319
1320static int allocate_cmdlines_buffer(unsigned int val,
1321 struct saved_cmdlines_buffer *s)
1322{
1323 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1324 GFP_KERNEL);
1325 if (!s->map_cmdline_to_pid)
1326 return -ENOMEM;
1327
1328 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1329 if (!s->saved_cmdlines) {
1330 kfree(s->map_cmdline_to_pid);
1331 return -ENOMEM;
1332 }
1333
1334 s->cmdline_idx = 0;
1335 s->cmdline_num = val;
1336 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1337 sizeof(s->map_pid_to_cmdline));
1338 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1339 val * sizeof(*s->map_cmdline_to_pid));
1340
1341 return 0;
1342}
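/*
 * A quick sizing sketch for the allocation above (assuming the usual
 * TASK_COMM_LEN of 16 and sizeof(unsigned) of 4; both can differ by
 * configuration): with the default of 128 entries this costs about
 *
 *	map_cmdline_to_pid:	128 * 4			=  512 bytes
 *	saved_cmdlines:		128 * TASK_COMM_LEN	= 2048 bytes
 *
 * The static map_pid_to_cmdline[] array embedded in the struct
 * dominates: (PID_MAX_DEFAULT + 1) * 4 bytes, roughly 128 KiB when
 * PID_MAX_DEFAULT is 0x8000.
 */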
1343
1344static int trace_create_savedcmd(void)
1345{
1346 int ret;
1347
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001348 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001349 if (!savedcmd)
1350 return -ENOMEM;
1351
1352 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1353 if (ret < 0) {
1354 kfree(savedcmd);
1355 savedcmd = NULL;
1356 return -ENOMEM;
1357 }
1358
1359 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001360}
1361
Carsten Emdeb5130b12009-09-13 01:43:07 +02001362int is_tracing_stopped(void)
1363{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001364 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001365}
1366
Steven Rostedt0f048702008-11-05 16:05:44 -05001367/**
1368 * tracing_start - quick start of the tracer
1369 *
1370 * If tracing is enabled but was stopped by tracing_stop,
1371 * this will start the tracer back up.
1372 */
1373void tracing_start(void)
1374{
1375 struct ring_buffer *buffer;
1376 unsigned long flags;
1377
1378 if (tracing_disabled)
1379 return;
1380
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001381 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1382 if (--global_trace.stop_count) {
1383 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001384 /* Someone screwed up their debugging */
1385 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001386 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001387 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001388 goto out;
1389 }
1390
Steven Rostedta2f80712010-03-12 19:56:00 -05001391 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001392 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001393
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001394 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001395 if (buffer)
1396 ring_buffer_record_enable(buffer);
1397
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001398#ifdef CONFIG_TRACER_MAX_TRACE
1399 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001400 if (buffer)
1401 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001402#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001403
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001404 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001405
Steven Rostedt0f048702008-11-05 16:05:44 -05001406 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001407 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1408}
1409
1410static void tracing_start_tr(struct trace_array *tr)
1411{
1412 struct ring_buffer *buffer;
1413 unsigned long flags;
1414
1415 if (tracing_disabled)
1416 return;
1417
1418 /* If global, we need to also start the max tracer */
1419 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1420 return tracing_start();
1421
1422 raw_spin_lock_irqsave(&tr->start_lock, flags);
1423
1424 if (--tr->stop_count) {
1425 if (tr->stop_count < 0) {
1426 /* Someone screwed up their debugging */
1427 WARN_ON_ONCE(1);
1428 tr->stop_count = 0;
1429 }
1430 goto out;
1431 }
1432
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001433 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001434 if (buffer)
1435 ring_buffer_record_enable(buffer);
1436
1437 out:
1438 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001439}
1440
1441/**
1442 * tracing_stop - quick stop of the tracer
1443 *
1444	 * Lightweight way to stop tracing. Use in conjunction with
1445 * tracing_start.
1446 */
1447void tracing_stop(void)
1448{
1449 struct ring_buffer *buffer;
1450 unsigned long flags;
1451
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001452 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1453 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001454 goto out;
1455
Steven Rostedta2f80712010-03-12 19:56:00 -05001456 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001457 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001458
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001459 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001460 if (buffer)
1461 ring_buffer_record_disable(buffer);
1462
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001463#ifdef CONFIG_TRACER_MAX_TRACE
1464 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001465 if (buffer)
1466 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001467#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001468
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001469 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001470
Steven Rostedt0f048702008-11-05 16:05:44 -05001471 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001472 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1473}
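/*
 * A minimal usage sketch (hypothetical caller, not from this file).
 * Stop/start nest through stop_count, so paired calls are safe even
 * when another path has already stopped tracing:
 *
 *	tracing_stop();
 *	inspect_buffers();	// hypothetical helper
 *	tracing_start();	// recording resumes when the count hits 0
 */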
1474
1475static void tracing_stop_tr(struct trace_array *tr)
1476{
1477 struct ring_buffer *buffer;
1478 unsigned long flags;
1479
1480 /* If global, we need to also stop the max tracer */
1481 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1482 return tracing_stop();
1483
1484 raw_spin_lock_irqsave(&tr->start_lock, flags);
1485 if (tr->stop_count++)
1486 goto out;
1487
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001488 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001489 if (buffer)
1490 ring_buffer_record_disable(buffer);
1491
1492 out:
1493 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001494}
1495
Ingo Molnare309b412008-05-12 21:20:51 +02001496void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001498static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001499{
Carsten Emdea635cf02009-03-18 09:00:41 +01001500 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001501
1502 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001503 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001504
1505 /*
1506 * It's not the end of the world if we don't get
1507 * the lock, but we also don't want to spin
1508 * nor do we want to disable interrupts,
1509 * so if we miss here, then better luck next time.
1510 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001511 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001512 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001513
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001514 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001515 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001516 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001517
Carsten Emdea635cf02009-03-18 09:00:41 +01001518 /*
1519 * Check whether the cmdline buffer at idx has a pid
1520 * mapped. We are going to overwrite that entry so we
1521 * need to clear the map_pid_to_cmdline. Otherwise we
1522 * would read the new comm for the old pid.
1523 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001524 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001525 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001526 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001528 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1529 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001530
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001531 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001532 }
1533
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001534 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001535
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001536 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001537
1538 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001539}
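/*
 * Walk-through of the mapping above (pid values are illustrative):
 * saving pid 4242 when it has no slot yet picks the next circular
 * index, unmaps whatever pid previously owned that slot, and then
 * links both directions of the mapping:
 *
 *	idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
 *	savedcmd->map_cmdline_to_pid[idx]  = 4242;
 *	savedcmd->map_pid_to_cmdline[4242] = idx;
 *
 * so a later lookup of pid 4242 resolves to tsk->comm in O(1).
 */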
1540
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001541static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001542{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001543 unsigned map;
1544
Steven Rostedt4ca53082009-03-16 19:20:15 -04001545 if (!pid) {
1546 strcpy(comm, "<idle>");
1547 return;
1548 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001549
Steven Rostedt74bf4072010-01-25 15:11:53 -05001550 if (WARN_ON_ONCE(pid < 0)) {
1551 strcpy(comm, "<XXX>");
1552 return;
1553 }
1554
Steven Rostedt4ca53082009-03-16 19:20:15 -04001555 if (pid > PID_MAX_DEFAULT) {
1556 strcpy(comm, "<...>");
1557 return;
1558 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001559
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001560 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001561 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001562 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001563 else
1564 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001565}
1566
1567void trace_find_cmdline(int pid, char comm[])
1568{
1569 preempt_disable();
1570 arch_spin_lock(&trace_cmdline_lock);
1571
1572 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001573
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001574 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001575 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576}
1577
Ingo Molnare309b412008-05-12 21:20:51 +02001578void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001579{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001580 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001581 return;
1582
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001583 if (!__this_cpu_read(trace_cmdline_save))
1584 return;
1585
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001586 if (trace_save_cmdline(tsk))
1587 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588}
1589
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001590void
Steven Rostedt38697052008-10-01 13:14:09 -04001591tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1592 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001593{
1594 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001595
Steven Rostedt777e2082008-09-29 23:02:42 -04001596 entry->preempt_count = pc & 0xff;
1597 entry->pid = (tsk) ? tsk->pid : 0;
1598 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001599#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001600 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001601#else
1602 TRACE_FLAG_IRQS_NOSUPPORT |
1603#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001604 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1605 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001606 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1607 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001608}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001609EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
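/*
 * Sketch of how a consumer decodes the flags set above (illustrative;
 * the real decoding lives in the output code in trace_output.c):
 *
 *	if (entry->flags & TRACE_FLAG_IRQS_OFF)
 *		...	// event was recorded with irqs disabled
 *	if (entry->flags & TRACE_FLAG_HARDIRQ)
 *		...	// event was recorded in hard irq context
 *
 * entry->preempt_count keeps only the low byte of pc, enough for the
 * printed preempt depth.
 */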
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001610
Steven Rostedte77405a2009-09-02 14:17:06 -04001611struct ring_buffer_event *
1612trace_buffer_lock_reserve(struct ring_buffer *buffer,
1613 int type,
1614 unsigned long len,
1615 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001616{
1617 struct ring_buffer_event *event;
1618
Steven Rostedte77405a2009-09-02 14:17:06 -04001619 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001620 if (event != NULL) {
1621 struct trace_entry *ent = ring_buffer_event_data(event);
1622
1623 tracing_generic_entry_update(ent, flags, pc);
1624 ent->type = type;
1625 }
1626
1627 return event;
1628}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001629
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001630void
1631__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1632{
1633 __this_cpu_write(trace_cmdline_save, true);
1634 ring_buffer_unlock_commit(buffer, event);
1635}
1636
Steven Rostedte77405a2009-09-02 14:17:06 -04001637static inline void
1638__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1639 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001640 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001641{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001642 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001643
Steven Rostedte77405a2009-09-02 14:17:06 -04001644 ftrace_trace_stack(buffer, flags, 6, pc);
1645 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001646}
1647
Steven Rostedte77405a2009-09-02 14:17:06 -04001648void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1649 struct ring_buffer_event *event,
1650 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001651{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001652 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001653}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001654EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001655
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001656static struct ring_buffer *temp_buffer;
1657
Steven Rostedtef5580d2009-02-27 19:38:04 -05001658struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001659trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1660 struct ftrace_event_file *ftrace_file,
1661 int type, unsigned long len,
1662 unsigned long flags, int pc)
1663{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001664 struct ring_buffer_event *entry;
1665
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001666 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001667 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001668 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001669 /*
1670 * If tracing is off, but we have triggers enabled
1671 * we still need to look at the event data. Use the temp_buffer
1672	 * to store the trace event for the trigger to use. It's recursion
1673 * safe and will not be recorded anywhere.
1674 */
1675 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1676 *current_rb = temp_buffer;
1677 entry = trace_buffer_lock_reserve(*current_rb,
1678 type, len, flags, pc);
1679 }
1680 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001681}
1682EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1683
1684struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001685trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1686 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001687 unsigned long flags, int pc)
1688{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001689 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001690 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001691 type, len, flags, pc);
1692}
Steven Rostedt94487d62009-05-05 19:22:53 -04001693EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001694
Steven Rostedte77405a2009-09-02 14:17:06 -04001695void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1696 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001697 unsigned long flags, int pc)
1698{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001699 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001700}
Steven Rostedt94487d62009-05-05 19:22:53 -04001701EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001702
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001703void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1704 struct ring_buffer_event *event,
1705 unsigned long flags, int pc,
1706 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001707{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001708 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001709
1710 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1711 ftrace_trace_userstack(buffer, flags, pc);
1712}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001713EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001714
Steven Rostedte77405a2009-09-02 14:17:06 -04001715void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1716 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001717{
Steven Rostedte77405a2009-09-02 14:17:06 -04001718 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001719}
Steven Rostedt12acd472009-04-17 16:01:56 -04001720EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001721
Ingo Molnare309b412008-05-12 21:20:51 +02001722void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001723trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001724 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1725 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001726{
Tom Zanussie1112b42009-03-31 00:48:49 -05001727 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001728 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001729 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001730 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001731
Steven Rostedtd7690412008-10-01 00:29:53 -04001732 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001733 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001734 return;
1735
Steven Rostedte77405a2009-09-02 14:17:06 -04001736 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001737 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001738 if (!event)
1739 return;
1740 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001741 entry->ip = ip;
1742 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001743
Tom Zanussif306cc82013-10-24 08:34:17 -05001744 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001745 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001746}
1747
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001748#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001749
1750#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1751struct ftrace_stack {
1752 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1753};
1754
1755static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1756static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1757
Steven Rostedte77405a2009-09-02 14:17:06 -04001758static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001759 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001760 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001761{
Tom Zanussie1112b42009-03-31 00:48:49 -05001762 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001763 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001764 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001765 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001766 int use_stack;
1767 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001768
1769 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001770 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001771
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001772 /*
1773	 * Since events can happen in NMIs, there's no safe way to
1774 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1775 * or NMI comes in, it will just have to use the default
1776 * FTRACE_STACK_SIZE.
1777 */
1778 preempt_disable_notrace();
1779
Shan Wei82146522012-11-19 13:21:01 +08001780 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001781 /*
1782 * We don't need any atomic variables, just a barrier.
1783 * If an interrupt comes in, we don't care, because it would
1784 * have exited and put the counter back to what we want.
1785 * We just need a barrier to keep gcc from moving things
1786 * around.
1787 */
1788 barrier();
1789 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001790 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001791 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1792
1793 if (regs)
1794 save_stack_trace_regs(regs, &trace);
1795 else
1796 save_stack_trace(&trace);
1797
1798 if (trace.nr_entries > size)
1799 size = trace.nr_entries;
1800 } else
1801 /* From now on, use_stack is a boolean */
1802 use_stack = 0;
1803
1804 size *= sizeof(unsigned long);
1805
1806 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1807 sizeof(*entry) + size, flags, pc);
1808 if (!event)
1809 goto out;
1810 entry = ring_buffer_event_data(event);
1811
1812 memset(&entry->caller, 0, size);
1813
1814 if (use_stack)
1815 memcpy(&entry->caller, trace.entries,
1816 trace.nr_entries * sizeof(unsigned long));
1817 else {
1818 trace.max_entries = FTRACE_STACK_ENTRIES;
1819 trace.entries = entry->caller;
1820 if (regs)
1821 save_stack_trace_regs(regs, &trace);
1822 else
1823 save_stack_trace(&trace);
1824 }
1825
1826 entry->size = trace.nr_entries;
1827
Tom Zanussif306cc82013-10-24 08:34:17 -05001828 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001829 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001830
1831 out:
1832 /* Again, don't let gcc optimize things here */
1833 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001834 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001835 preempt_enable_notrace();
1836
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001837}
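/*
 * The reservation above in a nutshell: the first stack dump on a cpu
 * sees ftrace_stack_reserve go 0 -> 1 and may use the big per-cpu
 * ftrace_stack; a nested interrupt or NMI in that window sees 2 (or
 * more) and falls back to the small on-event buffer, so the two never
 * write to the same storage.
 */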
1838
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001839void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1840 int skip, int pc, struct pt_regs *regs)
1841{
1842 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1843 return;
1844
1845 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1846}
1847
Steven Rostedte77405a2009-09-02 14:17:06 -04001848void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1849 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001850{
1851 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1852 return;
1853
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001854 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001855}
1856
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001857void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1858 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001859{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001860 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001861}
1862
Steven Rostedt03889382009-12-11 09:48:22 -05001863/**
1864 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001865 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001866 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001867void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001868{
1869 unsigned long flags;
1870
1871 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001872 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001873
1874 local_save_flags(flags);
1875
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001876 /*
1877	 * Skip 3 more; that seems to get us to the caller of
1878	 * this function.
1879 */
1880 skip += 3;
1881 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1882 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001883}
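/*
 * Typical use (hypothetical call site): drop this into suspect code to
 * record how it was reached, without stopping the machine:
 *
 *	trace_dump_stack(0);	// 0: skip no extra frames
 *
 * The recorded stack shows up inline in the trace output.
 */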
1884
Steven Rostedt91e86e52010-11-10 12:56:12 +01001885static DEFINE_PER_CPU(int, user_stack_count);
1886
Steven Rostedte77405a2009-09-02 14:17:06 -04001887void
1888ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001889{
Tom Zanussie1112b42009-03-31 00:48:49 -05001890 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001891 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001892 struct userstack_entry *entry;
1893 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001894
1895 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1896 return;
1897
Steven Rostedtb6345872010-03-12 20:03:30 -05001898 /*
1899	 * NMIs cannot handle page faults, even with fixups.
1900	 * Saving the user stack can (and often does) fault.
1901 */
1902 if (unlikely(in_nmi()))
1903 return;
1904
Steven Rostedt91e86e52010-11-10 12:56:12 +01001905 /*
1906 * prevent recursion, since the user stack tracing may
1907 * trigger other kernel events.
1908 */
1909 preempt_disable();
1910 if (__this_cpu_read(user_stack_count))
1911 goto out;
1912
1913 __this_cpu_inc(user_stack_count);
1914
Steven Rostedte77405a2009-09-02 14:17:06 -04001915 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001916 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001917 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001918 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001919 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001920
Steven Rostedt48659d32009-09-11 11:36:23 -04001921 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001922 memset(&entry->caller, 0, sizeof(entry->caller));
1923
1924 trace.nr_entries = 0;
1925 trace.max_entries = FTRACE_STACK_ENTRIES;
1926 trace.skip = 0;
1927 trace.entries = entry->caller;
1928
1929 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001930 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001931 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001932
Li Zefan1dbd1952010-12-09 15:47:56 +08001933 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001934 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001935 out:
1936 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001937}
1938
Hannes Eder4fd27352009-02-10 19:44:12 +01001939#ifdef UNUSED
1940static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001941{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001942 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001943}
Hannes Eder4fd27352009-02-10 19:44:12 +01001944#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001945
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001946#endif /* CONFIG_STACKTRACE */
1947
Steven Rostedt07d777f2011-09-22 14:01:55 -04001948/* created for use with alloc_percpu */
1949struct trace_buffer_struct {
1950 char buffer[TRACE_BUF_SIZE];
1951};
1952
1953static struct trace_buffer_struct *trace_percpu_buffer;
1954static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1955static struct trace_buffer_struct *trace_percpu_irq_buffer;
1956static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1957
1958/*
1959 * The buffer used is dependent on the context. There is a per cpu
1960	 * buffer for normal context, softirq context, hard irq context and
1961	 * for NMI context. This allows for lockless recording.
1962 *
1963 * Note, if the buffers failed to be allocated, then this returns NULL
1964 */
1965static char *get_trace_buf(void)
1966{
1967 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001968
1969 /*
1970 * If we have allocated per cpu buffers, then we do not
1971 * need to do any locking.
1972 */
1973 if (in_nmi())
1974 percpu_buffer = trace_percpu_nmi_buffer;
1975 else if (in_irq())
1976 percpu_buffer = trace_percpu_irq_buffer;
1977 else if (in_softirq())
1978 percpu_buffer = trace_percpu_sirq_buffer;
1979 else
1980 percpu_buffer = trace_percpu_buffer;
1981
1982 if (!percpu_buffer)
1983 return NULL;
1984
Shan Weid8a03492012-11-13 09:53:04 +08001985 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001986}
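/*
 * The selection above, spelled out: a trace_printk() from process
 * context that is interrupted by a softirq, then a hard irq, then an
 * NMI ends up using four distinct per-cpu buffers, one per context
 * level, which is what makes the scheme lockless.
 */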
1987
1988static int alloc_percpu_trace_buffer(void)
1989{
1990 struct trace_buffer_struct *buffers;
1991 struct trace_buffer_struct *sirq_buffers;
1992 struct trace_buffer_struct *irq_buffers;
1993 struct trace_buffer_struct *nmi_buffers;
1994
1995 buffers = alloc_percpu(struct trace_buffer_struct);
1996 if (!buffers)
1997 goto err_warn;
1998
1999 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2000 if (!sirq_buffers)
2001 goto err_sirq;
2002
2003 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2004 if (!irq_buffers)
2005 goto err_irq;
2006
2007 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2008 if (!nmi_buffers)
2009 goto err_nmi;
2010
2011 trace_percpu_buffer = buffers;
2012 trace_percpu_sirq_buffer = sirq_buffers;
2013 trace_percpu_irq_buffer = irq_buffers;
2014 trace_percpu_nmi_buffer = nmi_buffers;
2015
2016 return 0;
2017
2018 err_nmi:
2019 free_percpu(irq_buffers);
2020 err_irq:
2021 free_percpu(sirq_buffers);
2022 err_sirq:
2023 free_percpu(buffers);
2024 err_warn:
2025 WARN(1, "Could not allocate percpu trace_printk buffer");
2026 return -ENOMEM;
2027}
2028
Steven Rostedt81698832012-10-11 10:15:05 -04002029static int buffers_allocated;
2030
Steven Rostedt07d777f2011-09-22 14:01:55 -04002031void trace_printk_init_buffers(void)
2032{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002033 if (buffers_allocated)
2034 return;
2035
2036 if (alloc_percpu_trace_buffer())
2037 return;
2038
Steven Rostedt2184db42014-05-28 13:14:40 -04002039 /* trace_printk() is for debug use only. Don't use it in production. */
2040
Borislav Petkov69a1c992015-01-27 17:17:20 +01002041 pr_warning("\n");
2042 pr_warning("**********************************************************\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002043 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2044 pr_warning("** **\n");
2045 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2046 pr_warning("** **\n");
2047 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
Frans Klavereff264e2014-11-07 15:53:44 +01002048 pr_warning("** unsafe for production use. **\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002049 pr_warning("** **\n");
2050 pr_warning("** If you see this message and you are not debugging **\n");
2051 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2052 pr_warning("** **\n");
2053 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2054 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002055
Steven Rostedtb382ede62012-10-10 21:44:34 -04002056 /* Expand the buffers to set size */
2057 tracing_update_buffers();
2058
Steven Rostedt07d777f2011-09-22 14:01:55 -04002059 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002060
2061 /*
2062 * trace_printk_init_buffers() can be called by modules.
2063 * If that happens, then we need to start cmdline recording
2064 * directly here. If the global_trace.buffer is already
2065 * allocated here, then this was called by module code.
2066 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002067 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002068 tracing_start_cmdline_record();
2069}
2070
2071void trace_printk_start_comm(void)
2072{
2073 /* Start tracing comms if trace printk is set */
2074 if (!buffers_allocated)
2075 return;
2076 tracing_start_cmdline_record();
2077}
2078
2079static void trace_printk_start_stop_comm(int enabled)
2080{
2081 if (!buffers_allocated)
2082 return;
2083
2084 if (enabled)
2085 tracing_start_cmdline_record();
2086 else
2087 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002088}
2089
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002090/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002091 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002092 *
2093 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002094int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002095{
Tom Zanussie1112b42009-03-31 00:48:49 -05002096 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002097 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002098 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002099 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002100 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002101 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002102 char *tbuffer;
2103 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002104
2105 if (unlikely(tracing_selftest_running || tracing_disabled))
2106 return 0;
2107
2108 /* Don't pollute graph traces with trace_vprintk internals */
2109 pause_graph_tracing();
2110
2111 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002112 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002113
Steven Rostedt07d777f2011-09-22 14:01:55 -04002114 tbuffer = get_trace_buf();
2115 if (!tbuffer) {
2116 len = 0;
2117 goto out;
2118 }
2119
2120 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2121
2122 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002123 goto out;
2124
Steven Rostedt07d777f2011-09-22 14:01:55 -04002125 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002126 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002127 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002128 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2129 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002130 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002131 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002132 entry = ring_buffer_event_data(event);
2133 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002134 entry->fmt = fmt;
2135
Steven Rostedt07d777f2011-09-22 14:01:55 -04002136 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002137 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002138 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002139 ftrace_trace_stack(buffer, flags, 6, pc);
2140 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002141
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002142out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002143 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002144 unpause_graph_tracing();
2145
2146 return len;
2147}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002148EXPORT_SYMBOL_GPL(trace_vbprintk);
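/*
 * trace_vbprintk() is the binary backend used by trace_printk() when
 * the format takes arguments; a call such as (hypothetical call site):
 *
 *	trace_printk("read %d bytes from %s\n", ret, name);
 *
 * stores just the fmt pointer plus the binary arguments via
 * vbin_printf(), deferring the costly string formatting to read time.
 */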
2149
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002150static int
2151__trace_array_vprintk(struct ring_buffer *buffer,
2152 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002153{
Tom Zanussie1112b42009-03-31 00:48:49 -05002154 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002155 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002156 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002157 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002158 unsigned long flags;
2159 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002160
2161 if (tracing_disabled || tracing_selftest_running)
2162 return 0;
2163
Steven Rostedt07d777f2011-09-22 14:01:55 -04002164 /* Don't pollute graph traces with trace_vprintk internals */
2165 pause_graph_tracing();
2166
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002167 pc = preempt_count();
2168 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002169
Steven Rostedt07d777f2011-09-22 14:01:55 -04002170
2171 tbuffer = get_trace_buf();
2172 if (!tbuffer) {
2173 len = 0;
2174 goto out;
2175 }
2176
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002177 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002178
Steven Rostedt07d777f2011-09-22 14:01:55 -04002179 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002180 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002181 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002182 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002183 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002184 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002185 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002186 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002187
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002188 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002189 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002190 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002191 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002192 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002193 out:
2194 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002195 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002196
2197 return len;
2198}
Steven Rostedt659372d2009-09-03 19:11:07 -04002199
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002200int trace_array_vprintk(struct trace_array *tr,
2201 unsigned long ip, const char *fmt, va_list args)
2202{
2203 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2204}
2205
2206int trace_array_printk(struct trace_array *tr,
2207 unsigned long ip, const char *fmt, ...)
2208{
2209 int ret;
2210 va_list ap;
2211
2212 if (!(trace_flags & TRACE_ITER_PRINTK))
2213 return 0;
2214
2215 va_start(ap, fmt);
2216 ret = trace_array_vprintk(tr, ip, fmt, ap);
2217 va_end(ap);
2218 return ret;
2219}
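/*
 * Usage sketch (hypothetical call site): unlike trace_printk(), this
 * writes into one specific trace array, e.g. an instance the caller
 * created:
 *
 *	trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
 *
 * Note it is still gated on the global TRACE_ITER_PRINTK flag.
 */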
2220
2221int trace_array_printk_buf(struct ring_buffer *buffer,
2222 unsigned long ip, const char *fmt, ...)
2223{
2224 int ret;
2225 va_list ap;
2226
2227 if (!(trace_flags & TRACE_ITER_PRINTK))
2228 return 0;
2229
2230 va_start(ap, fmt);
2231 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2232 va_end(ap);
2233 return ret;
2234}
2235
Steven Rostedt659372d2009-09-03 19:11:07 -04002236int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2237{
Steven Rostedta813a152009-10-09 01:41:35 -04002238 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002239}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002240EXPORT_SYMBOL_GPL(trace_vprintk);
2241
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002242static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002243{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002244 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2245
Steven Rostedt5a90f572008-09-03 17:42:51 -04002246 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002247 if (buf_iter)
2248 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002249}
2250
Ingo Molnare309b412008-05-12 21:20:51 +02002251static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002252peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2253 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002254{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002255 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002256 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002257
Steven Rostedtd7690412008-10-01 00:29:53 -04002258 if (buf_iter)
2259 event = ring_buffer_iter_peek(buf_iter, ts);
2260 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002261 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002262 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002263
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002264 if (event) {
2265 iter->ent_size = ring_buffer_event_length(event);
2266 return ring_buffer_event_data(event);
2267 }
2268 iter->ent_size = 0;
2269 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002270}
Steven Rostedtd7690412008-10-01 00:29:53 -04002271
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002272static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002273__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2274 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002275{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002276 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002277 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002278 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002279 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002280 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002281 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002282 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002283 int cpu;
2284
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002285 /*
2286	 * If we are in a per_cpu trace file, don't bother iterating over
2287	 * all the cpus; just peek at that one cpu directly.
2288 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002289 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002290 if (ring_buffer_empty_cpu(buffer, cpu_file))
2291 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002292 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002293 if (ent_cpu)
2294 *ent_cpu = cpu_file;
2295
2296 return ent;
2297 }
2298
Steven Rostedtab464282008-05-12 21:21:00 +02002299 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002300
2301 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002302 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002303
Steven Rostedtbc21b472010-03-31 19:49:26 -04002304 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002305
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002306 /*
2307 * Pick the entry with the smallest timestamp:
2308 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002309 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002310 next = ent;
2311 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002312 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002313 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002314 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002315 }
2316 }
2317
Steven Rostedt12b5da32012-03-27 10:43:28 -04002318 iter->ent_size = next_size;
2319
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002320 if (ent_cpu)
2321 *ent_cpu = next_cpu;
2322
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002323 if (ent_ts)
2324 *ent_ts = next_ts;
2325
Steven Rostedtbc21b472010-03-31 19:49:26 -04002326 if (missing_events)
2327 *missing_events = next_lost;
2328
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002329 return next;
2330}
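/*
 * __find_next_entry() is in effect one step of a k-way merge: each cpu
 * buffer is already ordered by time, so peeking every cpu and taking
 * the smallest timestamp yields a globally ordered stream.
 * Illustrative state:
 *
 *	cpu0: [ts=100, ts=130]   cpu1: [ts=110]   cpu2: [ts=90]
 *
 * returns the ts=90 entry with next_cpu == 2.
 */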
2331
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002332/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002333struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2334 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002335{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002336 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002337}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002338
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002339/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002340void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002341{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002342 iter->ent = __find_next_entry(iter, &iter->cpu,
2343 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002344
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002345 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002346 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002347
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002348 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002349}
2350
Ingo Molnare309b412008-05-12 21:20:51 +02002351static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002352{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002353 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002354 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355}
2356
Ingo Molnare309b412008-05-12 21:20:51 +02002357static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002358{
2359 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002360 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002361 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002362
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002363 WARN_ON_ONCE(iter->leftover);
2364
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002365 (*pos)++;
2366
2367 /* can't go backwards */
2368 if (iter->idx > i)
2369 return NULL;
2370
2371 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002372 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002373 else
2374 ent = iter;
2375
2376 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002377 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002378
2379 iter->pos = *pos;
2380
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002381 return ent;
2382}
2383
Jason Wessel955b61e2010-08-05 09:22:23 -05002384void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002385{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002386 struct ring_buffer_event *event;
2387 struct ring_buffer_iter *buf_iter;
2388 unsigned long entries = 0;
2389 u64 ts;
2390
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002391 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002392
Steven Rostedt6d158a82012-06-27 20:46:14 -04002393 buf_iter = trace_buffer_iter(iter, cpu);
2394 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002395 return;
2396
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002397 ring_buffer_iter_reset(buf_iter);
2398
2399 /*
2400	 * With the max latency tracers, a reset may never have taken
2401	 * place on a cpu. This is evident when the timestamp is
2402	 * before the start of the buffer.
2403 */
2404 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002405 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002406 break;
2407 entries++;
2408 ring_buffer_read(buf_iter, NULL);
2409 }
2410
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002411 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002412}
2413
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002414/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002415 * The current tracer is copied to avoid global locking
2416 * all around.
2417 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418static void *s_start(struct seq_file *m, loff_t *pos)
2419{
2420 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002421 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002422 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002423 void *p = NULL;
2424 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002425 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002426
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002427 /*
2428 * copy the tracer to avoid using a global lock all around.
2429	 * iter->trace is a copy of current_trace; the pointer to the
2430 * name may be used instead of a strcmp(), as iter->trace->name
2431 * will point to the same string as current_trace->name.
2432 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002433 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002434 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2435 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002436 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002437
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002438#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002439 if (iter->snapshot && iter->trace->use_max_tr)
2440 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002441#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002442
2443 if (!iter->snapshot)
2444 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002445
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002446 if (*pos != iter->pos) {
2447 iter->ent = NULL;
2448 iter->cpu = 0;
2449 iter->idx = -1;
2450
Steven Rostedtae3b5092013-01-23 15:22:59 -05002451 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002452 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002453 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002454 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002455 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002456
Lai Jiangshanac91d852010-03-02 17:54:50 +08002457 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002458 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2459 ;
2460
2461 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002462 /*
2463 * If we overflowed the seq_file before, then we want
2464 * to just reuse the trace_seq buffer again.
2465 */
2466 if (iter->leftover)
2467 p = iter;
2468 else {
2469 l = *pos - 1;
2470 p = s_next(m, p, &l);
2471 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002472 }
2473
Lai Jiangshan4f535962009-05-18 19:35:34 +08002474 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002475 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002476 return p;
2477}
2478
2479static void s_stop(struct seq_file *m, void *p)
2480{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002481 struct trace_iterator *iter = m->private;
2482
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002483#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002484 if (iter->snapshot && iter->trace->use_max_tr)
2485 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002486#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002487
2488 if (!iter->snapshot)
2489 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002490
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002491 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002492 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002493}
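/*
 * s_start()/s_next()/s_stop() follow the usual seq_file contract; the
 * wiring, in sketch form (tracer_seq_ops is defined further down in
 * this file):
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start	= s_start,
 *		.next	= s_next,
 *		.stop	= s_stop,
 *		.show	= s_show,
 *	};
 *
 * seq_read() brackets every batch of s_next() calls with one s_start()
 * and one s_stop(), which is why the locks taken in s_start() are
 * released in s_stop().
 */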
2494
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002495static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002496get_total_entries(struct trace_buffer *buf,
2497 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002498{
2499 unsigned long count;
2500 int cpu;
2501
2502 *total = 0;
2503 *entries = 0;
2504
2505 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002506 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002507 /*
2508 * If this buffer has skipped entries, then we hold all
2509 * entries for the trace and we need to ignore the
2510 * ones before the time stamp.
2511 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002512 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2513 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002514 /* total is the same as the entries */
2515 *total += count;
2516 } else
2517 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002518 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002519 *entries += count;
2520 }
2521}
2522
Ingo Molnare309b412008-05-12 21:20:51 +02002523static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002524{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002525 seq_puts(m, "# _------=> CPU# \n"
2526 "# / _-----=> irqs-off \n"
2527 "# | / _----=> need-resched \n"
2528 "# || / _---=> hardirq/softirq \n"
2529 "# ||| / _--=> preempt-depth \n"
2530 "# |||| / delay \n"
2531 "# cmd pid ||||| time | caller \n"
2532 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002533}
2534
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002535static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002536{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002537 unsigned long total;
2538 unsigned long entries;
2539
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002540 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002541 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2542 entries, total, num_online_cpus());
2543 seq_puts(m, "#\n");
2544}
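
/*
 * A minimal user-space sketch of consuming the header that
 * print_event_info() above emits at the top of the "trace" file.
 * The tracefs mount point is an assumption; it may also be
 * /sys/kernel/debug/tracing on older systems.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long entries, total;
	int nr_cpus;
	char line[256];
	FILE *f = fopen("/sys/kernel/tracing/trace", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "# entries-in-buffer/entries-written: %lu/%lu #P:%d",
			   &entries, &total, &nr_cpus) == 3) {
			printf("%lu kept of %lu written on %d CPUs\n",
			       entries, total, nr_cpus);
			break;
		}
	}
	fclose(f);
	return 0;
}
#endif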
2545
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002546static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002547{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002548 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002549 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2550 "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002551}
2552
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002553static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002554{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002555 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002556 seq_puts(m, "# _-----=> irqs-off\n"
2557 "# / _----=> need-resched\n"
2558 "# | / _---=> hardirq/softirq\n"
2559 "# || / _--=> preempt-depth\n"
2560 "# ||| / delay\n"
2561 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2562 "# | | | |||| | |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05002563}
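
/*
 * Worked example (values hypothetical) of one line as rendered under
 * the irq-info header above:
 *
 *             bash-2277  [001] d..2   123.456789: vfs_read <-SyS_read
 *
 * TASK-PID is bash-2277 on CPU 001; the four flag columns read
 * irqs off ('d'), no resched pending ('.'), no hard/soft irq context
 * ('.'), preempt-depth 2; then the timestamp in seconds and the
 * traced function.
 */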
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002564
Jiri Olsa62b915f2010-04-02 19:01:22 +02002565void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002566print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2567{
2568 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002569 struct trace_buffer *buf = iter->trace_buffer;
2570 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002571 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002572 unsigned long entries;
2573 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002574 const char *name = "preemption";
2575
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002576 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002577
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002578 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002579
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002580 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002581 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002582 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002583 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002584 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002585 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002586 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002587 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002588 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002589 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002590#if defined(CONFIG_PREEMPT_NONE)
2591 "server",
2592#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2593 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002594#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002595 "preempt",
2596#else
2597 "unknown",
2598#endif
2599 /* These are reserved for later use */
2600 0, 0, 0, 0);
2601#ifdef CONFIG_SMP
2602 seq_printf(m, " #P:%d)\n", num_online_cpus());
2603#else
2604 seq_puts(m, ")\n");
2605#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002606 seq_puts(m, "# -----------------\n");
2607 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002608 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002609 data->comm, data->pid,
2610 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002611 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002612 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002613
2614 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002615 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002616 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2617 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002618 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002619 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2620 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002621 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002622 }
2623
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002624 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002625}
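
/*
 * Worked example of the latency banner assembled above, with
 * hypothetical values for a 4-CPU preemptible kernel running the
 * irqsoff tracer:
 *
 * # irqsoff latency trace v1.1.5 on 4.0.0
 * # --------------------------------------------------------------------
 * # latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 * #    -----------------
 * #    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
 * #    -----------------
 * #  => started at: __lock_task_sighand
 * #  => ended at:   _raw_spin_unlock_irqrestore
 */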
2626
Steven Rostedta3097202008-11-07 22:36:02 -05002627static void test_cpu_buff_start(struct trace_iterator *iter)
2628{
2629 struct trace_seq *s = &iter->seq;
2630
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002631 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2632 return;
2633
2634 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2635 return;
2636
Rusty Russell44623442009-01-01 10:12:23 +10302637 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002638 return;
2639
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002640 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002641 return;
2642
Rusty Russell44623442009-01-01 10:12:23 +10302643 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002644
2645 /* Don't print started cpu buffer for the first entry of the trace */
2646 if (iter->idx > 1)
2647 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2648 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002649}
2650
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002651static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002652{
Steven Rostedt214023c2008-05-12 21:20:46 +02002653 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002654 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002655 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002656 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002657
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002658 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002659
Steven Rostedta3097202008-11-07 22:36:02 -05002660 test_cpu_buff_start(iter);
2661
Steven Rostedtf633cef2008-12-23 23:24:13 -05002662 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002663
2664 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002665 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2666 trace_print_lat_context(iter);
2667 else
2668 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002669 }
2670
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002671 if (trace_seq_has_overflowed(s))
2672 return TRACE_TYPE_PARTIAL_LINE;
2673
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002674 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002675 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002676
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002677 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002678
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002679 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002680}
2681
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002682static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002683{
2684 struct trace_seq *s = &iter->seq;
2685 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002686 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002687
2688 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002689
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002690 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2691 trace_seq_printf(s, "%d %d %llu ",
2692 entry->pid, iter->cpu, iter->ts);
2693
2694 if (trace_seq_has_overflowed(s))
2695 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002696
Steven Rostedtf633cef2008-12-23 23:24:13 -05002697 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002698 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002699 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002700
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002701 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002702
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002703 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002704}
2705
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002706static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002707{
2708 struct trace_seq *s = &iter->seq;
2709 unsigned char newline = '\n';
2710 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002711 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002712
2713 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002714
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002715 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002716 SEQ_PUT_HEX_FIELD(s, entry->pid);
2717 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2718 SEQ_PUT_HEX_FIELD(s, iter->ts);
2719 if (trace_seq_has_overflowed(s))
2720 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002721 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002722
Steven Rostedtf633cef2008-12-23 23:24:13 -05002723 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002724 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002725 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002726 if (ret != TRACE_TYPE_HANDLED)
2727 return ret;
2728 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002729
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002730 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002731
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002732 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002733}
2734
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002735static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002736{
2737 struct trace_seq *s = &iter->seq;
2738 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002739 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002740
2741 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002742
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002743 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002744 SEQ_PUT_FIELD(s, entry->pid);
2745 SEQ_PUT_FIELD(s, iter->cpu);
2746 SEQ_PUT_FIELD(s, iter->ts);
2747 if (trace_seq_has_overflowed(s))
2748 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002749 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002750
Steven Rostedtf633cef2008-12-23 23:24:13 -05002751 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002752 return event ? event->funcs->binary(iter, 0, event) :
2753 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002754}
2755
Jiri Olsa62b915f2010-04-02 19:01:22 +02002756int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002757{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002758 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002759 int cpu;
2760
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002761 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002762 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002763 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002764 buf_iter = trace_buffer_iter(iter, cpu);
2765 if (buf_iter) {
2766 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002767 return 0;
2768 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002769 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002770 return 0;
2771 }
2772 return 1;
2773 }
2774
Steven Rostedtab464282008-05-12 21:21:00 +02002775 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002776 buf_iter = trace_buffer_iter(iter, cpu);
2777 if (buf_iter) {
2778 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002779 return 0;
2780 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002781 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002782 return 0;
2783 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002784 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002785
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002786 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002787}
2788
Lai Jiangshan4f535962009-05-18 19:35:34 +08002789/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002790enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002791{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002792 enum print_line_t ret;
2793
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002794 if (iter->lost_events) {
2795 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2796 iter->cpu, iter->lost_events);
2797 if (trace_seq_has_overflowed(&iter->seq))
2798 return TRACE_TYPE_PARTIAL_LINE;
2799 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04002800
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002801 if (iter->trace && iter->trace->print_line) {
2802 ret = iter->trace->print_line(iter);
2803 if (ret != TRACE_TYPE_UNHANDLED)
2804 return ret;
2805 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002806
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002807 if (iter->ent->type == TRACE_BPUTS &&
2808 trace_flags & TRACE_ITER_PRINTK &&
2809 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2810 return trace_print_bputs_msg_only(iter);
2811
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002812 if (iter->ent->type == TRACE_BPRINT &&
2813 trace_flags & TRACE_ITER_PRINTK &&
2814 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002815 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002816
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002817 if (iter->ent->type == TRACE_PRINT &&
2818 trace_flags & TRACE_ITER_PRINTK &&
2819 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002820 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002821
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002822 if (trace_flags & TRACE_ITER_BIN)
2823 return print_bin_fmt(iter);
2824
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002825 if (trace_flags & TRACE_ITER_HEX)
2826 return print_hex_fmt(iter);
2827
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002828 if (trace_flags & TRACE_ITER_RAW)
2829 return print_raw_fmt(iter);
2830
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002831 return print_trace_fmt(iter);
2832}
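
/*
 * The dispatch above is steered from user space through the
 * trace_options file.  A minimal sketch, assuming the conventional
 * option names ("raw", "hex", "bin") that map to
 * TRACE_ITER_RAW/HEX/BIN and a tracefs mount at /sys/kernel/tracing:
 */
#if 0
#include <stdio.h>

static int set_option(const char *opt)
{
	FILE *f = fopen("/sys/kernel/tracing/trace_options", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", opt);
	return fclose(f);
}

int main(void)
{
	set_option("raw");	/* lines now come from print_raw_fmt() */
	/* ... read the trace file here ... */
	set_option("noraw");	/* back to the default print_trace_fmt() */
	return 0;
}
#endif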
2833
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002834void trace_latency_header(struct seq_file *m)
2835{
2836 struct trace_iterator *iter = m->private;
2837
2838 /* print nothing if the buffers are empty */
2839 if (trace_empty(iter))
2840 return;
2841
2842 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2843 print_trace_header(m, iter);
2844
2845 if (!(trace_flags & TRACE_ITER_VERBOSE))
2846 print_lat_help_header(m);
2847}
2848
Jiri Olsa62b915f2010-04-02 19:01:22 +02002849void trace_default_header(struct seq_file *m)
2850{
2851 struct trace_iterator *iter = m->private;
2852
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002853 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2854 return;
2855
Jiri Olsa62b915f2010-04-02 19:01:22 +02002856 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2857 /* print nothing if the buffers are empty */
2858 if (trace_empty(iter))
2859 return;
2860 print_trace_header(m, iter);
2861 if (!(trace_flags & TRACE_ITER_VERBOSE))
2862 print_lat_help_header(m);
2863 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002864 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2865 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002866 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002867 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002868 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002869 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002870 }
2871}
2872
Steven Rostedte0a413f2011-09-29 21:26:16 -04002873static void test_ftrace_alive(struct seq_file *m)
2874{
2875 if (!ftrace_is_dead())
2876 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002877 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2878 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002879}
2880
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002881#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002882static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002883{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002884 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2885 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2886 "# Takes a snapshot of the main buffer.\n"
2887 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2888 "# (Doesn't have to be '2'; works with any number that\n"
2889 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002890}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002891
2892static void show_snapshot_percpu_help(struct seq_file *m)
2893{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002894 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002895#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002896 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2897 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002898#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002899 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2900 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002901#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002902 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2903 "# (Doesn't have to be '2'; works with any number that\n"
2904 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002905}
2906
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002907static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2908{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002909 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002910 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002911 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002912 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002913
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002914 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002915 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2916 show_snapshot_main_help(m);
2917 else
2918 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002919}
2920#else
2921/* Should never be called */
2922static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2923#endif
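
/*
 * A user-space sketch driving the snapshot file exactly as the help
 * text above documents: '1' allocates (if needed) and takes a
 * snapshot, '2' clears it, '0' frees it.  The tracefs mount point is
 * an assumption.
 */
#if 0
#include <stdio.h>

static int snapshot_cmd(char c)
{
	FILE *f = fopen("/sys/kernel/tracing/snapshot", "w");

	if (!f)
		return -1;
	fputc(c, f);
	return fclose(f);
}

int main(void)
{
	snapshot_cmd('1');	/* take a snapshot of the main buffer */
	/* read /sys/kernel/tracing/snapshot to inspect it ... */
	snapshot_cmd('2');	/* clear it, but keep the allocation */
	snapshot_cmd('0');	/* free the snapshot buffer */
	return 0;
}
#endif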
2924
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002925static int s_show(struct seq_file *m, void *v)
2926{
2927 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002928 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002929
2930 if (iter->ent == NULL) {
2931 if (iter->tr) {
2932 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2933 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002934 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002935 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002936 if (iter->snapshot && trace_empty(iter))
2937 print_snapshot_help(m, iter);
2938 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002939 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002940 else
2941 trace_default_header(m);
2942
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002943 } else if (iter->leftover) {
2944 /*
2945 * If we filled the seq_file buffer earlier, we
2946 * want to just show it now.
2947 */
2948 ret = trace_print_seq(m, &iter->seq);
2949
2950 /* ret should this time be zero, but you never know */
2951 iter->leftover = ret;
2952
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002953 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002954 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002955 ret = trace_print_seq(m, &iter->seq);
2956 /*
2957 * If we overflow the seq_file buffer, then it will
2958 * ask us for this data again at start up.
2959 * Use that instead.
2960 * ret is 0 if seq_file write succeeded.
2961 * -1 otherwise.
2962 */
2963 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002964 }
2965
2966 return 0;
2967}
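
/*
 * Toy model (sizes and entry text invented) of the iter->leftover
 * logic above: when the rendered line does not fit the output buffer,
 * the already-filled seq buffer is kept and re-emitted on the next
 * ->show() call instead of rendering a fresh entry.
 */
#if 0
#include <stdio.h>
#include <string.h>

static const char *entry = "a line that is too long for the first try";
static char seq[64];
static int leftover;

static int emit(char *out, size_t outsz)
{
	if (!leftover)			/* fresh pass: render into seq */
		snprintf(seq, sizeof(seq), "%s\n", entry);
	if (strlen(seq) >= outsz) {	/* overflow: remember for next time */
		leftover = 1;
		return -1;
	}
	strcpy(out, seq);
	leftover = 0;
	return 0;
}

int main(void)
{
	char small[16], big[64];

	emit(small, sizeof(small));	/* fails and sets leftover */
	emit(big, sizeof(big));		/* reuses seq without re-rendering */
	printf("%s", big);
	return 0;
}
#endif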
2968
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002969/*
2970 * Should be used after trace_array_get(), trace_types_lock
2971 * ensures that i_cdev was already initialized.
2972 */
2973static inline int tracing_get_cpu(struct inode *inode)
2974{
2975 if (inode->i_cdev) /* See trace_create_cpu_file() */
2976 return (long)inode->i_cdev - 1;
2977 return RING_BUFFER_ALL_CPUS;
2978}
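
/*
 * The decode above implies the cpu number is stored in inode->i_cdev
 * as (cpu + 1), so the NULL default still means "all CPUs".  The
 * encoding side, trace_create_cpu_file(), is outside this excerpt;
 * the sketch below only demonstrates the assumed round trip.
 */
#if 0
#include <stdio.h>

#define RING_BUFFER_ALL_CPUS -1

static void *encode_cpu(long cpu)
{
	return (void *)(cpu + 1);	/* assumed storage convention */
}

static int decode_cpu(void *i_cdev)
{
	if (i_cdev)			/* mirrors tracing_get_cpu() */
		return (long)i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

int main(void)
{
	printf("%d %d\n", decode_cpu(encode_cpu(3)), decode_cpu(NULL));
	return 0;			/* prints: 3 -1 */
}
#endif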
2979
James Morris88e9d342009-09-22 16:43:43 -07002980static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002981 .start = s_start,
2982 .next = s_next,
2983 .stop = s_stop,
2984 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002985};
2986
Ingo Molnare309b412008-05-12 21:20:51 +02002987static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002988__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002989{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002990 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002991 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002992 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002993
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002994 if (tracing_disabled)
2995 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002996
Jiri Olsa50e18b92012-04-25 10:23:39 +02002997 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002998 if (!iter)
2999 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003000
Steven Rostedt6d158a82012-06-27 20:46:14 -04003001 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3002 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003003 if (!iter->buffer_iter)
3004 goto release;
3005
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003006 /*
3007 * We make a copy of the current tracer to avoid concurrent
3008 * changes on it while we are reading.
3009 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003010 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003011 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003012 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003013 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003014
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003015 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003016
Li Zefan79f55992009-06-15 14:58:26 +08003017 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003018 goto fail;
3019
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003020 iter->tr = tr;
3021
3022#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003023 /* Currently only the top directory has a snapshot */
3024 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003025 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003026 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003027#endif
3028 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003029 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003030 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003031 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003032 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003033
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003034 /* Notify the tracer early; before we stop tracing. */
3035 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003036 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003037
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003038 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003039 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003040 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3041
David Sharp8be07092012-11-13 12:18:22 -08003042 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003043 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003044 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3045
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003046 /* stop the trace while dumping if we are not opening "snapshot" */
3047 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003048 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003049
Steven Rostedtae3b5092013-01-23 15:22:59 -05003050 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003051 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003052 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003053 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003054 }
3055 ring_buffer_read_prepare_sync();
3056 for_each_tracing_cpu(cpu) {
3057 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003058 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003059 }
3060 } else {
3061 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003062 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003063 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003064 ring_buffer_read_prepare_sync();
3065 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003066 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003067 }
3068
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003069 mutex_unlock(&trace_types_lock);
3070
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003071 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003072
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003073 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003074 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003075 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003076 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003077release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003078 seq_release_private(inode, file);
3079 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003080}
3081
3082int tracing_open_generic(struct inode *inode, struct file *filp)
3083{
Steven Rostedt60a11772008-05-12 21:20:44 +02003084 if (tracing_disabled)
3085 return -ENODEV;
3086
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003087 filp->private_data = inode->i_private;
3088 return 0;
3089}
3090
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003091bool tracing_is_disabled(void)
3092{
3093	return tracing_disabled ? true : false;
3094}
3095
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003096/*
3097 * Open and update trace_array ref count.
3098 * Must have the current trace_array passed to it.
3099 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003100static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003101{
3102 struct trace_array *tr = inode->i_private;
3103
3104 if (tracing_disabled)
3105 return -ENODEV;
3106
3107 if (trace_array_get(tr) < 0)
3108 return -ENODEV;
3109
3110 filp->private_data = inode->i_private;
3111
3112 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003113}
3114
Hannes Eder4fd27352009-02-10 19:44:12 +01003115static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003116{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003117 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003118 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003119 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003120 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003121
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003122 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003123 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003124 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003125 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003126
Oleg Nesterov6484c712013-07-23 17:26:10 +02003127 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003128 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003129 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003130
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003131 for_each_tracing_cpu(cpu) {
3132 if (iter->buffer_iter[cpu])
3133 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3134 }
3135
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003136 if (iter->trace && iter->trace->close)
3137 iter->trace->close(iter);
3138
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003139 if (!iter->snapshot)
3140 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003141 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003142
3143 __trace_array_put(tr);
3144
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003145 mutex_unlock(&trace_types_lock);
3146
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003147 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003148 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003149 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003150 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003151 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003152
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003153 return 0;
3154}
3155
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003156static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3157{
3158 struct trace_array *tr = inode->i_private;
3159
3160 trace_array_put(tr);
3161 return 0;
3162}
3163
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003164static int tracing_single_release_tr(struct inode *inode, struct file *file)
3165{
3166 struct trace_array *tr = inode->i_private;
3167
3168 trace_array_put(tr);
3169
3170 return single_release(inode, file);
3171}
3172
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003173static int tracing_open(struct inode *inode, struct file *file)
3174{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003175 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003176 struct trace_iterator *iter;
3177 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003178
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003179 if (trace_array_get(tr) < 0)
3180 return -ENODEV;
3181
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003182 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003183 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3184 int cpu = tracing_get_cpu(inode);
3185
3186 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003187 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003188 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003189 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003190 }
3191
3192 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003193 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003194 if (IS_ERR(iter))
3195 ret = PTR_ERR(iter);
3196 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3197 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3198 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003199
3200 if (ret < 0)
3201 trace_array_put(tr);
3202
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003203 return ret;
3204}
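
/*
 * Per the FMODE_WRITE/O_TRUNC branch above, a truncating open of
 * "trace" is what erases the buffer; this is the C equivalent of the
 * "echo > trace" idiom in the mini-HOWTO below.  The tracefs mount
 * point is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC);

	if (fd < 0)
		return 1;
	close(fd);	/* the buffer was already reset by the open */
	return 0;
}
#endif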
3205
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003206/*
3207 * Some tracers are not suitable for instance buffers.
3208 * A tracer is always available for the global array (toplevel)
3209 * or if it explicitly states that it is.
3210 */
3211static bool
3212trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3213{
3214 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3215}
3216
3217/* Find the next tracer that this trace array may use */
3218static struct tracer *
3219get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3220{
3221 while (t && !trace_ok_for_array(t, tr))
3222 t = t->next;
3223
3224 return t;
3225}
3226
Ingo Molnare309b412008-05-12 21:20:51 +02003227static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003228t_next(struct seq_file *m, void *v, loff_t *pos)
3229{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003230 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003231 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003232
3233 (*pos)++;
3234
3235 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003236 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003237
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003238 return t;
3239}
3240
3241static void *t_start(struct seq_file *m, loff_t *pos)
3242{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003243 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003244 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003245 loff_t l = 0;
3246
3247 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003248
3249 t = get_tracer_for_array(tr, trace_types);
3250 for (; t && l < *pos; t = t_next(m, t, &l))
3251 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003252
3253 return t;
3254}
3255
3256static void t_stop(struct seq_file *m, void *p)
3257{
3258 mutex_unlock(&trace_types_lock);
3259}
3260
3261static int t_show(struct seq_file *m, void *v)
3262{
3263 struct tracer *t = v;
3264
3265 if (!t)
3266 return 0;
3267
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003268 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003269 if (t->next)
3270 seq_putc(m, ' ');
3271 else
3272 seq_putc(m, '\n');
3273
3274 return 0;
3275}
3276
James Morris88e9d342009-09-22 16:43:43 -07003277static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003278 .start = t_start,
3279 .next = t_next,
3280 .stop = t_stop,
3281 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003282};
3283
3284static int show_traces_open(struct inode *inode, struct file *file)
3285{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003286 struct trace_array *tr = inode->i_private;
3287 struct seq_file *m;
3288 int ret;
3289
Steven Rostedt60a11772008-05-12 21:20:44 +02003290 if (tracing_disabled)
3291 return -ENODEV;
3292
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003293 ret = seq_open(file, &show_traces_seq_ops);
3294 if (ret)
3295 return ret;
3296
3297 m = file->private_data;
3298 m->private = tr;
3299
3300 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003301}
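
/*
 * t_show() above writes tracer names separated by single spaces with
 * a trailing newline, so a space/newline split in user space recovers
 * the list.  A minimal sketch, mount point assumed.
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[4096];
	char *name;
	FILE *f = fopen("/sys/kernel/tracing/available_tracers", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		for (name = strtok(buf, " \n"); name; name = strtok(NULL, " \n"))
			printf("tracer: %s\n", name);
	fclose(f);
	return 0;
}
#endif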
3302
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003303static ssize_t
3304tracing_write_stub(struct file *filp, const char __user *ubuf,
3305 size_t count, loff_t *ppos)
3306{
3307 return count;
3308}
3309
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003310loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003311{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003312 int ret;
3313
Slava Pestov364829b2010-11-24 15:13:16 -08003314 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003315 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003316 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003317 file->f_pos = ret = 0;
3318
3319 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003320}
3321
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003322static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003323 .open = tracing_open,
3324 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003325 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003326 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003327 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003328};
3329
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003330static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003331 .open = show_traces_open,
3332 .read = seq_read,
3333 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003334 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003335};
3336
Ingo Molnar36dfe922008-05-12 21:20:52 +02003337/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003338 * The tracer itself will not take this lock, but still we want
3339 * to provide a consistent cpumask to user-space:
3340 */
3341static DEFINE_MUTEX(tracing_cpumask_update_lock);
3342
3343/*
3344 * Temporary storage for the character representation of the
3345 * CPU bitmask (and one more byte for the newline):
3346 */
3347static char mask_str[NR_CPUS + 1];
3348
Ingo Molnarc7078de2008-05-12 21:20:52 +02003349static ssize_t
3350tracing_cpumask_read(struct file *filp, char __user *ubuf,
3351 size_t count, loff_t *ppos)
3352{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003353 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003354 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003355
3356 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003357
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003358 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003359 if (count - len < 2) {
3360 count = -EINVAL;
3361 goto out_err;
3362 }
3363 len += sprintf(mask_str + len, "\n");
3364 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3365
3366out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003367 mutex_unlock(&tracing_cpumask_update_lock);
3368
3369 return count;
3370}
3371
3372static ssize_t
3373tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3374 size_t count, loff_t *ppos)
3375{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003376 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303377 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003378 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303379
3380 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3381 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003382
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303383 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003384 if (err)
3385 goto err_unlock;
3386
Li Zefan215368e2009-06-15 10:56:42 +08003387 mutex_lock(&tracing_cpumask_update_lock);
3388
Steven Rostedta5e25882008-12-02 15:34:05 -05003389 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003390 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003391 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003392 /*
3393 * Increase/decrease the disabled counter if we are
3394 * about to flip a bit in the cpumask:
3395 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003396 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303397 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003398 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3399 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003400 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003401 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303402 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003403 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3404 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003405 }
3406 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003407 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003408 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003409
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003410 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003411
Ingo Molnarc7078de2008-05-12 21:20:52 +02003412 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303413 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003414
Ingo Molnarc7078de2008-05-12 21:20:52 +02003415 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003416
3417err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003418 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003419
3420 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003421}
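
/*
 * tracing_cpumask takes the hex cpumask syntax that
 * cpumask_parse_user() accepts, e.g. "3" for CPUs 0-1.  A user-space
 * sketch (mount point assumed) that shows the old mask and then
 * restricts tracing to the first two CPUs:
 */
#if 0
#include <stdio.h>

int main(void)
{
	char cur[256];
	FILE *f = fopen("/sys/kernel/tracing/tracing_cpumask", "r");

	if (f && fgets(cur, sizeof(cur), f))
		printf("was: %s", cur);		/* e.g. "f" on a 4-CPU box */
	if (f)
		fclose(f);

	f = fopen("/sys/kernel/tracing/tracing_cpumask", "w");
	if (!f)
		return 1;
	fputs("3\n", f);			/* trace CPUs 0 and 1 only */
	return fclose(f);
}
#endif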
3422
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003423static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003424 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003425 .read = tracing_cpumask_read,
3426 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003427 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003428 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003429};
3430
Li Zefanfdb372e2009-12-08 11:15:59 +08003431static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003432{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003433 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003434 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003435 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003436 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003437
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003438 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003439 tracer_flags = tr->current_trace->flags->val;
3440 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003441
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003442 for (i = 0; trace_options[i]; i++) {
3443 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003444 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003445 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003446 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003447 }
3448
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003449 for (i = 0; trace_opts[i].name; i++) {
3450 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003451 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003452 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003453 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003454 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003455 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003456
Li Zefanfdb372e2009-12-08 11:15:59 +08003457 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003458}
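
/*
 * Each line the show function above emits is either "name" (flag set)
 * or "noname" (flag clear).  A user-space sketch classifying them,
 * mount point assumed:
 */
#if 0
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[128];
	FILE *f = fopen("/sys/kernel/tracing/trace_options", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		line[strcspn(line, "\n")] = '\0';
		if (!strncmp(line, "no", 2))
			printf("off: %s\n", line + 2);
		else
			printf("on:  %s\n", line);
	}
	fclose(f);
	return 0;
}
#endif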
3459
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003460static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003461 struct tracer_flags *tracer_flags,
3462 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003463{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003464 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003465 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003466
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003467 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003468 if (ret)
3469 return ret;
3470
3471 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003472 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003473 else
Zhaolei77708412009-08-07 18:53:21 +08003474 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003475 return 0;
3476}
3477
Li Zefan8d18eaa2009-12-08 11:17:06 +08003478/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003479static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003480{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003481 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003482 struct tracer_flags *tracer_flags = trace->flags;
3483 struct tracer_opt *opts = NULL;
3484 int i;
3485
3486 for (i = 0; tracer_flags->opts[i].name; i++) {
3487 opts = &tracer_flags->opts[i];
3488
3489 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003490 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003491 }
3492
3493 return -EINVAL;
3494}
3495
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003496/* Some tracers require overwrite to stay enabled */
3497int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3498{
3499 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3500 return -1;
3501
3502 return 0;
3503}
3504
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003505int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003506{
3507 /* do nothing if flag is already set */
3508 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003509 return 0;
3510
3511 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003512 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003513 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003514 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003515
3516 if (enabled)
3517 trace_flags |= mask;
3518 else
3519 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003520
3521 if (mask == TRACE_ITER_RECORD_CMD)
3522 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003523
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003524 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003525 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003526#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003527 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003528#endif
3529 }
Steven Rostedt81698832012-10-11 10:15:05 -04003530
3531 if (mask == TRACE_ITER_PRINTK)
3532 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003533
3534 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003535}
3536
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003537static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003538{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003539 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003540 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003541 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003542 int i;
3543
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003544 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003545
Li Zefan8d18eaa2009-12-08 11:17:06 +08003546 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003547 neg = 1;
3548 cmp += 2;
3549 }
3550
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003551 mutex_lock(&trace_types_lock);
3552
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003553 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003554 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003555 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003556 break;
3557 }
3558 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003559
3560 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003561 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003562 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003563
3564 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003565
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003566 return ret;
3567}
3568
3569static ssize_t
3570tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3571 size_t cnt, loff_t *ppos)
3572{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003573 struct seq_file *m = filp->private_data;
3574 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003575 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003576 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003577
3578 if (cnt >= sizeof(buf))
3579 return -EINVAL;
3580
3581 if (copy_from_user(&buf, ubuf, cnt))
3582 return -EFAULT;
3583
Steven Rostedta8dd2172013-01-09 20:54:17 -05003584 buf[cnt] = 0;
3585
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003586 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003587 if (ret < 0)
3588 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003589
Jiri Olsacf8517c2009-10-23 19:36:16 -04003590 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003591
3592 return cnt;
3593}
3594
Li Zefanfdb372e2009-12-08 11:15:59 +08003595static int tracing_trace_options_open(struct inode *inode, struct file *file)
3596{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003597 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003598 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003599
Li Zefanfdb372e2009-12-08 11:15:59 +08003600 if (tracing_disabled)
3601 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003602
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003603 if (trace_array_get(tr) < 0)
3604 return -ENODEV;
3605
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003606 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3607 if (ret < 0)
3608 trace_array_put(tr);
3609
3610 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003611}
3612
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003613static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003614 .open = tracing_trace_options_open,
3615 .read = seq_read,
3616 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003617 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003618 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003619};
3620
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003621static const char readme_msg[] =
3622 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003623 "# echo 0 > tracing_on : quick way to disable tracing\n"
3624 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3625 " Important files:\n"
3626 " trace\t\t\t- The static contents of the buffer\n"
3627 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3628 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3629 " current_tracer\t- function and latency tracers\n"
3630 " available_tracers\t- list of configured tracers for current_tracer\n"
3631 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3632 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3633 " trace_clock\t\t-change the clock used to order events\n"
3634 " local: Per cpu clock but may not be synced across CPUs\n"
3635 " global: Synced across CPUs but slows tracing down.\n"
3636 " counter: Not a clock, but just an increment\n"
3637 " uptime: Jiffy counter from time of boot\n"
3638 " perf: Same clock that perf events use\n"
3639#ifdef CONFIG_X86_64
3640 " x86-tsc: TSC cycle counter\n"
3641#endif
3642 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3643 " tracing_cpumask\t- Limit which CPUs to trace\n"
3644 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3645 "\t\t\t Remove sub-buffer with rmdir\n"
3646 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003647	"\t\t\t Disable an option by prefixing 'no' to the\n"
3648	"\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003649 " saved_cmdlines_size\t- echo the number of comm-pid pairs to cache in here\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003650#ifdef CONFIG_DYNAMIC_FTRACE
3651 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003652 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3653 "\t\t\t functions\n"
3654 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3655 "\t modules: Can select a group via module\n"
3656 "\t Format: :mod:<module-name>\n"
3657 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3658 "\t triggers: a command to perform when function is hit\n"
3659 "\t Format: <function>:<trigger>[:count]\n"
3660 "\t trigger: traceon, traceoff\n"
3661 "\t\t enable_event:<system>:<event>\n"
3662 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003663#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003664 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003665#endif
3666#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003667 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003668#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003669 "\t\t dump\n"
3670 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003671 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3672 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3673 "\t The first one will disable tracing every time do_fault is hit\n"
3674 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3675 "\t The first time do trap is hit and it disables tracing, the\n"
3676 "\t counter will decrement to 2. If tracing is already disabled,\n"
3677 "\t the counter will not decrement. It only decrements when the\n"
3678 "\t trigger did work\n"
3679 "\t To remove trigger without count:\n"
3680 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3681 "\t To remove trigger with a count:\n"
3682 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003683 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003684 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3685 "\t modules: Can select a group via module command :mod:\n"
3686 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003687#endif /* CONFIG_DYNAMIC_FTRACE */
3688#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003689 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3690 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003691#endif
3692#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3693 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003694 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003695 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3696#endif
3697#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003698 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3699 "\t\t\t snapshot buffer. Read the contents for more\n"
3700 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003701#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003702#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003703 " stack_trace\t\t- Shows the max stack trace when active\n"
3704 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003705 "\t\t\t Write into this file to reset the max size (trigger a\n"
3706 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003707#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003708 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3709 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003710#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003711#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003712 " events/\t\t- Directory containing all trace event subsystems:\n"
3713 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3714 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003715 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3716 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003717 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003718 " events/<system>/<event>/\t- Directory containing control files for\n"
3719 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003720 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3721 " filter\t\t- If set, only events passing filter are traced\n"
3722 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003723 "\t Format: <trigger>[:count][if <filter>]\n"
3724 "\t trigger: traceon, traceoff\n"
3725 "\t enable_event:<system>:<event>\n"
3726 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003727#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003728 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003729#endif
3730#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003731 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003732#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003733 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3734 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3735 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3736 "\t events/block/block_unplug/trigger\n"
3737 "\t The first disables tracing every time block_unplug is hit.\n"
3738 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3739 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3740 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3741 "\t Like function triggers, the counter is only decremented if it\n"
3742 "\t enabled or disabled tracing.\n"
3743 "\t To remove a trigger without a count:\n"
3744 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3745 "\t To remove a trigger with a count:\n"
3746 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3747 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003748;
3749
3750static ssize_t
3751tracing_readme_read(struct file *filp, char __user *ubuf,
3752 size_t cnt, loff_t *ppos)
3753{
3754 return simple_read_from_buffer(ubuf, cnt, ppos,
3755 readme_msg, strlen(readme_msg));
3756}
3757
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003758static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003759 .open = tracing_open_generic,
3760 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003761 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003762};
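/*
 * The readme_msg above is what a user reads from the README file in
 * the tracefs directory, e.g. (mount point is an assumption):
 *
 *   cat /sys/kernel/debug/tracing/README
 */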
3763
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003764static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003765{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003766 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003767
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003768 if (*pos || m->count)
3769 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003770
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003771 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003772
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003773 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3774 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003775 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003776 continue;
3777
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003778 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003779 }
3780
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003781 return NULL;
3782}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003783
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003784static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3785{
3786 void *v;
3787 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003788
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003789 preempt_disable();
3790 arch_spin_lock(&trace_cmdline_lock);
3791
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003792 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003793 while (l <= *pos) {
3794 v = saved_cmdlines_next(m, v, &l);
3795 if (!v)
3796 return NULL;
3797 }
3798
3799 return v;
3800}
3801
3802static void saved_cmdlines_stop(struct seq_file *m, void *v)
3803{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003804 arch_spin_unlock(&trace_cmdline_lock);
3805 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003806}
3807
3808static int saved_cmdlines_show(struct seq_file *m, void *v)
3809{
3810 char buf[TASK_COMM_LEN];
3811 unsigned int *pid = v;
3812
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003813 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003814 seq_printf(m, "%d %s\n", *pid, buf);
3815 return 0;
3816}
3817
3818static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3819 .start = saved_cmdlines_start,
3820 .next = saved_cmdlines_next,
3821 .stop = saved_cmdlines_stop,
3822 .show = saved_cmdlines_show,
3823};
3824
3825static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3826{
3827 if (tracing_disabled)
3828 return -ENODEV;
3829
3830 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003831}
3832
3833static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003834 .open = tracing_saved_cmdlines_open,
3835 .read = seq_read,
3836 .llseek = seq_lseek,
3837 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003838};
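/*
 * Editor's note: the start/next/stop/show callbacks above follow the
 * seq_file iterator contract: start() takes the cmdline lock and
 * positions the cursor, next() skips unmapped slots, and show() emits
 * one "<pid> <comm>" pair per line. A hedged user-visible sketch
 * (the tracefs path is an assumption):
 *
 *   cat /sys/kernel/debug/tracing/saved_cmdlines
 *   # 1234 bash
 *   # 5678 kworker/0:1
 */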
3839
3840static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003841tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3842 size_t cnt, loff_t *ppos)
3843{
3844 char buf[64];
3845 int r;
3846
3847 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003848 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003849 arch_spin_unlock(&trace_cmdline_lock);
3850
3851 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3852}
3853
3854static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3855{
3856 kfree(s->saved_cmdlines);
3857 kfree(s->map_cmdline_to_pid);
3858 kfree(s);
3859}
3860
3861static int tracing_resize_saved_cmdlines(unsigned int val)
3862{
3863 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3864
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003865 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003866 if (!s)
3867 return -ENOMEM;
3868
3869 if (allocate_cmdlines_buffer(val, s) < 0) {
3870 kfree(s);
3871 return -ENOMEM;
3872 }
3873
3874 arch_spin_lock(&trace_cmdline_lock);
3875 savedcmd_temp = savedcmd;
3876 savedcmd = s;
3877 arch_spin_unlock(&trace_cmdline_lock);
3878 free_saved_cmdlines_buffer(savedcmd_temp);
3879
3880 return 0;
3881}
3882
3883static ssize_t
3884tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3885 size_t cnt, loff_t *ppos)
3886{
3887 unsigned long val;
3888 int ret;
3889
3890 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3891 if (ret)
3892 return ret;
3893
3894	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3895 if (!val || val > PID_MAX_DEFAULT)
3896 return -EINVAL;
3897
3898 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3899 if (ret < 0)
3900 return ret;
3901
3902 *ppos += cnt;
3903
3904 return cnt;
3905}
3906
3907static const struct file_operations tracing_saved_cmdlines_size_fops = {
3908 .open = tracing_open_generic,
3909 .read = tracing_saved_cmdlines_size_read,
3910 .write = tracing_saved_cmdlines_size_write,
3911};
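/*
 * Hedged usage sketch: resizing the cached comm-pid list from
 * userspace. Per the write handler above, values must be between 1
 * and PID_MAX_DEFAULT (tracefs path is an assumption):
 *
 *   echo 1024 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *   cat /sys/kernel/debug/tracing/saved_cmdlines_size    # -> 1024
 */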
3912
3913static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003914tracing_set_trace_read(struct file *filp, char __user *ubuf,
3915 size_t cnt, loff_t *ppos)
3916{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003917 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003918 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003919 int r;
3920
3921 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003922 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003923 mutex_unlock(&trace_types_lock);
3924
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003925 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003926}
3927
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003928int tracer_init(struct tracer *t, struct trace_array *tr)
3929{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003930 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003931 return t->init(tr);
3932}
3933
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003934static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003935{
3936 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003937
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003938 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003939 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003940}
3941
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003942#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003943/* resize @trace_buf's per-cpu entry counts to match @size_buf's */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003944static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3945 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003946{
3947 int cpu, ret = 0;
3948
3949 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3950 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003951 ret = ring_buffer_resize(trace_buf->buffer,
3952 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003953 if (ret < 0)
3954 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003955 per_cpu_ptr(trace_buf->data, cpu)->entries =
3956 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003957 }
3958 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003959 ret = ring_buffer_resize(trace_buf->buffer,
3960 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003961 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003962 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3963 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003964 }
3965
3966 return ret;
3967}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003968#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003969
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003970static int __tracing_resize_ring_buffer(struct trace_array *tr,
3971 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003972{
3973 int ret;
3974
3975 /*
3976 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04003977 * we use the size that was given, and we can forget about
3978 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003979 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003980 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003981
Steven Rostedtb382ede62012-10-10 21:44:34 -04003982 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003983 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003984 return 0;
3985
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003986 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003987 if (ret < 0)
3988 return ret;
3989
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003990#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003991 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3992 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003993 goto out;
3994
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003995 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003996 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003997 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3998 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003999 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004000 /*
4001 * AARGH! We are left with different
4002 * size max buffer!!!!
4003 * The max buffer is our "snapshot" buffer.
4004 * When a tracer needs a snapshot (one of the
4005 * latency tracers), it swaps the max buffer
4006			 * with the saved snapshot. We succeeded in
4007			 * updating the size of the main buffer, but failed to
4008 * update the size of the max buffer. But when we tried
4009 * to reset the main buffer to the original size, we
4010 * failed there too. This is very unlikely to
4011 * happen, but if it does, warn and kill all
4012 * tracing.
4013 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004014 WARN_ON(1);
4015 tracing_disabled = 1;
4016 }
4017 return ret;
4018 }
4019
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004020 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004021 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004022 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004023 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004024
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004025 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004026#endif /* CONFIG_TRACER_MAX_TRACE */
4027
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004028 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004029 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004030 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004031 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004032
4033 return ret;
4034}
4035
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004036static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4037 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004038{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004039 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004040
4041 mutex_lock(&trace_types_lock);
4042
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004043 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4044		/* make sure this cpu is enabled in the mask */
4045 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4046 ret = -EINVAL;
4047 goto out;
4048 }
4049 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004050
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004051 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004052 if (ret < 0)
4053 ret = -ENOMEM;
4054
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004055out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004056 mutex_unlock(&trace_types_lock);
4057
4058 return ret;
4059}
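/*
 * Editor's gloss on cpu_id: RING_BUFFER_ALL_CPUS resizes every per-cpu
 * buffer, while a specific cpu resizes only that one. This is how the
 * top-level buffer_size_kb file and the per_cpu/cpuN/buffer_size_kb
 * files, respectively, reach this function through
 * tracing_entries_write() below.
 */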
4060
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004061
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004062/**
4063 * tracing_update_buffers - used by tracing facility to expand ring buffers
4064 *
4065 * To save memory when tracing is never used on a system that has it
4066 * configured in, the ring buffers are set to a minimum size. Once a
4067 * user starts to use the tracing facility, they need to grow to
4068 * their default size.
4069 *
4070 * This function is to be called when a tracer is about to be used.
4071 */
4072int tracing_update_buffers(void)
4073{
4074 int ret = 0;
4075
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004076 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004077 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004078 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004079 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004080 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004081
4082 return ret;
4083}
4084
Steven Rostedt577b7852009-02-26 23:43:05 -05004085struct trace_option_dentry;
4086
4087static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004088create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004089
4090static void
4091destroy_trace_option_files(struct trace_option_dentry *topts);
4092
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004093/*
4094 * Used to clear out the tracer before deletion of an instance.
4095 * Must have trace_types_lock held.
4096 */
4097static void tracing_set_nop(struct trace_array *tr)
4098{
4099 if (tr->current_trace == &nop_trace)
4100 return;
4101
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004102 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004103
4104 if (tr->current_trace->reset)
4105 tr->current_trace->reset(tr);
4106
4107 tr->current_trace = &nop_trace;
4108}
4109
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004110static void update_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004111{
Steven Rostedt577b7852009-02-26 23:43:05 -05004112 static struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004113
4114 /* Only enable if the directory has been created already. */
4115 if (!tr->dir)
4116 return;
4117
4118 /* Currently, only the top instance has options */
4119 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4120 return;
4121
4122 destroy_trace_option_files(topts);
4123 topts = create_trace_option_files(tr, t);
4124}
4125
4126static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4127{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004128 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004129#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004130 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004131#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004132 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004133
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004134 mutex_lock(&trace_types_lock);
4135
Steven Rostedt73c51622009-03-11 13:42:01 -04004136 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004137 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004138 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004139 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004140 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004141 ret = 0;
4142 }
4143
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004144 for (t = trace_types; t; t = t->next) {
4145 if (strcmp(t->name, buf) == 0)
4146 break;
4147 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004148 if (!t) {
4149 ret = -EINVAL;
4150 goto out;
4151 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004152 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004153 goto out;
4154
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004155 /* Some tracers are only allowed for the top level buffer */
4156 if (!trace_ok_for_array(t, tr)) {
4157 ret = -EINVAL;
4158 goto out;
4159 }
4160
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004161 /* If trace pipe files are being read, we can't change the tracer */
4162 if (tr->current_trace->ref) {
4163 ret = -EBUSY;
4164 goto out;
4165 }
4166
Steven Rostedt9f029e82008-11-12 15:24:24 -05004167 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004168
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004169 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004170
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004171 if (tr->current_trace->reset)
4172 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004173
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004174 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004175 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004176
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004177#ifdef CONFIG_TRACER_MAX_TRACE
4178 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004179
4180 if (had_max_tr && !t->use_max_tr) {
4181 /*
4182 * We need to make sure that the update_max_tr sees that
4183 * current_trace changed to nop_trace to keep it from
4184 * swapping the buffers after we resize it.
4185		 * The update_max_tr is called with interrupts disabled,
4186		 * so a synchronize_sched() is sufficient.
4187 */
4188 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004189 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004190 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004191#endif
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004192 update_tracer_options(tr, t);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004193
4194#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004195 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004196 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004197 if (ret < 0)
4198 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004199 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004200#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004201
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004202 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004203 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004204 if (ret)
4205 goto out;
4206 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004207
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004208 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004209 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004210 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004211 out:
4212 mutex_unlock(&trace_types_lock);
4213
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004214 return ret;
4215}
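/*
 * Hedged usage sketch: this is the path taken when a tracer name is
 * written to current_tracer (the output shown is only an example):
 *
 *   cat available_tracers           # e.g. "function_graph function nop"
 *   echo function > current_tracer
 *   echo nop > current_tracer       # back to the no-op tracer
 *
 * Selecting a tracer with use_max_tr set also allocates the snapshot
 * (max) buffer, per the CONFIG_TRACER_MAX_TRACE block above.
 */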
4216
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004217static ssize_t
4218tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4219 size_t cnt, loff_t *ppos)
4220{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004221 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004222 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004223 int i;
4224 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004225 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004226
Steven Rostedt60063a62008-10-28 10:44:24 -04004227 ret = cnt;
4228
Li Zefanee6c2c12009-09-18 14:06:47 +08004229 if (cnt > MAX_TRACER_SIZE)
4230 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004231
4232 if (copy_from_user(&buf, ubuf, cnt))
4233 return -EFAULT;
4234
4235 buf[cnt] = 0;
4236
4237 /* strip ending whitespace. */
4238 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4239 buf[i] = 0;
4240
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004241 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004242 if (err)
4243 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004244
Jiri Olsacf8517c2009-10-23 19:36:16 -04004245 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004246
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004247 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004248}
4249
4250static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004251tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4252 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004253{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004254 char buf[64];
4255 int r;
4256
Steven Rostedtcffae432008-05-12 21:21:00 +02004257 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004258 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004259 if (r > sizeof(buf))
4260 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004261 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004262}
4263
4264static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004265tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4266 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004267{
Hannes Eder5e398412009-02-10 19:44:34 +01004268 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004269 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004270
Peter Huewe22fe9b52011-06-07 21:58:27 +02004271 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4272 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004273 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004274
4275 *ptr = val * 1000;
4276
4277 return cnt;
4278}
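/*
 * Editor's note: these helpers expose values in microseconds while
 * storing nanoseconds internally (the "* 1000" above, and
 * nsecs_to_usecs() on the read side). For example, writing 100 to
 * tracing_thresh stores 100000 nanoseconds.
 */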
4279
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004280static ssize_t
4281tracing_thresh_read(struct file *filp, char __user *ubuf,
4282 size_t cnt, loff_t *ppos)
4283{
4284 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4285}
4286
4287static ssize_t
4288tracing_thresh_write(struct file *filp, const char __user *ubuf,
4289 size_t cnt, loff_t *ppos)
4290{
4291 struct trace_array *tr = filp->private_data;
4292 int ret;
4293
4294 mutex_lock(&trace_types_lock);
4295 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4296 if (ret < 0)
4297 goto out;
4298
4299 if (tr->current_trace->update_thresh) {
4300 ret = tr->current_trace->update_thresh(tr);
4301 if (ret < 0)
4302 goto out;
4303 }
4304
4305 ret = cnt;
4306out:
4307 mutex_unlock(&trace_types_lock);
4308
4309 return ret;
4310}
4311
4312static ssize_t
4313tracing_max_lat_read(struct file *filp, char __user *ubuf,
4314 size_t cnt, loff_t *ppos)
4315{
4316 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4317}
4318
4319static ssize_t
4320tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4321 size_t cnt, loff_t *ppos)
4322{
4323 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4324}
4325
Steven Rostedtb3806b42008-05-12 21:20:46 +02004326static int tracing_open_pipe(struct inode *inode, struct file *filp)
4327{
Oleg Nesterov15544202013-07-23 17:25:57 +02004328 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004329 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004330 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004331
4332 if (tracing_disabled)
4333 return -ENODEV;
4334
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004335 if (trace_array_get(tr) < 0)
4336 return -ENODEV;
4337
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004338 mutex_lock(&trace_types_lock);
4339
Steven Rostedtb3806b42008-05-12 21:20:46 +02004340 /* create a buffer to store the information to pass to userspace */
4341 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004342 if (!iter) {
4343 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004344 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004345 goto out;
4346 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004347
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004348 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004349 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004350
4351 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4352 ret = -ENOMEM;
4353 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304354 }
4355
Steven Rostedta3097202008-11-07 22:36:02 -05004356 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304357 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004358
Steven Rostedt112f38a72009-06-01 15:16:05 -04004359 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4360 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4361
David Sharp8be07092012-11-13 12:18:22 -08004362 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004363 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004364 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4365
Oleg Nesterov15544202013-07-23 17:25:57 +02004366 iter->tr = tr;
4367 iter->trace_buffer = &tr->trace_buffer;
4368 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004369 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004370 filp->private_data = iter;
4371
Steven Rostedt107bad82008-05-12 21:21:01 +02004372 if (iter->trace->pipe_open)
4373 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004374
Arnd Bergmannb4447862010-07-07 23:40:11 +02004375 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004376
4377 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004378out:
4379 mutex_unlock(&trace_types_lock);
4380 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004381
4382fail:
4383 kfree(iter->trace);
4384 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004385 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004386 mutex_unlock(&trace_types_lock);
4387 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004388}
4389
4390static int tracing_release_pipe(struct inode *inode, struct file *file)
4391{
4392 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004393 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004394
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004395 mutex_lock(&trace_types_lock);
4396
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004397 tr->current_trace->ref--;
4398
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004399 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004400 iter->trace->pipe_close(iter);
4401
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004402 mutex_unlock(&trace_types_lock);
4403
Rusty Russell44623442009-01-01 10:12:23 +10304404 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004405 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004406 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004407
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004408 trace_array_put(tr);
4409
Steven Rostedtb3806b42008-05-12 21:20:46 +02004410 return 0;
4411}
4412
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004413static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004414trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004415{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004416	/* Iterators are static; they are either filled or empty */
4417 if (trace_buffer_iter(iter, iter->cpu_file))
4418 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004419
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004420 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004421 /*
4422 * Always select as readable when in blocking mode
4423 */
4424 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004425 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004426 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004427 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004428}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004429
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004430static unsigned int
4431tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4432{
4433 struct trace_iterator *iter = filp->private_data;
4434
4435 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004436}
4437
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004438/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004439static int tracing_wait_pipe(struct file *filp)
4440{
4441 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004442 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004443
4444 while (trace_empty(iter)) {
4445
4446 if ((filp->f_flags & O_NONBLOCK)) {
4447 return -EAGAIN;
4448 }
4449
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004450 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004451		 * We block while the buffer is empty. If tracing is disabled
4452		 * but we have never read anything, we keep blocking; this
4453		 * allows a user to cat this file, and then enable tracing.
4454		 * But once we have read something, we give an EOF when
4455		 * tracing is again disabled.
4456 *
4457 * iter->pos will be 0 if we haven't read anything.
4458 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004459 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004460 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004461
4462 mutex_unlock(&iter->mutex);
4463
Rabin Vincente30f53a2014-11-10 19:46:34 +01004464 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004465
4466 mutex_lock(&iter->mutex);
4467
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004468 if (ret)
4469 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004470 }
4471
4472 return 1;
4473}
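/*
 * A minimal userspace sketch of the blocking semantics above; purely
 * illustrative, error handling omitted and the tracefs path assumed:
 *
 *   int fd = open("/sys/kernel/debug/tracing/trace_pipe",
 *                 O_RDONLY | O_NONBLOCK);
 *   char buf[4096];
 *   ssize_t n = read(fd, buf, sizeof(buf));
 *   // n < 0 with errno == EAGAIN while the buffer is empty;
 *   // without O_NONBLOCK the read blocks until data arrives.
 */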
4474
Steven Rostedtb3806b42008-05-12 21:20:46 +02004475/*
4476 * Consumer reader.
4477 */
4478static ssize_t
4479tracing_read_pipe(struct file *filp, char __user *ubuf,
4480 size_t cnt, loff_t *ppos)
4481{
4482 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004483 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004484
4485 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004486 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4487 if (sret != -EBUSY)
4488 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004489
Steven Rostedtf9520752009-03-02 14:04:40 -05004490 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004491
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004492 /*
4493 * Avoid more than one consumer on a single file descriptor
4494 * This is just a matter of traces coherency, the ring buffer itself
4495 * is protected.
4496 */
4497 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004498 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004499 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4500 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004501 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004502 }
4503
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004504waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004505 sret = tracing_wait_pipe(filp);
4506 if (sret <= 0)
4507 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004508
4509 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004510 if (trace_empty(iter)) {
4511 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004512 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004513 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004514
4515 if (cnt >= PAGE_SIZE)
4516 cnt = PAGE_SIZE - 1;
4517
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004518 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004519 memset(&iter->seq, 0,
4520 sizeof(struct trace_iterator) -
4521 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004522 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004523 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004524
Lai Jiangshan4f535962009-05-18 19:35:34 +08004525 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004526 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004527 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004528 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004529 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004530
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004531 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004532 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004533 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004534 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004535 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004536 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004537 if (ret != TRACE_TYPE_NO_CONSUME)
4538 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004539
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004540 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02004541 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004542
4543 /*
4544 * Setting the full flag means we reached the trace_seq buffer
4545 * size and we should leave by partial output condition above.
4546 * One of the trace_seq_* functions is not used properly.
4547 */
4548 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4549 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004550 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004551 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004552 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004553
Steven Rostedtb3806b42008-05-12 21:20:46 +02004554 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004555 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004556 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05004557 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004558
4559 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004560 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004561 * entries, go back to wait for more entries.
4562 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004563 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004564 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004565
Steven Rostedt107bad82008-05-12 21:21:01 +02004566out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004567 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004568
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004569 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004570}
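/*
 * Editor's usage note: trace_pipe is a consuming read, so entries
 * returned here are removed from the ring buffer; "cat trace_pipe"
 * streams events live, unlike "cat trace", which leaves the buffer
 * intact.
 */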
4571
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004572static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4573 unsigned int idx)
4574{
4575 __free_page(spd->pages[idx]);
4576}
4577
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004578static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004579 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004580 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004581 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004582 .steal = generic_pipe_buf_steal,
4583 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004584};
4585
Steven Rostedt34cd4992009-02-09 12:06:29 -05004586static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004587tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004588{
4589 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004590 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004591 int ret;
4592
4593 /* Seq buffer is page-sized, exactly what we need. */
4594 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004595 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004596 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004597
4598 if (trace_seq_has_overflowed(&iter->seq)) {
4599 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004600 break;
4601 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004602
4603 /*
4604 * This should not be hit, because it should only
4605 * be set if the iter->seq overflowed. But check it
4606 * anyway to be safe.
4607 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05004608 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004609 iter->seq.seq.len = save_len;
4610 break;
4611 }
4612
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004613 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004614 if (rem < count) {
4615 rem = 0;
4616 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004617 break;
4618 }
4619
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004620 if (ret != TRACE_TYPE_NO_CONSUME)
4621 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004622 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004623 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004624 rem = 0;
4625 iter->ent = NULL;
4626 break;
4627 }
4628 }
4629
4630 return rem;
4631}
4632
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004633static ssize_t tracing_splice_read_pipe(struct file *filp,
4634 loff_t *ppos,
4635 struct pipe_inode_info *pipe,
4636 size_t len,
4637 unsigned int flags)
4638{
Jens Axboe35f3d142010-05-20 10:43:18 +02004639 struct page *pages_def[PIPE_DEF_BUFFERS];
4640 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004641 struct trace_iterator *iter = filp->private_data;
4642 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004643 .pages = pages_def,
4644 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004645 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004646 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004647 .flags = flags,
4648 .ops = &tracing_pipe_buf_ops,
4649 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004650 };
4651 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004652 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004653 unsigned int i;
4654
Jens Axboe35f3d142010-05-20 10:43:18 +02004655 if (splice_grow_spd(pipe, &spd))
4656 return -ENOMEM;
4657
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004658 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004659
4660 if (iter->trace->splice_read) {
4661 ret = iter->trace->splice_read(iter, filp,
4662 ppos, pipe, len, flags);
4663 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004664 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004665 }
4666
4667 ret = tracing_wait_pipe(filp);
4668 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004669 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004670
Jason Wessel955b61e2010-08-05 09:22:23 -05004671 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004672 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004673 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004674 }
4675
Lai Jiangshan4f535962009-05-18 19:35:34 +08004676 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004677 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004678
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004679 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004680 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004681 spd.pages[i] = alloc_page(GFP_KERNEL);
4682 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004683 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004684
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004685 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004686
4687 /* Copy the data into the page, so we can start over. */
4688 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004689 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004690 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004691 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004692 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004693 break;
4694 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004695 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004696 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004697
Steven Rostedtf9520752009-03-02 14:04:40 -05004698 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004699 }
4700
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004701 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004702 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004703 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004704
4705 spd.nr_pages = i;
4706
Jens Axboe35f3d142010-05-20 10:43:18 +02004707 ret = splice_to_pipe(pipe, &spd);
4708out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004709 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004710 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004711
Steven Rostedt34cd4992009-02-09 12:06:29 -05004712out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004713 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004714 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004715}
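/*
 * Illustrative userspace sketch (an assumed usage, not from this
 * file): splice() moves page-sized chunks of trace data into a pipe
 * without an intermediate copy through a user buffer:
 *
 *   int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *   splice(fd, NULL, pipefd[1], NULL, 65536, SPLICE_F_MOVE);
 */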
4716
Steven Rostedta98a3c32008-05-12 21:20:59 +02004717static ssize_t
4718tracing_entries_read(struct file *filp, char __user *ubuf,
4719 size_t cnt, loff_t *ppos)
4720{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004721 struct inode *inode = file_inode(filp);
4722 struct trace_array *tr = inode->i_private;
4723 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004724 char buf[64];
4725 int r = 0;
4726 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004727
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004728 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004729
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004730 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004731 int cpu, buf_size_same;
4732 unsigned long size;
4733
4734 size = 0;
4735 buf_size_same = 1;
4736 /* check if all cpu sizes are same */
4737 for_each_tracing_cpu(cpu) {
4738 /* fill in the size from first enabled cpu */
4739 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004740 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4741 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004742 buf_size_same = 0;
4743 break;
4744 }
4745 }
4746
4747 if (buf_size_same) {
4748 if (!ring_buffer_expanded)
4749 r = sprintf(buf, "%lu (expanded: %lu)\n",
4750 size >> 10,
4751 trace_buf_size >> 10);
4752 else
4753 r = sprintf(buf, "%lu\n", size >> 10);
4754 } else
4755 r = sprintf(buf, "X\n");
4756 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004757 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004758
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004759 mutex_unlock(&trace_types_lock);
4760
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004761 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4762 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004763}
4764
4765static ssize_t
4766tracing_entries_write(struct file *filp, const char __user *ubuf,
4767 size_t cnt, loff_t *ppos)
4768{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004769 struct inode *inode = file_inode(filp);
4770 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004771 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004772 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004773
Peter Huewe22fe9b52011-06-07 21:58:27 +02004774 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4775 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004776 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004777
4778 /* must have at least 1 entry */
4779 if (!val)
4780 return -EINVAL;
4781
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004782 /* value is in KB */
4783 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004784 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004785 if (ret < 0)
4786 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004787
Jiri Olsacf8517c2009-10-23 19:36:16 -04004788 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004789
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004790 return cnt;
4791}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004792
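/*
 * Backs buffer_total_size_kb: sums all per-cpu buffer sizes, noting the
 * would-be total while the ring buffer is still at its boot-time
 * minimal size.
 */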
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004793static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004794tracing_total_entries_read(struct file *filp, char __user *ubuf,
4795 size_t cnt, loff_t *ppos)
4796{
4797 struct trace_array *tr = filp->private_data;
4798 char buf[64];
4799 int r, cpu;
4800 unsigned long size = 0, expanded_size = 0;
4801
4802 mutex_lock(&trace_types_lock);
4803 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004804 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004805 if (!ring_buffer_expanded)
4806 expanded_size += trace_buf_size >> 10;
4807 }
4808 if (ring_buffer_expanded)
4809 r = sprintf(buf, "%lu\n", size);
4810 else
4811 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4812 mutex_unlock(&trace_types_lock);
4813
4814 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4815}
4816
4817static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004818tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4819 size_t cnt, loff_t *ppos)
4820{
4821 /*
4822	 * There is no need to read what the user has written; this function
4823	 * exists only so that an "echo" into the file does not return an error.
4824 */
4825
4826 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004827
4828 return cnt;
4829}
4830
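/*
 * Closing free_buffer is what actually frees the memory: optionally
 * stop tracing first (the stop-on-free option), then resize every
 * per-cpu ring buffer down to zero.
 */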
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004831static int
4832tracing_free_buffer_release(struct inode *inode, struct file *filp)
4833{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004834 struct trace_array *tr = inode->i_private;
4835
Steven Rostedtcf30cf62011-06-14 22:44:07 -04004836	/* Disable tracing? */
4837 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07004838 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004839 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004840 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004841
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004842 trace_array_put(tr);
4843
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004844 return 0;
4845}
4846
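/*
 * Write handler for trace_marker. Text written there, e.g.
 *
 *	echo hello > trace_marker
 *
 * from the tracefs mount, is injected into the ring buffer as a
 * TRACE_PRINT event with as little overhead as possible.
 */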
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004847static ssize_t
4848tracing_mark_write(struct file *filp, const char __user *ubuf,
4849 size_t cnt, loff_t *fpos)
4850{
Steven Rostedtd696b582011-09-22 11:50:27 -04004851 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07004852 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04004853 struct ring_buffer_event *event;
4854 struct ring_buffer *buffer;
4855 struct print_entry *entry;
4856 unsigned long irq_flags;
4857 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004858 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04004859 int nr_pages = 1;
4860 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04004861 int offset;
4862 int size;
4863 int len;
4864 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004865 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004866
Steven Rostedtc76f0692008-11-07 22:36:02 -05004867 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004868 return -EINVAL;
4869
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07004870 if (!(trace_flags & TRACE_ITER_MARKERS))
4871 return -EINVAL;
4872
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004873 if (cnt > TRACE_BUF_SIZE)
4874 cnt = TRACE_BUF_SIZE;
4875
Steven Rostedtd696b582011-09-22 11:50:27 -04004876 /*
4877 * Userspace is injecting traces into the kernel trace buffer.
4878	 * We want to be as non-intrusive as possible.
4879 * To do so, we do not want to allocate any special buffers
4880 * or take any locks, but instead write the userspace data
4881 * straight into the ring buffer.
4882 *
4883	 * First we need to pin the userspace buffer into memory. Most
4884	 * likely it already is, because userspace just referenced it,
4885	 * but there is no guarantee. By using get_user_pages_fast()
4886 * and kmap_atomic/kunmap_atomic() we can get access to the
4887 * pages directly. We then write the data directly into the
4888 * ring buffer.
4889 */
4890 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004891
Steven Rostedtd696b582011-09-22 11:50:27 -04004892	/* check if the write crosses a page boundary */
4893 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
4894 nr_pages = 2;
4895
4896 offset = addr & (PAGE_SIZE - 1);
4897 addr &= PAGE_MASK;
4898
4899 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
4900 if (ret < nr_pages) {
4901 while (--ret >= 0)
4902 put_page(pages[ret]);
4903 written = -EFAULT;
4904 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004905 }
4906
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004907 for (i = 0; i < nr_pages; i++)
4908 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04004909
4910 local_save_flags(irq_flags);
4911	size = sizeof(*entry) + cnt + 2; /* room for a possible '\n' and '\0' */
Alexander Z Lam2d716192013-07-01 15:31:24 -07004912 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04004913 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
4914 irq_flags, preempt_count());
4915 if (!event) {
4916		/* Ring buffer disabled; return as if not open for write */
4917 written = -EBADF;
4918 goto out_unlock;
4919 }
4920
4921 entry = ring_buffer_event_data(event);
4922 entry->ip = _THIS_IP_;
4923
4924 if (nr_pages == 2) {
4925 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004926 memcpy(&entry->buf, map_page[0] + offset, len);
4927 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04004928 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004929 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04004930
4931 if (entry->buf[cnt - 1] != '\n') {
4932 entry->buf[cnt] = '\n';
4933 entry->buf[cnt + 1] = '\0';
4934 } else
4935 entry->buf[cnt] = '\0';
4936
Steven Rostedt7ffbd482012-10-11 12:14:25 -04004937 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04004938
4939 written = cnt;
4940
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004941 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004942
Steven Rostedtd696b582011-09-22 11:50:27 -04004943 out_unlock:
Steven Rostedt6edb2a82012-05-11 23:28:49 -04004944 out_unlock:
	for (i = 0; i < nr_pages; i++) {
4945 kunmap_atomic(map_page[i]);
4946 put_page(pages[i]);
4947 }
Steven Rostedtd696b582011-09-22 11:50:27 -04004948 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02004949 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03004950}
4951
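/* Show the available trace clocks, with the active one in brackets. */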
Li Zefan13f16d22009-12-08 11:16:11 +08004952static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08004953{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004954 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08004955 int i;
4956
4957 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08004958 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08004959 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004960 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
4961 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08004962 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08004963
Li Zefan13f16d22009-12-08 11:16:11 +08004964 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08004965}
4966
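/*
 * Switch the trace clock by name; the buffers are reset afterwards,
 * since timestamps taken with different clocks are not comparable.
 */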
Steven Rostedte1e232c2014-02-10 23:38:46 -05004967static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08004968{
Zhaolei5079f322009-08-25 16:12:56 +08004969 int i;
4970
Zhaolei5079f322009-08-25 16:12:56 +08004971 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4972 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4973 break;
4974 }
4975 if (i == ARRAY_SIZE(trace_clocks))
4976 return -EINVAL;
4977
Zhaolei5079f322009-08-25 16:12:56 +08004978 mutex_lock(&trace_types_lock);
4979
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004980 tr->clock_id = i;
4981
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004982 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08004983
David Sharp60303ed2012-10-11 16:27:52 -07004984 /*
4985	 * The new clock may not be consistent with the previous clock.
4986	 * Reset the buffer so that it does not hold incomparable timestamps.
4987 */
Alexander Z Lam94571582013-08-02 18:36:16 -07004988 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004989
4990#ifdef CONFIG_TRACER_MAX_TRACE
4991 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
4992 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07004993 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004994#endif
David Sharp60303ed2012-10-11 16:27:52 -07004995
Zhaolei5079f322009-08-25 16:12:56 +08004996 mutex_unlock(&trace_types_lock);
4997
Steven Rostedte1e232c2014-02-10 23:38:46 -05004998 return 0;
4999}
5000
5001static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5002 size_t cnt, loff_t *fpos)
5003{
5004 struct seq_file *m = filp->private_data;
5005 struct trace_array *tr = m->private;
5006 char buf[64];
5007 const char *clockstr;
5008 int ret;
5009
5010 if (cnt >= sizeof(buf))
5011 return -EINVAL;
5012
5013 if (copy_from_user(&buf, ubuf, cnt))
5014 return -EFAULT;
5015
5016 buf[cnt] = 0;
5017
5018 clockstr = strstrip(buf);
5019
5020 ret = tracing_set_clock(tr, clockstr);
5021 if (ret)
5022 return ret;
5023
Zhaolei5079f322009-08-25 16:12:56 +08005024 *fpos += cnt;
5025
5026 return cnt;
5027}
5028
Li Zefan13f16d22009-12-08 11:16:11 +08005029static int tracing_clock_open(struct inode *inode, struct file *file)
5030{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005031 struct trace_array *tr = inode->i_private;
5032 int ret;
5033
Li Zefan13f16d22009-12-08 11:16:11 +08005034 if (tracing_disabled)
5035 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005036
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005037 if (trace_array_get(tr))
5038 return -ENODEV;
5039
5040 ret = single_open(file, tracing_clock_show, inode->i_private);
5041 if (ret < 0)
5042 trace_array_put(tr);
5043
5044 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005045}
5046
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005047struct ftrace_buffer_info {
5048 struct trace_iterator iter;
5049 void *spare;
5050 unsigned int read;
5051};
5052
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005053#ifdef CONFIG_TRACER_SNAPSHOT
5054static int tracing_snapshot_open(struct inode *inode, struct file *file)
5055{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005056 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005057 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005058 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005059 int ret = 0;
5060
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005061 if (trace_array_get(tr) < 0)
5062 return -ENODEV;
5063
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005064 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005065 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005066 if (IS_ERR(iter))
5067 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005068 } else {
5069 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005070 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005071 m = kzalloc(sizeof(*m), GFP_KERNEL);
5072 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005073 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005074 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5075 if (!iter) {
5076 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005077 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005078 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005079 ret = 0;
5080
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005081 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005082 iter->trace_buffer = &tr->max_buffer;
5083 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005084 m->private = iter;
5085 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005086 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005087out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005088 if (ret < 0)
5089 trace_array_put(tr);
5090
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005091 return ret;
5092}
5093
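/*
 * Write handler for the snapshot file. The value written selects the
 * action: 0 frees the snapshot buffer, 1 allocates it (if needed) and
 * swaps it with the live buffer, and anything else clears the snapshot.
 * Per-cpu snapshot files cannot free, and can swap only when the ring
 * buffer supports per-cpu swapping.
 */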
5094static ssize_t
5095tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5096 loff_t *ppos)
5097{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005098 struct seq_file *m = filp->private_data;
5099 struct trace_iterator *iter = m->private;
5100 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005101 unsigned long val;
5102 int ret;
5103
5104 ret = tracing_update_buffers();
5105 if (ret < 0)
5106 return ret;
5107
5108 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5109 if (ret)
5110 return ret;
5111
5112 mutex_lock(&trace_types_lock);
5113
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005114 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005115 ret = -EBUSY;
5116 goto out;
5117 }
5118
5119 switch (val) {
5120 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005121 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5122 ret = -EINVAL;
5123 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005124 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005125 if (tr->allocated_snapshot)
5126 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005127 break;
5128 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005129/* Only allow per-cpu swap if the ring buffer supports it */
5130#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5131 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5132 ret = -EINVAL;
5133 break;
5134 }
5135#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005136 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005137 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005138 if (ret < 0)
5139 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005140 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005141 local_irq_disable();
5142 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005143 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005144 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005145 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005146 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005147 local_irq_enable();
5148 break;
5149 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005150 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005151 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5152 tracing_reset_online_cpus(&tr->max_buffer);
5153 else
5154 tracing_reset(&tr->max_buffer, iter->cpu_file);
5155 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005156 break;
5157 }
5158
5159 if (ret >= 0) {
5160 *ppos += cnt;
5161 ret = cnt;
5162 }
5163out:
5164 mutex_unlock(&trace_types_lock);
5165 return ret;
5166}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005167
5168static int tracing_snapshot_release(struct inode *inode, struct file *file)
5169{
5170 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005171 int ret;
5172
5173 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005174
5175 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005176 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005177
5178 /* If write only, the seq_file is just a stub */
5179 if (m)
5180 kfree(m->private);
5181 kfree(m);
5182
5183 return 0;
5184}
5185
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005186static int tracing_buffers_open(struct inode *inode, struct file *filp);
5187static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5188 size_t count, loff_t *ppos);
5189static int tracing_buffers_release(struct inode *inode, struct file *file);
5190static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5191 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5192
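/*
 * snapshot_raw provides binary, page-sized reads of the snapshot
 * buffer: reuse the trace_pipe_raw open path, then point the iterator
 * at the max (snapshot) buffer instead of the live one.
 */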
5193static int snapshot_raw_open(struct inode *inode, struct file *filp)
5194{
5195 struct ftrace_buffer_info *info;
5196 int ret;
5197
5198 ret = tracing_buffers_open(inode, filp);
5199 if (ret < 0)
5200 return ret;
5201
5202 info = filp->private_data;
5203
5204 if (info->iter.trace->use_max_tr) {
5205 tracing_buffers_release(inode, filp);
5206 return -EBUSY;
5207 }
5208
5209 info->iter.snapshot = true;
5210 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5211
5212 return ret;
5213}
5214
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005215#endif /* CONFIG_TRACER_SNAPSHOT */
5216
5217
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005218static const struct file_operations tracing_thresh_fops = {
5219 .open = tracing_open_generic,
5220 .read = tracing_thresh_read,
5221 .write = tracing_thresh_write,
5222 .llseek = generic_file_llseek,
5223};
5224
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005225static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005226 .open = tracing_open_generic,
5227 .read = tracing_max_lat_read,
5228 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005229 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005230};
5231
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005232static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005233 .open = tracing_open_generic,
5234 .read = tracing_set_trace_read,
5235 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005236 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005237};
5238
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005239static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005240 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005241 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005242 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005243 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005244 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005245 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005246};
5247
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005248static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005249 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005250 .read = tracing_entries_read,
5251 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005252 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005253 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005254};
5255
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005256static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005257 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005258 .read = tracing_total_entries_read,
5259 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005260 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005261};
5262
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005263static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005264 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005265 .write = tracing_free_buffer_write,
5266 .release = tracing_free_buffer_release,
5267};
5268
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005269static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005270 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005271 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005272 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005273 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005274};
5275
Zhaolei5079f322009-08-25 16:12:56 +08005276static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005277 .open = tracing_clock_open,
5278 .read = seq_read,
5279 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005280 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005281 .write = tracing_clock_write,
5282};
5283
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005284#ifdef CONFIG_TRACER_SNAPSHOT
5285static const struct file_operations snapshot_fops = {
5286 .open = tracing_snapshot_open,
5287 .read = seq_read,
5288 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005289 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005290 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005291};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005292
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005293static const struct file_operations snapshot_raw_fops = {
5294 .open = snapshot_raw_open,
5295 .read = tracing_buffers_read,
5296 .release = tracing_buffers_release,
5297 .splice_read = tracing_buffers_splice_read,
5298 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005299};
5300
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005301#endif /* CONFIG_TRACER_SNAPSHOT */
5302
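/*
 * Open handler for trace_pipe_raw: sets up an iterator over one CPU's
 * (or all CPUs') raw ring buffer pages and pins both the trace array
 * and the current tracer for the lifetime of the file.
 */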
Steven Rostedt2cadf912008-12-01 22:20:19 -05005303static int tracing_buffers_open(struct inode *inode, struct file *filp)
5304{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005305 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005306 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005307 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005308
5309 if (tracing_disabled)
5310 return -ENODEV;
5311
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005312 if (trace_array_get(tr) < 0)
5313 return -ENODEV;
5314
Steven Rostedt2cadf912008-12-01 22:20:19 -05005315 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005316 if (!info) {
5317 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005318 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005319 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005320
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005321 mutex_lock(&trace_types_lock);
5322
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005323 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005324 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005325 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005326 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005327 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005328 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005329 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005330
5331 filp->private_data = info;
5332
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005333 tr->current_trace->ref++;
5334
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005335 mutex_unlock(&trace_types_lock);
5336
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005337 ret = nonseekable_open(inode, filp);
5338 if (ret < 0)
5339 trace_array_put(tr);
5340
5341 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005342}
5343
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005344static unsigned int
5345tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5346{
5347 struct ftrace_buffer_info *info = filp->private_data;
5348 struct trace_iterator *iter = &info->iter;
5349
5350 return trace_poll(iter, filp, poll_table);
5351}
5352
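/*
 * Read handler for trace_pipe_raw: copies whole ring buffer pages to
 * userspace through a spare page, blocking until data arrives unless
 * O_NONBLOCK is set.
 */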
Steven Rostedt2cadf912008-12-01 22:20:19 -05005353static ssize_t
5354tracing_buffers_read(struct file *filp, char __user *ubuf,
5355 size_t count, loff_t *ppos)
5356{
5357 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005358 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005359 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005360 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005361
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005362 if (!count)
5363 return 0;
5364
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005365#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005366 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5367 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005368#endif
5369
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005370 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005371 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5372 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005373 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005374 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005375
Steven Rostedt2cadf912008-12-01 22:20:19 -05005376 /* Do we have previous read data to read? */
5377 if (info->read < PAGE_SIZE)
5378 goto read;
5379
Steven Rostedtb6273442013-02-28 13:44:11 -05005380 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005381 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005382 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005383 &info->spare,
5384 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005385 iter->cpu_file, 0);
5386 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005387
5388 if (ret < 0) {
5389 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005390 if ((filp->f_flags & O_NONBLOCK))
5391 return -EAGAIN;
5392
Rabin Vincente30f53a2014-11-10 19:46:34 +01005393 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005394 if (ret)
5395 return ret;
5396
Steven Rostedtb6273442013-02-28 13:44:11 -05005397 goto again;
5398 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005399 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005400 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005401
Steven Rostedt436fc282011-10-14 10:44:25 -04005402 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005403 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005404 size = PAGE_SIZE - info->read;
5405 if (size > count)
5406 size = count;
5407
5408 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005409 if (ret == size)
5410 return -EFAULT;
5411
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005412 size -= ret;
5413
Steven Rostedt2cadf912008-12-01 22:20:19 -05005414 *ppos += size;
5415 info->read += size;
5416
5417 return size;
5418}
5419
5420static int tracing_buffers_release(struct inode *inode, struct file *file)
5421{
5422 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005423 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005424
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005425 mutex_lock(&trace_types_lock);
5426
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005427 iter->tr->current_trace->ref--;
5428
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005429 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005430
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005431 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005432 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005433 kfree(info);
5434
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005435 mutex_unlock(&trace_types_lock);
5436
Steven Rostedt2cadf912008-12-01 22:20:19 -05005437 return 0;
5438}
5439
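/*
 * A reference to a ring buffer page travelling through a pipe: the page
 * is handed back to the ring buffer only when the last reference drops.
 */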
5440struct buffer_ref {
5441 struct ring_buffer *buffer;
5442 void *page;
5443 int ref;
5444};
5445
5446static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5447 struct pipe_buffer *buf)
5448{
5449 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5450
5451 if (--ref->ref)
5452 return;
5453
5454 ring_buffer_free_read_page(ref->buffer, ref->page);
5455 kfree(ref);
5456 buf->private = 0;
5457}
5458
Steven Rostedt2cadf912008-12-01 22:20:19 -05005459static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5460 struct pipe_buffer *buf)
5461{
5462 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5463
5464 ref->ref++;
5465}
5466
5467/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005468static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005469 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005470 .confirm = generic_pipe_buf_confirm,
5471 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005472 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005473 .get = buffer_pipe_buf_get,
5474};
5475
5476/*
5477	 * Callback from splice_to_pipe(): releases any pages left in the spd
5478	 * if we errored out while filling the pipe.
5479 */
5480static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5481{
5482 struct buffer_ref *ref =
5483 (struct buffer_ref *)spd->partial[i].private;
5484
5485 if (--ref->ref)
5486 return;
5487
5488 ring_buffer_free_read_page(ref->buffer, ref->page);
5489 kfree(ref);
5490 spd->partial[i].private = 0;
5491}
5492
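/*
 * Splice complete ring buffer pages into a pipe without copying: each
 * spliced page carries a buffer_ref so the pipe side controls when the
 * page is returned to the ring buffer.
 */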
5493static ssize_t
5494tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5495 struct pipe_inode_info *pipe, size_t len,
5496 unsigned int flags)
5497{
5498 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005499 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005500 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5501 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005502 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005503 .pages = pages_def,
5504 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005505 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005506 .flags = flags,
5507 .ops = &buffer_pipe_buf_ops,
5508 .spd_release = buffer_spd_release,
5509 };
5510 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005511 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01005512 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005513
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005514#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005515 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5516 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005517#endif
5518
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005519 if (splice_grow_spd(pipe, &spd))
5520 return -ENOMEM;
Jens Axboe35f3d142010-05-20 10:43:18 +02005521
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005522 if (*ppos & (PAGE_SIZE - 1))
5523 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005524
5525 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005526 if (len < PAGE_SIZE)
5527 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005528 len &= PAGE_MASK;
5529 }
5530
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005531 again:
5532 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005533 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005534
Al Viroa786c062014-04-11 12:01:03 -04005535 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005536 struct page *page;
5537 int r;
5538
5539 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01005540 if (!ref) {
5541 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005542 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01005543 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005544
Steven Rostedt7267fa62009-04-29 00:16:21 -04005545 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005546 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005547 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005548 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005549 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005550 kfree(ref);
5551 break;
5552 }
5553
5554 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005555 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005556 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005557 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005558 kfree(ref);
5559 break;
5560 }
5561
5562 /*
5563		 * Zero out any leftover data; this page is going
5564		 * to user land.
5565 */
5566 size = ring_buffer_page_len(ref->page);
5567 if (size < PAGE_SIZE)
5568 memset(ref->page + size, 0, PAGE_SIZE - size);
5569
5570 page = virt_to_page(ref->page);
5571
5572 spd.pages[i] = page;
5573 spd.partial[i].len = PAGE_SIZE;
5574 spd.partial[i].offset = 0;
5575 spd.partial[i].private = (unsigned long)ref;
5576 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005577 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005578
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005579 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005580 }
5581
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005582 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005583 spd.nr_pages = i;
5584
5585 /* did we read anything? */
5586 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005587 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005588 return ret;
Rabin Vincent07906da2014-11-06 22:26:07 +01005589
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005590 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5591 return -EAGAIN;
5592
Rabin Vincente30f53a2014-11-10 19:46:34 +01005593 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005594 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005595 return ret;
Rabin Vincente30f53a2014-11-10 19:46:34 +01005596
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005597 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005598 }
5599
5600 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005601 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005602
Steven Rostedt2cadf912008-12-01 22:20:19 -05005603 return ret;
5604}
5605
5606static const struct file_operations tracing_buffers_fops = {
5607 .open = tracing_buffers_open,
5608 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005609 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005610 .release = tracing_buffers_release,
5611 .splice_read = tracing_buffers_splice_read,
5612 .llseek = no_llseek,
5613};
5614
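/*
 * Read handler for per_cpu/cpuN/stats: prints entry, overrun, byte and
 * dropped/read event counts plus timestamps for one CPU's buffer.
 */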
Steven Rostedtc8d77182009-04-29 18:03:45 -04005615static ssize_t
5616tracing_stats_read(struct file *filp, char __user *ubuf,
5617 size_t count, loff_t *ppos)
5618{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005619 struct inode *inode = file_inode(filp);
5620 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005621 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005622 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005623 struct trace_seq *s;
5624 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005625 unsigned long long t;
5626 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005627
Li Zefane4f2d102009-06-15 10:57:28 +08005628 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005629 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005630 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005631
5632 trace_seq_init(s);
5633
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005634 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005635 trace_seq_printf(s, "entries: %ld\n", cnt);
5636
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005637 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005638 trace_seq_printf(s, "overrun: %ld\n", cnt);
5639
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005640 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005641 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5642
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005643 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005644 trace_seq_printf(s, "bytes: %ld\n", cnt);
5645
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005646 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005647 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005648 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005649 usec_rem = do_div(t, USEC_PER_SEC);
5650 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5651 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005652
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005653 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005654 usec_rem = do_div(t, USEC_PER_SEC);
5655 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5656 } else {
5657 /* counter or tsc mode for trace_clock */
5658 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005659 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005660
5661 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005662 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005663 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005664
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005665 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005666 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5667
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005668 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005669 trace_seq_printf(s, "read events: %ld\n", cnt);
5670
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005671 count = simple_read_from_buffer(ubuf, count, ppos,
5672 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04005673
5674 kfree(s);
5675
5676 return count;
5677}
5678
5679static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005680 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005681 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005682 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005683 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005684};
5685
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005686#ifdef CONFIG_DYNAMIC_FTRACE
5687
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005688int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005689{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005690 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005691}
5692
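/*
 * Backs dyn_ftrace_total_info: prints the counter this file was created
 * with (the number of dynamically patched ftrace call sites) followed
 * by any arch-specific details.
 */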
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005693static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005694tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005695 size_t cnt, loff_t *ppos)
5696{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005697 static char ftrace_dyn_info_buffer[1024];
5698 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005699 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005700 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005701 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005702 int r;
5703
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005704 mutex_lock(&dyn_info_mutex);
5705 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005706
Steven Rostedta26a2a22008-10-31 00:03:22 -04005707 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005708 buf[r++] = '\n';
5709
5710 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5711
5712 mutex_unlock(&dyn_info_mutex);
5713
5714 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005715}
5716
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005717static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005718 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005719 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005720 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005721};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005722#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005723
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005724#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5725static void
5726ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005727{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005728 tracing_snapshot();
5729}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005730
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005731static void
5732ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5733{
5734	unsigned long *count = (unsigned long *)data;
5735
5736 if (!*count)
5737 return;
5738
5739 if (*count != -1)
5740 (*count)--;
5741
5742 tracing_snapshot();
5743}
5744
5745static int
5746ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5747 struct ftrace_probe_ops *ops, void *data)
5748{
5749 long count = (long)data;
5750
5751 seq_printf(m, "%ps:", (void *)ip);
5752
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005753 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005754
5755 if (count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005756 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005757 else
5758 seq_printf(m, ":count=%ld\n", count);
5759
5760 return 0;
5761}
5762
5763static struct ftrace_probe_ops snapshot_probe_ops = {
5764 .func = ftrace_snapshot,
5765 .print = ftrace_snapshot_print,
5766};
5767
5768static struct ftrace_probe_ops snapshot_count_probe_ops = {
5769 .func = ftrace_count_snapshot,
5770 .print = ftrace_snapshot_print,
5771};
5772
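/*
 * Parser for the "snapshot" function command in set_ftrace_filter, e.g.
 *
 *	echo 'some_func:snapshot:5' > set_ftrace_filter
 *
 * takes a snapshot the first five times some_func is hit; no count
 * means unlimited, and a leading '!' removes the probe.
 */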
5773static int
5774ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5775 char *glob, char *cmd, char *param, int enable)
5776{
5777 struct ftrace_probe_ops *ops;
5778 void *count = (void *)-1;
5779 char *number;
5780 int ret;
5781
5782 /* hash funcs only work with set_ftrace_filter */
5783 if (!enable)
5784 return -EINVAL;
5785
5786 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5787
5788 if (glob[0] == '!') {
5789 unregister_ftrace_function_probe_func(glob+1, ops);
5790 return 0;
5791 }
5792
5793 if (!param)
5794 goto out_reg;
5795
5796 number = strsep(&param, ":");
5797
5798 if (!strlen(number))
5799 goto out_reg;
5800
5801 /*
5802 * We use the callback data field (which is a pointer)
5803 * as our counter.
5804 */
5805 ret = kstrtoul(number, 0, (unsigned long *)&count);
5806 if (ret)
5807 return ret;
5808
5809 out_reg:
5810 ret = register_ftrace_function_probe(glob, ops, count);
5811
5812 if (ret >= 0)
5813 alloc_snapshot(&global_trace);
5814
5815 return ret < 0 ? ret : 0;
5816}
5817
5818static struct ftrace_func_command ftrace_snapshot_cmd = {
5819 .name = "snapshot",
5820 .func = ftrace_trace_snapshot_callback,
5821};
5822
Tom Zanussi38de93a2013-10-24 08:34:18 -05005823static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005824{
5825 return register_ftrace_command(&ftrace_snapshot_cmd);
5826}
5827#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05005828static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005829#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005830
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05005831static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005832{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005833 if (WARN_ON(!tr->dir))
5834 return ERR_PTR(-ENODEV);
5835
5836 /* Top directory uses NULL as the parent */
5837 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5838 return NULL;
5839
5840	/* All sub-buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005841 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005842}
5843
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005844static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5845{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005846 struct dentry *d_tracer;
5847
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005848 if (tr->percpu_dir)
5849 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005850
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05005851 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05005852 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005853 return NULL;
5854
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005855 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005856
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005857 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005858 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005859
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005860 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005861}
5862
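/*
 * Like trace_create_file(), but stashes the cpu number (biased by one
 * so that cpu 0 is distinguishable from "no cpu") in the inode's i_cdev
 * for tracing_get_cpu() to recover.
 */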
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005863static struct dentry *
5864trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
5865 void *data, long cpu, const struct file_operations *fops)
5866{
5867 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
5868
5869 if (ret) /* See tracing_get_cpu() */
5870 ret->d_inode->i_cdev = (void *)(cpu + 1);
5871 return ret;
5872}
5873
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005874static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005875tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005876{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005877 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005878 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005879 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005880
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005881 if (!d_percpu)
5882 return;
5883
Steven Rostedtdd49a382010-10-20 21:51:26 -04005884 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005885 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005886 if (!d_cpu) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05005887 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005888 return;
5889 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005890
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005891 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005892 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02005893 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005894
5895 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005896 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005897 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005898
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005899 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005900 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005901
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005902 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005903 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005904
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005905 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005906 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005907
5908#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005909 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02005910 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005911
Oleg Nesterov649e9c72013-07-23 17:25:54 +02005912 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005913 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005914#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005915}
5916
Steven Rostedt60a11772008-05-12 21:20:44 +02005917#ifdef CONFIG_FTRACE_SELFTEST
5918/* Let selftest have access to static functions in this file */
5919#include "trace_selftest.c"
5920#endif
5921
Steven Rostedt577b7852009-02-26 23:43:05 -05005922struct trace_option_dentry {
5923 struct tracer_opt *opt;
5924 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005925 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005926 struct dentry *entry;
5927};
5928
5929static ssize_t
5930trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5931 loff_t *ppos)
5932{
5933 struct trace_option_dentry *topt = filp->private_data;
5934 char *buf;
5935
5936 if (topt->flags->val & topt->opt->bit)
5937 buf = "1\n";
5938 else
5939 buf = "0\n";
5940
5941 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5942}
5943
5944static ssize_t
5945trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5946 loff_t *ppos)
5947{
5948 struct trace_option_dentry *topt = filp->private_data;
5949 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005950 int ret;
5951
Peter Huewe22fe9b52011-06-07 21:58:27 +02005952 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5953 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005954 return ret;
5955
Li Zefan8d18eaa2009-12-08 11:17:06 +08005956 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005957 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005958
5959 if (!!(topt->flags->val & topt->opt->bit) != val) {
5960 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05005961 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005962 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005963 mutex_unlock(&trace_types_lock);
5964 if (ret)
5965 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005966 }
5967
5968 *ppos += cnt;
5969
5970 return cnt;
5971}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create tracefs '%s' entry\n", name);

	return ret;
}
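
/*
 * Sketch of typical usage: registering a read-only file in the tracing
 * directory. "demo" and demo_fops are hypothetical names; d_tracer
 * would come from tracing_init_dentry(), as in tracer_init_tracefs()
 * below. A NULL return only triggers the warning above, so callers can
 * treat creation as best effort.
 *
 *	struct dentry *dentry;
 *
 *	dentry = trace_create_file("demo", 0444, d_tracer,
 *				   NULL, &demo_fops);
 */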


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		tracefs_remove(topts[cnt].entry);

	kfree(topts);
}
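
/*
 * Sketch of the data these helpers consume. The names here are
 * hypothetical; the empty terminating entry is what the opts[cnt].name
 * scan in create_trace_option_files() relies on to find the end of the
 * array.
 *
 *	static struct tracer_opt demo_opts[] = {
 *		{ TRACER_OPT(demo_verbose, 0x1) },
 *		{ }
 *	};
 *
 *	static struct tracer_flags demo_flags = {
 *		.val  = 0,
 *		.opts = demo_opts,
 *	};
 */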

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
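
/*
 * Userspace sketch (illustrative only): rb_simple_fops backs the
 * "tracing_on" file created in init_tracer_tracefs() below, so a plain
 * write toggles the ring buffer without tearing anything down. The
 * mount point is an assumption.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/sys/kernel/tracing/tracing_on", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "0", 1);
 *		close(fd);
 *	}
 */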

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
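
/*
 * Sketch of the intended pairing, as new_instance_create() below uses
 * it: allocate both per-instance buffers up front, and release them
 * with the matching free on every error path. "demo_tr" is a
 * hypothetical trace_array for the example.
 *
 *	if (allocate_trace_buffers(&demo_tr, trace_buf_size) < 0)
 *		goto out_free_tr;
 *	...
 *  out_free_tr:
 *	free_trace_buffers(&demo_tr);
 */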

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but tracefs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but tracefs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to remove the same dir at
	 * the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
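
/*
 * Userspace sketch (illustrative): with the directory operations
 * hijacked above, ordinary mkdir(2)/rmdir(2) calls create and destroy
 * whole trace instances. The mount point and instance name are
 * assumptions.
 *
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	mkdir("/sys/kernel/tracing/instances/foo", 0755);
 *	(use instances/foo/trace, instances/foo/tracing_on, etc.)
 *	rmdir("/sys/kernel/tracing/instances/foo");
 */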

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

}

static struct vfsmount *trace_automount(void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!debugfs_initialized()))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	/* If the tracer was started via cmdline, create options for it here */
	if (global_trace.current_trace != &nop_trace)
		update_tracer_options(&global_trace, global_trace.current_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
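
/*
 * Neither notifier does anything unless ftrace_dump_on_oops is set.
 * A userspace sketch of enabling it at run time via the sysctl (the
 * "ftrace_dump_on_oops" boot parameter is the other common route):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/proc/sys/kernel/ftrace_dump_on_oops", O_WRONLY);
 *	if (fd >= 0) {
 *		write(fd, "1", 1);
 *		close(fd);
 *	}
 */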

/*
 * printk is limited to a max of 1024 bytes; we really don't need it
 * that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero terminated, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
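
/*
 * Sketch of the consumption pattern this sets up; ftrace_dump() below
 * follows this shape, with the locking, CPU disabling and seq resets
 * omitted here for brevity:
 *
 *	static struct trace_iterator iter;
 *
 *	trace_init_global_iter(&iter);
 *	while (!trace_empty(&iter)) {
 *		if (trace_find_next_entry_inc(&iter) &&
 *		    print_trace_line(&iter) != TRACE_TYPE_NO_CONSUME)
 *			trace_consume(&iter);
 *		trace_printk_seq(&iter.seq);
 *	}
 */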

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We read all that we can, and then
	 * release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
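
/*
 * Sketch of a typical caller: ftrace_dump() is exported GPL, so a
 * module can flush the trace buffers to the console on a fatal error.
 * DUMP_ALL dumps every CPU's buffer, DUMP_ORIG only the current CPU's.
 * The function name is hypothetical.
 *
 *	static void demo_fatal_error(void)
 *	{
 *		ftrace_dump(DUMP_ALL);
 *	}
 */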

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer requested at boot lives in an init section.
	 * This function is called at lateinit. If we did not find the
	 * boot tracer by now, clear the pointer, to prevent a later
	 * registration from accessing the name string that is about to
	 * be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);