/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

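/*
 * Example (illustrative): the dump can be enabled at boot or at run
 * time, matching set_ftrace_dump_on_oops() below:
 *
 *	ftrace_dump_on_oops			(boot: dump all CPUs)
 *	ftrace_dump_on_oops=orig_cpu		(boot: dump the oops CPU only)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(run time: all CPUs)
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops	(run time: oops CPU)
 */
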
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);


unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

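/*
 * Example (illustrative sketch; do_something_with() is hypothetical):
 * a reference must be held around any use of a trace_array instance
 * that can be removed, so callers pair the two calls:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;
 *	do_something_with(tr);
 *	trace_array_put(tr);
 *
 * trace_array_get() searches ftrace_trace_arrays under trace_types_lock
 * rather than blindly bumping the count, so it fails with -ENODEV once
 * @tr has been removed from the list.
 */
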
int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

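/*
 * Example (illustrative): a consuming reader brackets its accesses with
 * these primitives, using a cpu id for one buffer or
 * RING_BUFFER_ALL_CPUS for all of them:
 *
 *	trace_access_lock(cpu);
 *	...read or consume events of the @cpu ring buffer...
 *	trace_access_unlock(cpu);
 */
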
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

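/*
 * Note: callers normally do not use __trace_puts() directly.  The
 * trace_puts() macro (declared next to trace_printk() in the core
 * headers) picks __trace_bputs() below for compile-time constant
 * strings and falls back to __trace_puts() otherwise, e.g.:
 *
 *	trace_puts("reached the scheduler\n");
 */
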
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

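/*
 * Example (illustrative sketch; the latency variables are
 * hypothetical): freeze an interesting trace without stopping the live
 * trace, e.g. when a new worst-case latency is seen:
 *
 *	if (unlikely(latency > max_latency_seen)) {
 *		max_latency_seen = latency;
 *		tracing_snapshot();
 *	}
 *
 * The swapped-out buffer can then be read from the "snapshot" file in
 * the tracing directory while tracing continues in the live buffer.
 */
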
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) that
	 * we want to preserve.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

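/*
 * Example (illustrative; the condition is hypothetical): tracing_off()
 * is handy for freezing the ring buffer right where a bug is detected,
 * so the trace leading up to the problem is not overwritten:
 *
 *	if (WARN_ON(status != expected))
 *		tracing_off();
 */
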
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

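/*
 * Example (illustrative): both knobs above are boot parameters.
 * set_buf_size() goes through memparse(), so size suffixes work, and
 * the threshold is given in microseconds (converted to nanoseconds
 * above):
 *
 *	trace_buf_size=10M tracing_thresh=100
 */
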
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS
};

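/*
 * Example (illustrative): the names in trace_clocks[] are what the
 * trace_clock= boot parameter above and the "trace_clock" file in the
 * tracing directory accept:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */
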
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

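/*
 * Example (illustrative sketch; MAX_SIZE and process_token() are
 * hypothetical): a typical ->write() handler feeds user input through
 * the parser one token at a time:
 *
 *	struct trace_parser parser;
 *	ssize_t read;
 *
 *	if (trace_parser_get_init(&parser, MAX_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		process_token(parser.buffer);
 *	trace_parser_put(&parser);
 *
 * parser.cont tells such callers that the current token is incomplete
 * and that another write will continue it.
 */
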
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

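/*
 * Example (illustrative sketch, modeled on the nop tracer; all names
 * here are hypothetical):
 *
 *	static int mytracer_init(struct trace_array *tr)
 *	{
 *		return 0;
 *	}
 *
 *	static struct tracer mytracer __read_mostly = {
 *		.name	= "mytracer",
 *		.init	= mytracer_init,
 *	};
 *
 *	register_tracer(&mytracer);
 *
 * Once registered, "mytracer" shows up in the available_tracers file
 * and can be selected through current_tracer.
 */
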
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001295static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001296{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001297 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1298}
1299
1300static inline void set_cmdline(int idx, const char *cmdline)
1301{
1302 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1303}
1304
1305static int allocate_cmdlines_buffer(unsigned int val,
1306 struct saved_cmdlines_buffer *s)
1307{
1308 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1309 GFP_KERNEL);
1310 if (!s->map_cmdline_to_pid)
1311 return -ENOMEM;
1312
1313 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1314 if (!s->saved_cmdlines) {
1315 kfree(s->map_cmdline_to_pid);
1316 return -ENOMEM;
1317 }
1318
1319 s->cmdline_idx = 0;
1320 s->cmdline_num = val;
1321 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1322 sizeof(s->map_pid_to_cmdline));
1323 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1324 val * sizeof(*s->map_cmdline_to_pid));
1325
1326 return 0;
1327}
1328
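/*
 * Sizing sketch: for val entries the function above allocates
 * val * sizeof(unsigned) bytes of pid mappings plus val * TASK_COMM_LEN
 * bytes of command strings, so the 128-entry default costs roughly:
 *
 *	128 * sizeof(unsigned)	-> 512 bytes (4-byte unsigned assumed)
 *	128 * TASK_COMM_LEN	-> 2048 bytes (TASK_COMM_LEN is 16)
 *
 * on top of the fixed map_pid_to_cmdline array in the struct itself.
 */
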
1329static int trace_create_savedcmd(void)
1330{
1331 int ret;
1332
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001333 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001334 if (!savedcmd)
1335 return -ENOMEM;
1336
1337 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1338 if (ret < 0) {
1339 kfree(savedcmd);
1340 savedcmd = NULL;
1341 return -ENOMEM;
1342 }
1343
1344 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001345}
1346
Carsten Emdeb5130b12009-09-13 01:43:07 +02001347int is_tracing_stopped(void)
1348{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001349 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001350}
1351
Steven Rostedt0f048702008-11-05 16:05:44 -05001352/**
1353 * tracing_start - quick start of the tracer
1354 *
1355 * If tracing is enabled but was stopped by tracing_stop,
1356 * this will start the tracer back up.
1357 */
1358void tracing_start(void)
1359{
1360 struct ring_buffer *buffer;
1361 unsigned long flags;
1362
1363 if (tracing_disabled)
1364 return;
1365
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001366 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1367 if (--global_trace.stop_count) {
1368 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001369 /* Someone screwed up their debugging */
1370 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001371 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001372 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001373 goto out;
1374 }
1375
Steven Rostedta2f80712010-03-12 19:56:00 -05001376 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001377 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001378
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001379 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001380 if (buffer)
1381 ring_buffer_record_enable(buffer);
1382
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001383#ifdef CONFIG_TRACER_MAX_TRACE
1384 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001385 if (buffer)
1386 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001387#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001388
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001389 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001390
Steven Rostedt0f048702008-11-05 16:05:44 -05001391 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001392 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1393}
1394
1395static void tracing_start_tr(struct trace_array *tr)
1396{
1397 struct ring_buffer *buffer;
1398 unsigned long flags;
1399
1400 if (tracing_disabled)
1401 return;
1402
1403 /* If global, we need to also start the max tracer */
1404 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1405 return tracing_start();
1406
1407 raw_spin_lock_irqsave(&tr->start_lock, flags);
1408
1409 if (--tr->stop_count) {
1410 if (tr->stop_count < 0) {
1411 /* Someone screwed up their debugging */
1412 WARN_ON_ONCE(1);
1413 tr->stop_count = 0;
1414 }
1415 goto out;
1416 }
1417
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001418 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001419 if (buffer)
1420 ring_buffer_record_enable(buffer);
1421
1422 out:
1423 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001424}
1425
1426/**
1427 * tracing_stop - quick stop of the tracer
1428 *
1429 * Light weight way to stop tracing. Use in conjunction with
1430 * tracing_start.
1431 */
1432void tracing_stop(void)
1433{
1434 struct ring_buffer *buffer;
1435 unsigned long flags;
1436
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001437 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1438 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001439 goto out;
1440
Steven Rostedta2f80712010-03-12 19:56:00 -05001441 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001442 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001443
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001444 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001445 if (buffer)
1446 ring_buffer_record_disable(buffer);
1447
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001448#ifdef CONFIG_TRACER_MAX_TRACE
1449 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001450 if (buffer)
1451 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001452#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001453
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001454 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001455
Steven Rostedt0f048702008-11-05 16:05:44 -05001456 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001457 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1458}
1459
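/*
 * Pairing sketch (illustrative): stop_count is a nesting counter, so
 * stops and starts must balance:
 *
 *	tracing_stop();
 *	... read or dump the buffers ...
 *	tracing_start();
 *
 * Two nested tracing_stop() calls need two tracing_start() calls before
 * recording resumes.
 */
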
1460static void tracing_stop_tr(struct trace_array *tr)
1461{
1462 struct ring_buffer *buffer;
1463 unsigned long flags;
1464
1465 /* If global, we need to also stop the max tracer */
1466 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1467 return tracing_stop();
1468
1469 raw_spin_lock_irqsave(&tr->start_lock, flags);
1470 if (tr->stop_count++)
1471 goto out;
1472
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001473 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001474 if (buffer)
1475 ring_buffer_record_disable(buffer);
1476
1477 out:
1478 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001479}
1480
Ingo Molnare309b412008-05-12 21:20:51 +02001481void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001482
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001483static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001484{
Carsten Emdea635cf02009-03-18 09:00:41 +01001485 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001486
1487 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001488 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001489
1490 /*
1491 * It's not the end of the world if we don't get
1492 * the lock, but we also don't want to spin
1493 * nor do we want to disable interrupts,
1494 * so if we miss here, then better luck next time.
1495 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001496 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001497 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001498
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001499 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001500 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001501 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001502
Carsten Emdea635cf02009-03-18 09:00:41 +01001503 /*
1504 * Check whether the cmdline buffer at idx has a pid
1505 * mapped. We are going to overwrite that entry so we
1506 * need to clear the map_pid_to_cmdline. Otherwise we
1507 * would read the new comm for the old pid.
1508 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001509 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001510 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001511 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001512
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001513 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1514 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001515
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001516 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001517 }
1518
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001519 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001520
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001521 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001522
1523 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001524}
1525
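/*
 * Invariant sketch for the maps above: after a successful save,
 *
 *	savedcmd->map_pid_to_cmdline[tsk->pid] == idx
 *	savedcmd->map_cmdline_to_pid[idx] == tsk->pid
 *
 * which is why the previous owner's forward mapping must be cleared
 * before the slot at idx is recycled.
 */
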
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001526static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528 unsigned map;
1529
Steven Rostedt4ca53082009-03-16 19:20:15 -04001530 if (!pid) {
1531 strcpy(comm, "<idle>");
1532 return;
1533 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001534
Steven Rostedt74bf4072010-01-25 15:11:53 -05001535 if (WARN_ON_ONCE(pid < 0)) {
1536 strcpy(comm, "<XXX>");
1537 return;
1538 }
1539
Steven Rostedt4ca53082009-03-16 19:20:15 -04001540 if (pid > PID_MAX_DEFAULT) {
1541 strcpy(comm, "<...>");
1542 return;
1543 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001544
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001545 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001546 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001547 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001548 else
1549 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001550}
1551
1552void trace_find_cmdline(int pid, char comm[])
1553{
1554 preempt_disable();
1555 arch_spin_lock(&trace_cmdline_lock);
1556
1557 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001558
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001559 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001560 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001561}
1562
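/*
 * Usage sketch (illustrative): output code resolving a recorded pid to
 * a command name typically does
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *
 * The helper itself handles pid 0 ("<idle>") and pids that were never
 * recorded ("<...>"), so callers need no fallback.
 */
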
Ingo Molnare309b412008-05-12 21:20:51 +02001563void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001564{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001565 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001566 return;
1567
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001568 if (!__this_cpu_read(trace_cmdline_save))
1569 return;
1570
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001571 if (trace_save_cmdline(tsk))
1572 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001573}
1574
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001575void
Steven Rostedt38697052008-10-01 13:14:09 -04001576tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1577 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001578{
1579 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001580
Steven Rostedt777e2082008-09-29 23:02:42 -04001581 entry->preempt_count = pc & 0xff;
1582 entry->pid = (tsk) ? tsk->pid : 0;
1583 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001584#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001585 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001586#else
1587 TRACE_FLAG_IRQS_NOSUPPORT |
1588#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001589 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1590 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001591 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1592 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001593}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001594EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001595
Steven Rostedte77405a2009-09-02 14:17:06 -04001596struct ring_buffer_event *
1597trace_buffer_lock_reserve(struct ring_buffer *buffer,
1598 int type,
1599 unsigned long len,
1600 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001601{
1602 struct ring_buffer_event *event;
1603
Steven Rostedte77405a2009-09-02 14:17:06 -04001604 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001605 if (event != NULL) {
1606 struct trace_entry *ent = ring_buffer_event_data(event);
1607
1608 tracing_generic_entry_update(ent, flags, pc);
1609 ent->type = type;
1610 }
1611
1612 return event;
1613}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001614
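/*
 * Reserve/commit sketch (illustrative): the usual pattern built on the
 * helper above is
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;
 *	__buffer_unlock_commit(buffer, event);
 *
 * A failed reserve is simply dropped, never retried.
 */
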
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001615void
1616__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1617{
1618 __this_cpu_write(trace_cmdline_save, true);
1619 ring_buffer_unlock_commit(buffer, event);
1620}
1621
Steven Rostedte77405a2009-09-02 14:17:06 -04001622static inline void
1623__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1624 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001625 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001626{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001627 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001628
Steven Rostedte77405a2009-09-02 14:17:06 -04001629 ftrace_trace_stack(buffer, flags, 6, pc);
1630 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001631}
1632
Steven Rostedte77405a2009-09-02 14:17:06 -04001633void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1634 struct ring_buffer_event *event,
1635 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001636{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001637 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001638}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001639EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001640
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001641static struct ring_buffer *temp_buffer;
1642
Steven Rostedtef5580d2009-02-27 19:38:04 -05001643struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001644trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1645 struct ftrace_event_file *ftrace_file,
1646 int type, unsigned long len,
1647 unsigned long flags, int pc)
1648{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001649 struct ring_buffer_event *entry;
1650
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001651 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001652 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001653 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001654 /*
1655 * If tracing is off, but we have triggers enabled
1656 * we still need to look at the event data. Use the temp_buffer
1657	 * to store the trace event for the trigger to use. It's
1658	 * recursion safe and will not be recorded anywhere.
1659 */
1660 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1661 *current_rb = temp_buffer;
1662 entry = trace_buffer_lock_reserve(*current_rb,
1663 type, len, flags, pc);
1664 }
1665 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001666}
1667EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1668
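/*
 * Usage sketch (illustrative): callers must commit against whichever
 * buffer comes back, since *current_rb may point at temp_buffer rather
 * than the trace buffer:
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *						type, len, flags, pc);
 *	if (!event)
 *		return;
 */
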
1669struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001670trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1671 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001672 unsigned long flags, int pc)
1673{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001674 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001675 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001676 type, len, flags, pc);
1677}
Steven Rostedt94487d62009-05-05 19:22:53 -04001678EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001679
Steven Rostedte77405a2009-09-02 14:17:06 -04001680void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1681 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001682 unsigned long flags, int pc)
1683{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001684 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001685}
Steven Rostedt94487d62009-05-05 19:22:53 -04001686EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001687
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001688void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1689 struct ring_buffer_event *event,
1690 unsigned long flags, int pc,
1691 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001692{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001693 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001694
1695 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1696 ftrace_trace_userstack(buffer, flags, pc);
1697}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001698EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001699
Steven Rostedte77405a2009-09-02 14:17:06 -04001700void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1701 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001702{
Steven Rostedte77405a2009-09-02 14:17:06 -04001703 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001704}
Steven Rostedt12acd472009-04-17 16:01:56 -04001705EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001706
Ingo Molnare309b412008-05-12 21:20:51 +02001707void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001708trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001709 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1710 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001711{
Tom Zanussie1112b42009-03-31 00:48:49 -05001712 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001713 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001714 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001715 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001716
Steven Rostedtd7690412008-10-01 00:29:53 -04001717 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001718 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001719 return;
1720
Steven Rostedte77405a2009-09-02 14:17:06 -04001721 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001722 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001723 if (!event)
1724 return;
1725 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001726 entry->ip = ip;
1727 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001728
Tom Zanussif306cc82013-10-24 08:34:17 -05001729 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001730 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001731}
1732
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001733#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001734
1735#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1736struct ftrace_stack {
1737 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1738};
1739
1740static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1741static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1742
Steven Rostedte77405a2009-09-02 14:17:06 -04001743static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001744 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001745 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001746{
Tom Zanussie1112b42009-03-31 00:48:49 -05001747 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001748 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001749 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001750 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001751 int use_stack;
1752 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001753
1754 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001755 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001756
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001757 /*
1758	 * Since events can happen in NMIs, there's no safe way to
1759	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1760	 * or NMI comes in, it will just have to use the default
1761	 * FTRACE_STACK_ENTRIES sized stack in the event itself.
1762 */
1763 preempt_disable_notrace();
1764
Shan Wei82146522012-11-19 13:21:01 +08001765 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001766 /*
1767 * We don't need any atomic variables, just a barrier.
1768 * If an interrupt comes in, we don't care, because it would
1769 * have exited and put the counter back to what we want.
1770 * We just need a barrier to keep gcc from moving things
1771 * around.
1772 */
1773 barrier();
1774 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001775 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001776 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1777
1778 if (regs)
1779 save_stack_trace_regs(regs, &trace);
1780 else
1781 save_stack_trace(&trace);
1782
1783 if (trace.nr_entries > size)
1784 size = trace.nr_entries;
1785 } else
1786 /* From now on, use_stack is a boolean */
1787 use_stack = 0;
1788
1789 size *= sizeof(unsigned long);
1790
1791 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1792 sizeof(*entry) + size, flags, pc);
1793 if (!event)
1794 goto out;
1795 entry = ring_buffer_event_data(event);
1796
1797 memset(&entry->caller, 0, size);
1798
1799 if (use_stack)
1800 memcpy(&entry->caller, trace.entries,
1801 trace.nr_entries * sizeof(unsigned long));
1802 else {
1803 trace.max_entries = FTRACE_STACK_ENTRIES;
1804 trace.entries = entry->caller;
1805 if (regs)
1806 save_stack_trace_regs(regs, &trace);
1807 else
1808 save_stack_trace(&trace);
1809 }
1810
1811 entry->size = trace.nr_entries;
1812
Tom Zanussif306cc82013-10-24 08:34:17 -05001813 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001814 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001815
1816 out:
1817 /* Again, don't let gcc optimize things here */
1818 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001819 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001820 preempt_enable_notrace();
1821
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001822}
1823
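/*
 * Nesting sketch for the reservation above: the first user on a CPU
 * sees __this_cpu_inc_return() == 1 and gets the large per-cpu
 * ftrace_stack; anything that interrupts it falls back to the small
 * on-event array:
 *
 *	task context:	use_stack == 1 -> per-cpu ftrace_stack.calls
 *	nested NMI:	use_stack == 2 -> entry->caller in the event
 *
 * No atomics are needed since the counter is only ever touched by its
 * own CPU, interrupts included.
 */
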
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001824void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1825 int skip, int pc, struct pt_regs *regs)
1826{
1827 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1828 return;
1829
1830 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1831}
1832
Steven Rostedte77405a2009-09-02 14:17:06 -04001833void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1834 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001835{
1836 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1837 return;
1838
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001839 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001840}
1841
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001842void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1843 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001844{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001845 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001846}
1847
Steven Rostedt03889382009-12-11 09:48:22 -05001848/**
1849 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001850 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001851 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001852void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001853{
1854 unsigned long flags;
1855
1856 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001857 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001858
1859 local_save_flags(flags);
1860
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001861 /*
1862 * Skip 3 more, seems to get us at the caller of
1863 * this function.
1864 */
1865 skip += 3;
1866 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1867 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001868}
1869
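/*
 * Usage sketch (illustrative): debugging code can drop a backtrace into
 * the trace buffer from almost anywhere with
 *
 *	trace_dump_stack(0);
 *
 * A non-zero skip hides that many helper frames, in addition to the
 * three internal frames skipped above.
 */
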
Steven Rostedt91e86e52010-11-10 12:56:12 +01001870static DEFINE_PER_CPU(int, user_stack_count);
1871
Steven Rostedte77405a2009-09-02 14:17:06 -04001872void
1873ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001874{
Tom Zanussie1112b42009-03-31 00:48:49 -05001875 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001876 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001877 struct userstack_entry *entry;
1878 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001879
1880 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1881 return;
1882
Steven Rostedtb6345872010-03-12 20:03:30 -05001883 /*
1884	 * NMIs cannot handle page faults, even with fixups.
1885	 * Saving the user stack can (and often does) fault.
1886 */
1887 if (unlikely(in_nmi()))
1888 return;
1889
Steven Rostedt91e86e52010-11-10 12:56:12 +01001890 /*
1891 * prevent recursion, since the user stack tracing may
1892 * trigger other kernel events.
1893 */
1894 preempt_disable();
1895 if (__this_cpu_read(user_stack_count))
1896 goto out;
1897
1898 __this_cpu_inc(user_stack_count);
1899
Steven Rostedte77405a2009-09-02 14:17:06 -04001900 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001901 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001902 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001903 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001904 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001905
Steven Rostedt48659d32009-09-11 11:36:23 -04001906 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001907 memset(&entry->caller, 0, sizeof(entry->caller));
1908
1909 trace.nr_entries = 0;
1910 trace.max_entries = FTRACE_STACK_ENTRIES;
1911 trace.skip = 0;
1912 trace.entries = entry->caller;
1913
1914 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001915 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001916 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001917
Li Zefan1dbd1952010-12-09 15:47:56 +08001918 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001919 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001920 out:
1921 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001922}
1923
Hannes Eder4fd27352009-02-10 19:44:12 +01001924#ifdef UNUSED
1925static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001926{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001927 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001928}
Hannes Eder4fd27352009-02-10 19:44:12 +01001929#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001930
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001931#endif /* CONFIG_STACKTRACE */
1932
Steven Rostedt07d777f2011-09-22 14:01:55 -04001933/* created for use with alloc_percpu */
1934struct trace_buffer_struct {
1935 char buffer[TRACE_BUF_SIZE];
1936};
1937
1938static struct trace_buffer_struct *trace_percpu_buffer;
1939static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1940static struct trace_buffer_struct *trace_percpu_irq_buffer;
1941static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1942
1943/*
1944 * The buffer used is dependent on the context. There is a per cpu
1945	 * buffer for normal context, softirq context, hard irq context and
1946	 * for NMI context. This allows for lockless recording.
1947 *
1948 * Note, if the buffers failed to be allocated, then this returns NULL
1949 */
1950static char *get_trace_buf(void)
1951{
1952 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001953
1954 /*
1955 * If we have allocated per cpu buffers, then we do not
1956 * need to do any locking.
1957 */
1958 if (in_nmi())
1959 percpu_buffer = trace_percpu_nmi_buffer;
1960 else if (in_irq())
1961 percpu_buffer = trace_percpu_irq_buffer;
1962 else if (in_softirq())
1963 percpu_buffer = trace_percpu_sirq_buffer;
1964 else
1965 percpu_buffer = trace_percpu_buffer;
1966
1967 if (!percpu_buffer)
1968 return NULL;
1969
Shan Weid8a03492012-11-13 09:53:04 +08001970 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001971}
1972
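/*
 * Selection sketch: the context tests above pick one of four per-cpu
 * buffers, so nested contexts never share storage:
 *
 *	in_nmi()	-> trace_percpu_nmi_buffer
 *	in_irq()	-> trace_percpu_irq_buffer
 *	in_softirq()	-> trace_percpu_sirq_buffer
 *	otherwise	-> trace_percpu_buffer
 *
 * An NMI interrupting an irq-context trace_printk() therefore formats
 * into its own buffer instead of corrupting the interrupted one.
 */
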
1973static int alloc_percpu_trace_buffer(void)
1974{
1975 struct trace_buffer_struct *buffers;
1976 struct trace_buffer_struct *sirq_buffers;
1977 struct trace_buffer_struct *irq_buffers;
1978 struct trace_buffer_struct *nmi_buffers;
1979
1980 buffers = alloc_percpu(struct trace_buffer_struct);
1981 if (!buffers)
1982 goto err_warn;
1983
1984 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1985 if (!sirq_buffers)
1986 goto err_sirq;
1987
1988 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1989 if (!irq_buffers)
1990 goto err_irq;
1991
1992 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1993 if (!nmi_buffers)
1994 goto err_nmi;
1995
1996 trace_percpu_buffer = buffers;
1997 trace_percpu_sirq_buffer = sirq_buffers;
1998 trace_percpu_irq_buffer = irq_buffers;
1999 trace_percpu_nmi_buffer = nmi_buffers;
2000
2001 return 0;
2002
2003 err_nmi:
2004 free_percpu(irq_buffers);
2005 err_irq:
2006 free_percpu(sirq_buffers);
2007 err_sirq:
2008 free_percpu(buffers);
2009 err_warn:
2010 WARN(1, "Could not allocate percpu trace_printk buffer");
2011 return -ENOMEM;
2012}
2013
Steven Rostedt81698832012-10-11 10:15:05 -04002014static int buffers_allocated;
2015
Steven Rostedt07d777f2011-09-22 14:01:55 -04002016void trace_printk_init_buffers(void)
2017{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002018 if (buffers_allocated)
2019 return;
2020
2021 if (alloc_percpu_trace_buffer())
2022 return;
2023
Steven Rostedt2184db42014-05-28 13:14:40 -04002024 /* trace_printk() is for debug use only. Don't use it in production. */
2025
2026 pr_warning("\n**********************************************************\n");
2027 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2028 pr_warning("** **\n");
2029 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2030 pr_warning("** **\n");
2031 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2032	 pr_warning("** unsafe for production use. **\n");
2033 pr_warning("** **\n");
2034 pr_warning("** If you see this message and you are not debugging **\n");
2035 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2036 pr_warning("** **\n");
2037 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2038 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002039
Steven Rostedtb382ede62012-10-10 21:44:34 -04002040 /* Expand the buffers to set size */
2041 tracing_update_buffers();
2042
Steven Rostedt07d777f2011-09-22 14:01:55 -04002043 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002044
2045 /*
2046 * trace_printk_init_buffers() can be called by modules.
2047 * If that happens, then we need to start cmdline recording
2048 * directly here. If the global_trace.buffer is already
2049 * allocated here, then this was called by module code.
2050 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002051 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002052 tracing_start_cmdline_record();
2053}
2054
2055void trace_printk_start_comm(void)
2056{
2057 /* Start tracing comms if trace printk is set */
2058 if (!buffers_allocated)
2059 return;
2060 tracing_start_cmdline_record();
2061}
2062
2063static void trace_printk_start_stop_comm(int enabled)
2064{
2065 if (!buffers_allocated)
2066 return;
2067
2068 if (enabled)
2069 tracing_start_cmdline_record();
2070 else
2071 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002072}
2073
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002074/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002075 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002076 *
2077 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002078int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002079{
Tom Zanussie1112b42009-03-31 00:48:49 -05002080 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002081 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002082 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002083 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002084 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002085 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002086 char *tbuffer;
2087 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002088
2089 if (unlikely(tracing_selftest_running || tracing_disabled))
2090 return 0;
2091
2092 /* Don't pollute graph traces with trace_vprintk internals */
2093 pause_graph_tracing();
2094
2095 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002096 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002097
Steven Rostedt07d777f2011-09-22 14:01:55 -04002098 tbuffer = get_trace_buf();
2099 if (!tbuffer) {
2100 len = 0;
2101 goto out;
2102 }
2103
2104 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2105
2106 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002107 goto out;
2108
Steven Rostedt07d777f2011-09-22 14:01:55 -04002109 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002110 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002111 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002112 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2113 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002114 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002115 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002116 entry = ring_buffer_event_data(event);
2117 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002118 entry->fmt = fmt;
2119
Steven Rostedt07d777f2011-09-22 14:01:55 -04002120 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002121 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002122 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002123 ftrace_trace_stack(buffer, flags, 6, pc);
2124 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002125
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002126out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002127 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002128 unpause_graph_tracing();
2129
2130 return len;
2131}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002132EXPORT_SYMBOL_GPL(trace_vbprintk);
2133
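/*
 * Format sketch (illustrative): trace_printk() with arguments lands
 * here; vbin_printf() packs only the raw argument words into buf[] and
 * entry->fmt keeps a pointer to the format string, so
 *
 *	trace_printk("x=%d y=%d\n", x, y);
 *
 * records two binary words plus a pointer instead of formatted text.
 * The text is only rendered later, at read time.
 */
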
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002134static int
2135__trace_array_vprintk(struct ring_buffer *buffer,
2136 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002137{
Tom Zanussie1112b42009-03-31 00:48:49 -05002138 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002139 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002140 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002141 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002142 unsigned long flags;
2143 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002144
2145 if (tracing_disabled || tracing_selftest_running)
2146 return 0;
2147
Steven Rostedt07d777f2011-09-22 14:01:55 -04002148 /* Don't pollute graph traces with trace_vprintk internals */
2149 pause_graph_tracing();
2150
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002151 pc = preempt_count();
2152 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002153
Steven Rostedt07d777f2011-09-22 14:01:55 -04002154
2155 tbuffer = get_trace_buf();
2156 if (!tbuffer) {
2157 len = 0;
2158 goto out;
2159 }
2160
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002161 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002162
Steven Rostedt07d777f2011-09-22 14:01:55 -04002163 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002164 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002165 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002166 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002167 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002168 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002169 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002170 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002171
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002172 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002173 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002174 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002175 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002176 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002177 out:
2178 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002179 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002180
2181 return len;
2182}
Steven Rostedt659372d2009-09-03 19:11:07 -04002183
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002184int trace_array_vprintk(struct trace_array *tr,
2185 unsigned long ip, const char *fmt, va_list args)
2186{
2187 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2188}
2189
2190int trace_array_printk(struct trace_array *tr,
2191 unsigned long ip, const char *fmt, ...)
2192{
2193 int ret;
2194 va_list ap;
2195
2196 if (!(trace_flags & TRACE_ITER_PRINTK))
2197 return 0;
2198
2199 va_start(ap, fmt);
2200 ret = trace_array_vprintk(tr, ip, fmt, ap);
2201 va_end(ap);
2202 return ret;
2203}
2204
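/*
 * Usage sketch (illustrative): code writing into a specific trace
 * instance rather than the global buffer can do
 *
 *	trace_array_printk(tr, _THIS_IP_, "reset on cpu %d\n", cpu);
 *
 * which honors the TRACE_ITER_PRINTK flag just like trace_printk().
 */
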
2205int trace_array_printk_buf(struct ring_buffer *buffer,
2206 unsigned long ip, const char *fmt, ...)
2207{
2208 int ret;
2209 va_list ap;
2210
2211 if (!(trace_flags & TRACE_ITER_PRINTK))
2212 return 0;
2213
2214 va_start(ap, fmt);
2215 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2216 va_end(ap);
2217 return ret;
2218}
2219
Steven Rostedt659372d2009-09-03 19:11:07 -04002220int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2221{
Steven Rostedta813a152009-10-09 01:41:35 -04002222 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002223}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002224EXPORT_SYMBOL_GPL(trace_vprintk);
2225
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002226static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002227{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002228 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2229
Steven Rostedt5a90f572008-09-03 17:42:51 -04002230 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002231 if (buf_iter)
2232 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002233}
2234
Ingo Molnare309b412008-05-12 21:20:51 +02002235static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002236peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2237 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002238{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002239 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002240 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002241
Steven Rostedtd7690412008-10-01 00:29:53 -04002242 if (buf_iter)
2243 event = ring_buffer_iter_peek(buf_iter, ts);
2244 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002245 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002246 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002247
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002248 if (event) {
2249 iter->ent_size = ring_buffer_event_length(event);
2250 return ring_buffer_event_data(event);
2251 }
2252 iter->ent_size = 0;
2253 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002254}
Steven Rostedtd7690412008-10-01 00:29:53 -04002255
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002256static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002257__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2258 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002259{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002260 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002261 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002262 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002263 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002264 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002265 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002266 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002267 int cpu;
2268
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002269 /*
2270	 * If we are in a per_cpu trace file, don't bother iterating over
2271	 * all cpus; just peek at the one cpu directly.
2272 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002273 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002274 if (ring_buffer_empty_cpu(buffer, cpu_file))
2275 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002276 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002277 if (ent_cpu)
2278 *ent_cpu = cpu_file;
2279
2280 return ent;
2281 }
2282
Steven Rostedtab464282008-05-12 21:21:00 +02002283 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002284
2285 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002286 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002287
Steven Rostedtbc21b472010-03-31 19:49:26 -04002288 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002289
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002290 /*
2291 * Pick the entry with the smallest timestamp:
2292 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002293 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002294 next = ent;
2295 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002296 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002297 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002298 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002299 }
2300 }
2301
Steven Rostedt12b5da32012-03-27 10:43:28 -04002302 iter->ent_size = next_size;
2303
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002304 if (ent_cpu)
2305 *ent_cpu = next_cpu;
2306
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002307 if (ent_ts)
2308 *ent_ts = next_ts;
2309
Steven Rostedtbc21b472010-03-31 19:49:26 -04002310 if (missing_events)
2311 *missing_events = next_lost;
2312
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002313 return next;
2314}
2315
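/*
 * Merge sketch: the loop above is a k-way merge across the per-cpu
 * buffers, always yielding the entry with the smallest timestamp.
 * With head timestamps of, say,
 *
 *	cpu0: 1105, cpu1: 1098, cpu2: (empty)
 *
 * the cpu1 entry is returned and next_cpu/next_ts/next_size are taken
 * from it, so the iterator replays events in global time order.
 */
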
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002316/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002317struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2318 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002319{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002320 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002321}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002322
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002323/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002324void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002325{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002326 iter->ent = __find_next_entry(iter, &iter->cpu,
2327 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002328
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002329 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002330 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002331
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002332 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002333}
2334
Ingo Molnare309b412008-05-12 21:20:51 +02002335static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002336{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002337 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002338 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002339}
2340
Ingo Molnare309b412008-05-12 21:20:51 +02002341static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002342{
2343 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002344 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002345 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002346
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002347 WARN_ON_ONCE(iter->leftover);
2348
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002349 (*pos)++;
2350
2351 /* can't go backwards */
2352 if (iter->idx > i)
2353 return NULL;
2354
2355 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002356 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002357 else
2358 ent = iter;
2359
2360 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002361 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002362
2363 iter->pos = *pos;
2364
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002365 return ent;
2366}
2367
Jason Wessel955b61e2010-08-05 09:22:23 -05002368void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002369{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002370 struct ring_buffer_event *event;
2371 struct ring_buffer_iter *buf_iter;
2372 unsigned long entries = 0;
2373 u64 ts;
2374
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002375 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002376
Steven Rostedt6d158a82012-06-27 20:46:14 -04002377 buf_iter = trace_buffer_iter(iter, cpu);
2378 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002379 return;
2380
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002381 ring_buffer_iter_reset(buf_iter);
2382
2383 /*
2384 * We could have the case with the max latency tracers
2385	 * that a reset never took place on a cpu. This is evidenced
2386 * by the timestamp being before the start of the buffer.
2387 */
2388 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002389 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002390 break;
2391 entries++;
2392 ring_buffer_read(buf_iter, NULL);
2393 }
2394
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002395 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002396}
2397
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002398/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002399 * The current tracer is copied so that a global lock does not
2400 * have to be held all around.
2401 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002402static void *s_start(struct seq_file *m, loff_t *pos)
2403{
2404 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002405 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002406 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002407 void *p = NULL;
2408 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002409 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002410
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002411 /*
2412 * copy the tracer to avoid using a global lock all around.
2413 * iter->trace is a copy of current_trace, the pointer to the
2414 * name may be used instead of a strcmp(), as iter->trace->name
2415 * will point to the same string as current_trace->name.
2416 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002417 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002418 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2419 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002420 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002422#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002423 if (iter->snapshot && iter->trace->use_max_tr)
2424 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002425#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002426
2427 if (!iter->snapshot)
2428 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002429
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002430 if (*pos != iter->pos) {
2431 iter->ent = NULL;
2432 iter->cpu = 0;
2433 iter->idx = -1;
2434
Steven Rostedtae3b5092013-01-23 15:22:59 -05002435 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002436 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002437 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002438 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002439 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002440
Lai Jiangshanac91d852010-03-02 17:54:50 +08002441 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2443 ;
2444
2445 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002446 /*
2447 * If we overflowed the seq_file before, then we want
2448 * to just reuse the trace_seq buffer again.
2449 */
2450 if (iter->leftover)
2451 p = iter;
2452 else {
2453 l = *pos - 1;
2454 p = s_next(m, p, &l);
2455 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002456 }
2457
Lai Jiangshan4f535962009-05-18 19:35:34 +08002458 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002459 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002460 return p;
2461}
2462
2463static void s_stop(struct seq_file *m, void *p)
2464{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002465 struct trace_iterator *iter = m->private;
2466
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002467#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002468 if (iter->snapshot && iter->trace->use_max_tr)
2469 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002470#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002471
2472 if (!iter->snapshot)
2473 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002474
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002475 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002476 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002477}
2478
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002479static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002480get_total_entries(struct trace_buffer *buf,
2481 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002482{
2483 unsigned long count;
2484 int cpu;
2485
2486 *total = 0;
2487 *entries = 0;
2488
2489 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002490 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002491 /*
2492 * If this buffer has skipped entries, then we hold all
2493 * entries for the trace and we need to ignore the
2494 * ones before the time stamp.
2495 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002496 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2497 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002498 /* total is the same as the entries */
2499 *total += count;
2500 } else
2501 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002502 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002503 *entries += count;
2504 }
2505}
2506
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /          \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

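/*
 * With the "annotate" option set, emit a one-line marker the first time
 * output crosses into a CPU buffer that has not been printed yet.
 */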
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

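/* Default human-readable formatting of a single trace entry */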
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

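/* Alternate output styles (raw, hex, binary), selected via trace_options */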
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

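/* Return 1 if every ring buffer covered by the iterator is empty, else 0 */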
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

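/*
 * seq_file show callback: print the header block on the first call,
 * flush a leftover line that overflowed the previous seq_file buffer,
 * or format the current entry.
 */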
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

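/* seq_file callbacks backing reads of the "trace" file */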
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

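/*
 * Common open path for trace reads (and, with @snapshot set, snapshot
 * reads): allocate the iterator, take a private copy of the current
 * tracer, pick the main or max buffer, and pause tracing unless a
 * snapshot is being read.
 */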
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return tracing_disabled ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

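/*
 * Release a "trace" read: finish the per-cpu buffer iterators, let the
 * tracer run its close callback, and restart tracing if the open had
 * stopped it.
 */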
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

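/* Open "trace": a write-mode open with O_TRUNC erases the buffer first */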
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

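/* seq_file iteration over registered tracers, backing "available_tracers" */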
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

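/*
 * Writes to "trace" only matter at open time (O_TRUNC clears the
 * buffer), so the write itself just accepts and discards the data.
 */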
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

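/* Readers seek through the seq_file; writers stay pinned at offset 0 */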
loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

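/* List every core and tracer-specific option as "name" or "noname" */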
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

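/*
 * Flip one bit in the global trace_flags, giving the current tracer a
 * chance to refuse the change, and propagate the side effects that the
 * cmdline-record, overwrite and printk options require.
 */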
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

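/* Parse a single "option" or "nooption" token and apply it */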
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

Ingo Molnar7bd2f242008-05-12 21:20:45 +02003605static const char readme_msg[] =
3606 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003607 "# echo 0 > tracing_on : quick way to disable tracing\n"
3608 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3609 " Important files:\n"
3610 " trace\t\t\t- The static contents of the buffer\n"
3611 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3612 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3613 " current_tracer\t- function and latency tracers\n"
3614 " available_tracers\t- list of configured tracers for current_tracer\n"
3615 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3616 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3617 " trace_clock\t\t-change the clock used to order events\n"
3618 " local: Per cpu clock but may not be synced across CPUs\n"
3619 " global: Synced across CPUs but slows tracing down.\n"
3620 " counter: Not a clock, but just an increment\n"
3621 " uptime: Jiffy counter from time of boot\n"
3622 " perf: Same clock that perf events use\n"
3623#ifdef CONFIG_X86_64
3624 " x86-tsc: TSC cycle counter\n"
3625#endif
3626 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3627 " tracing_cpumask\t- Limit which CPUs to trace\n"
3628 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3629 "\t\t\t Remove sub-buffer with rmdir\n"
3630 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003631 "\t\t\t Disable an option by prefixing 'no' to the\n"
3632 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003633 " saved_cmdlines_size\t- echo the number of entries to keep in the comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003634#ifdef CONFIG_DYNAMIC_FTRACE
3635 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003636 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3637 "\t\t\t functions\n"
3638 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3639 "\t modules: Can select a group via module\n"
3640 "\t Format: :mod:<module-name>\n"
3641 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3642 "\t triggers: a command to perform when function is hit\n"
3643 "\t Format: <function>:<trigger>[:count]\n"
3644 "\t trigger: traceon, traceoff\n"
3645 "\t\t enable_event:<system>:<event>\n"
3646 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003647#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003648 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003649#endif
3650#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003651 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003652#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003653 "\t\t dump\n"
3654 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003655 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3656 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3657 "\t The first one will disable tracing every time do_fault is hit\n"
3658 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3659 "\t The first time do trap is hit and it disables tracing, the\n"
3660 "\t counter will decrement to 2. If tracing is already disabled,\n"
3661 "\t the counter will not decrement. It only decrements when the\n"
3662 "\t trigger did work\n"
3663 "\t To remove trigger without count:\n"
3664 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3665 "\t To remove trigger with a count:\n"
3666 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003667 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003668 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3669 "\t modules: Can select a group via module command :mod:\n"
3670 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003671#endif /* CONFIG_DYNAMIC_FTRACE */
3672#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003673 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3674 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003675#endif
3676#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3677 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003678 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003679 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3680#endif
3681#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003682 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3683 "\t\t\t snapshot buffer. Read the contents for more\n"
3684 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003685#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003686#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003687 " stack_trace\t\t- Shows the max stack trace when active\n"
3688 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003689 "\t\t\t Write into this file to reset the max size (trigger a\n"
3690 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003691#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003692 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3693 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003694#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003695#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003696 " events/\t\t- Directory containing all trace event subsystems:\n"
3697 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3698 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003699 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3700 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003701 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003702 " events/<system>/<event>/\t- Directory containing control files for\n"
3703 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003704 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3705 " filter\t\t- If set, only events passing filter are traced\n"
3706 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003707 "\t Format: <trigger>[:count][if <filter>]\n"
3708 "\t trigger: traceon, traceoff\n"
3709 "\t enable_event:<system>:<event>\n"
3710 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003711#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003712 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003713#endif
3714#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003715 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003716#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003717 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3718 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3719 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3720 "\t events/block/block_unplug/trigger\n"
3721 "\t The first disables tracing every time block_unplug is hit.\n"
3722 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3723 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3724 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3725 "\t Like function triggers, the counter is only decremented if it\n"
3726 "\t enabled or disabled tracing.\n"
3727 "\t To remove a trigger without a count:\n"
3728 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3729 "\t To remove a trigger with a count:\n"
3730 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3731 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003732;
3733
3734static ssize_t
3735tracing_readme_read(struct file *filp, char __user *ubuf,
3736 size_t cnt, loff_t *ppos)
3737{
3738 return simple_read_from_buffer(ubuf, cnt, ppos,
3739 readme_msg, strlen(readme_msg));
3740}
3741
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003742static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003743 .open = tracing_open_generic,
3744 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003745 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003746};
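/*
 * Illustrative usage (editor's note, not from the original source): the
 * README above is served by simple_read_from_buffer(), so the whole
 * mini-HOWTO can be dumped like any regular file, e.g.:
 *
 *   cat /sys/kernel/debug/tracing/README
 *
 * Partial and repeated reads work because *ppos tracks the offset into
 * the static string.
 */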
3747
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003748static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003749{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003750 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003751
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003752 if (*pos || m->count)
3753 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003754
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003755 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003756
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003757 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3758 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003759 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003760 continue;
3761
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003762 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003763 }
3764
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003765 return NULL;
3766}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003767
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003768static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3769{
3770 void *v;
3771 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003772
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003773 preempt_disable();
3774 arch_spin_lock(&trace_cmdline_lock);
3775
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003776 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003777 while (l <= *pos) {
3778 v = saved_cmdlines_next(m, v, &l);
3779 if (!v)
3780 return NULL;
3781 }
3782
3783 return v;
3784}
3785
3786static void saved_cmdlines_stop(struct seq_file *m, void *v)
3787{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003788 arch_spin_unlock(&trace_cmdline_lock);
3789 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003790}
3791
3792static int saved_cmdlines_show(struct seq_file *m, void *v)
3793{
3794 char buf[TASK_COMM_LEN];
3795 unsigned int *pid = v;
3796
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003797 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003798 seq_printf(m, "%d %s\n", *pid, buf);
3799 return 0;
3800}
3801
3802static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3803 .start = saved_cmdlines_start,
3804 .next = saved_cmdlines_next,
3805 .stop = saved_cmdlines_stop,
3806 .show = saved_cmdlines_show,
3807};
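/*
 * Editor's note: these seq_operations follow the usual pattern --
 * ->start() takes trace_cmdline_lock and fast-forwards to *pos,
 * ->next() skips slots that are -1 or NO_CMDLINE_MAP, and ->stop()
 * releases the lock -- so the pid/comm table cannot change in the
 * middle of a dump.
 */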
3808
3809static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3810{
3811 if (tracing_disabled)
3812 return -ENODEV;
3813
3814 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003815}
3816
3817static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003818 .open = tracing_saved_cmdlines_open,
3819 .read = seq_read,
3820 .llseek = seq_lseek,
3821 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003822};
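/*
 * Illustrative output (editor's sketch, values hypothetical): each
 * iteration prints "<pid> <comm>", so a read looks like:
 *
 *   # cat /sys/kernel/debug/tracing/saved_cmdlines
 *   1 systemd
 *   2 kthreadd
 */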
3823
3824static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003825tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3826 size_t cnt, loff_t *ppos)
3827{
3828 char buf[64];
3829 int r;
3830
3831 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003832 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003833 arch_spin_unlock(&trace_cmdline_lock);
3834
3835 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3836}
3837
3838static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3839{
3840 kfree(s->saved_cmdlines);
3841 kfree(s->map_cmdline_to_pid);
3842 kfree(s);
3843}
3844
3845static int tracing_resize_saved_cmdlines(unsigned int val)
3846{
3847 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3848
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003849 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003850 if (!s)
3851 return -ENOMEM;
3852
3853 if (allocate_cmdlines_buffer(val, s) < 0) {
3854 kfree(s);
3855 return -ENOMEM;
3856 }
3857
3858 arch_spin_lock(&trace_cmdline_lock);
3859 savedcmd_temp = savedcmd;
3860 savedcmd = s;
3861 arch_spin_unlock(&trace_cmdline_lock);
3862 free_saved_cmdlines_buffer(savedcmd_temp);
3863
3864 return 0;
3865}
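/*
 * Editor's note: the resize above uses the allocate-new, swap-under-
 * lock, free-old pattern -- only the single savedcmd pointer assignment
 * sits inside the arch_spin_lock critical section, so cmdline recording
 * is blocked for as little time as possible.
 */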
3866
3867static ssize_t
3868tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3869 size_t cnt, loff_t *ppos)
3870{
3871 unsigned long val;
3872 int ret;
3873
3874 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3875 if (ret)
3876 return ret;
3877
3878 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3879 if (!val || val > PID_MAX_DEFAULT)
3880 return -EINVAL;
3881
3882 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3883 if (ret < 0)
3884 return ret;
3885
3886 *ppos += cnt;
3887
3888 return cnt;
3889}
3890
3891static const struct file_operations tracing_saved_cmdlines_size_fops = {
3892 .open = tracing_open_generic,
3893 .read = tracing_saved_cmdlines_size_read,
3894 .write = tracing_saved_cmdlines_size_write,
3895};
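/*
 * Illustrative usage (editor's sketch): per the README above,
 * saved_cmdlines_size takes the number of entries to keep, e.g.:
 *
 *   echo 4096 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *   cat /sys/kernel/debug/tracing/saved_cmdlines_size
 */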
3896
3897static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003898tracing_set_trace_read(struct file *filp, char __user *ubuf,
3899 size_t cnt, loff_t *ppos)
3900{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003901 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003902 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003903 int r;
3904
3905 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003906 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003907 mutex_unlock(&trace_types_lock);
3908
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003909 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003910}
3911
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003912int tracer_init(struct tracer *t, struct trace_array *tr)
3913{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003914 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003915 return t->init(tr);
3916}
3917
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003918static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003919{
3920 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003921
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003922 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003923 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003924}
3925
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003926#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003927/* resize @trace_buf's per-cpu entry counts to match @size_buf's */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003928static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3929 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003930{
3931 int cpu, ret = 0;
3932
3933 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3934 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003935 ret = ring_buffer_resize(trace_buf->buffer,
3936 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003937 if (ret < 0)
3938 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003939 per_cpu_ptr(trace_buf->data, cpu)->entries =
3940 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003941 }
3942 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003943 ret = ring_buffer_resize(trace_buf->buffer,
3944 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003945 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003946 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3947 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003948 }
3949
3950 return ret;
3951}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003952#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003953
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003954static int __tracing_resize_ring_buffer(struct trace_array *tr,
3955 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003956{
3957 int ret;
3958
3959 /*
3960 * If kernel or user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04003961 * we use the size that was given, and we can forget about
3962 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003963 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003964 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003965
Steven Rostedtb382ede62012-10-10 21:44:34 -04003966 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003967 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003968 return 0;
3969
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003970 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003971 if (ret < 0)
3972 return ret;
3973
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003974#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003975 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3976 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003977 goto out;
3978
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003979 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003980 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003981 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3982 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003983 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003984 /*
3985 * AARGH! We are left with a max buffer of a
3986 * different size!!!!
3987 * The max buffer is our "snapshot" buffer.
3988 * When a tracer needs a snapshot (one of the
3989 * latency tracers), it swaps the max buffer
3990 * with the saved snapshot. We succeeded in
3991 * updating the size of the main buffer, but failed to
3992 * update the size of the max buffer. But when we tried
3993 * to reset the main buffer to the original size, we
3994 * failed there too. This is very unlikely to
3995 * happen, but if it does, warn and kill all
3996 * tracing.
3997 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003998 WARN_ON(1);
3999 tracing_disabled = 1;
4000 }
4001 return ret;
4002 }
4003
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004004 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004005 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004006 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004007 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004008
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004009 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004010#endif /* CONFIG_TRACER_MAX_TRACE */
4011
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004012 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004013 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004014 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004015 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004016
4017 return ret;
4018}
4019
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004020static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4021 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004022{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004023 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004024
4025 mutex_lock(&trace_types_lock);
4026
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004027 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4028 /* make sure this cpu is enabled in the mask */
4029 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4030 ret = -EINVAL;
4031 goto out;
4032 }
4033 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004034
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004035 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004036 if (ret < 0)
4037 ret = -ENOMEM;
4038
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004039out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004040 mutex_unlock(&trace_types_lock);
4041
4042 return ret;
4043}
4044
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004045
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004046/**
4047 * tracing_update_buffers - used by tracing facility to expand ring buffers
4048 *
4049 * To save memory when tracing is never used on a system that has it
4050 * configured in, the ring buffers start at a minimum size. But once
4051 * a user starts to use the tracing facility, they need to grow
4052 * to their default size.
4053 *
4054 * This function is to be called when a tracer is about to be used.
4055 */
4056int tracing_update_buffers(void)
4057{
4058 int ret = 0;
4059
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004060 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004061 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004062 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004063 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004064 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004065
4066 return ret;
4067}
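/*
 * Editor's note: a typical caller pattern (sketch, not a quote from the
 * kernel) is to expand before recording anything:
 *
 *   ret = tracing_update_buffers();
 *   if (ret < 0)
 *           return ret;
 *
 * so only the first real user of tracing pays the allocation cost.
 */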
4068
Steven Rostedt577b7852009-02-26 23:43:05 -05004069struct trace_option_dentry;
4070
4071static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004072create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004073
4074static void
4075destroy_trace_option_files(struct trace_option_dentry *topts);
4076
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004077/*
4078 * Used to clear out the tracer before deletion of an instance.
4079 * Must have trace_types_lock held.
4080 */
4081static void tracing_set_nop(struct trace_array *tr)
4082{
4083 if (tr->current_trace == &nop_trace)
4084 return;
4085
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004086 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004087
4088 if (tr->current_trace->reset)
4089 tr->current_trace->reset(tr);
4090
4091 tr->current_trace = &nop_trace;
4092}
4093
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004094static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004095{
Steven Rostedt577b7852009-02-26 23:43:05 -05004096 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004097 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004098#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004099 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004100#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004101 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004102
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004103 mutex_lock(&trace_types_lock);
4104
Steven Rostedt73c51622009-03-11 13:42:01 -04004105 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004106 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004107 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004108 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004109 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004110 ret = 0;
4111 }
4112
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004113 for (t = trace_types; t; t = t->next) {
4114 if (strcmp(t->name, buf) == 0)
4115 break;
4116 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004117 if (!t) {
4118 ret = -EINVAL;
4119 goto out;
4120 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004121 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004122 goto out;
4123
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004124 /* Some tracers are only allowed for the top level buffer */
4125 if (!trace_ok_for_array(t, tr)) {
4126 ret = -EINVAL;
4127 goto out;
4128 }
4129
Steven Rostedt9f029e82008-11-12 15:24:24 -05004130 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004131
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004132 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004133
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004134 if (tr->current_trace->reset)
4135 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004136
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004137 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004138 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004139
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004140#ifdef CONFIG_TRACER_MAX_TRACE
4141 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004142
4143 if (had_max_tr && !t->use_max_tr) {
4144 /*
4145 * We need to make sure that the update_max_tr sees that
4146 * current_trace changed to nop_trace to keep it from
4147 * swapping the buffers after we resize it.
4148 * update_max_tr() is called with interrupts disabled,
4149 * so a synchronize_sched() is sufficient.
4150 */
4151 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004152 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004153 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004154#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004155 /* Currently, only the top instance has options */
4156 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4157 destroy_trace_option_files(topts);
4158 topts = create_trace_option_files(tr, t);
4159 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004160
4161#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004162 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004163 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004164 if (ret < 0)
4165 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004166 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004167#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004168
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004169 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004170 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004171 if (ret)
4172 goto out;
4173 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004174
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004175 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004176 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004177 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004178 out:
4179 mutex_unlock(&trace_types_lock);
4180
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004181 return ret;
4182}
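/*
 * Illustrative usage (editor's sketch): the whole switch sequence above
 * is driven from a shell by something like
 *
 *   echo function_graph > /sys/kernel/debug/tracing/current_tracer
 *
 * which expands the buffers on first use, shuts down the old tracer
 * (and its snapshot buffer if the new one does not need it), then
 * initializes and enables the new tracer.
 */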
4183
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004184static ssize_t
4185tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4186 size_t cnt, loff_t *ppos)
4187{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004188 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004189 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004190 int i;
4191 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004192 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004193
Steven Rostedt60063a62008-10-28 10:44:24 -04004194 ret = cnt;
4195
Li Zefanee6c2c12009-09-18 14:06:47 +08004196 if (cnt > MAX_TRACER_SIZE)
4197 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004198
4199 if (copy_from_user(&buf, ubuf, cnt))
4200 return -EFAULT;
4201
4202 buf[cnt] = 0;
4203
4204 /* strip trailing whitespace */
4205 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4206 buf[i] = 0;
4207
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004208 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004209 if (err)
4210 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004211
Jiri Olsacf8517c2009-10-23 19:36:16 -04004212 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004213
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004214 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004215}
4216
4217static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004218tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4219 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004220{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004221 char buf[64];
4222 int r;
4223
Steven Rostedtcffae432008-05-12 21:21:00 +02004224 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004225 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004226 if (r > sizeof(buf))
4227 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004228 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004229}
4230
4231static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004232tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4233 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004234{
Hannes Eder5e398412009-02-10 19:44:34 +01004235 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004236 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004237
Peter Huewe22fe9b52011-06-07 21:58:27 +02004238 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4239 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004240 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004241
4242 *ptr = val * 1000;
4243
4244 return cnt;
4245}
4246
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004247static ssize_t
4248tracing_thresh_read(struct file *filp, char __user *ubuf,
4249 size_t cnt, loff_t *ppos)
4250{
4251 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4252}
4253
4254static ssize_t
4255tracing_thresh_write(struct file *filp, const char __user *ubuf,
4256 size_t cnt, loff_t *ppos)
4257{
4258 struct trace_array *tr = filp->private_data;
4259 int ret;
4260
4261 mutex_lock(&trace_types_lock);
4262 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4263 if (ret < 0)
4264 goto out;
4265
4266 if (tr->current_trace->update_thresh) {
4267 ret = tr->current_trace->update_thresh(tr);
4268 if (ret < 0)
4269 goto out;
4270 }
4271
4272 ret = cnt;
4273out:
4274 mutex_unlock(&trace_types_lock);
4275
4276 return ret;
4277}
4278
4279static ssize_t
4280tracing_max_lat_read(struct file *filp, char __user *ubuf,
4281 size_t cnt, loff_t *ppos)
4282{
4283 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4284}
4285
4286static ssize_t
4287tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4288 size_t cnt, loff_t *ppos)
4289{
4290 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4291}
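/*
 * Editor's note: these files speak microseconds even though the stored
 * values are nanoseconds -- reads convert with nsecs_to_usecs() and
 * writes multiply by 1000 -- so, for example (path assumed):
 *
 *   echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *
 * arms a 100 usec threshold.
 */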
4292
Steven Rostedtb3806b42008-05-12 21:20:46 +02004293static int tracing_open_pipe(struct inode *inode, struct file *filp)
4294{
Oleg Nesterov15544202013-07-23 17:25:57 +02004295 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004296 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004297 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004298
4299 if (tracing_disabled)
4300 return -ENODEV;
4301
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004302 if (trace_array_get(tr) < 0)
4303 return -ENODEV;
4304
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004305 mutex_lock(&trace_types_lock);
4306
Steven Rostedtb3806b42008-05-12 21:20:46 +02004307 /* create a buffer to store the information to pass to userspace */
4308 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004309 if (!iter) {
4310 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004311 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004312 goto out;
4313 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004314
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004315 /*
4316 * We make a copy of the current tracer to avoid concurrent
4317 * changes on it while we are reading.
4318 */
4319 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4320 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004321 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004322 goto fail;
4323 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004324 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004325
4326 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4327 ret = -ENOMEM;
4328 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304329 }
4330
Steven Rostedta3097202008-11-07 22:36:02 -05004331 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304332 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004333
Steven Rostedt112f38a72009-06-01 15:16:05 -04004334 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4335 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4336
David Sharp8be07092012-11-13 12:18:22 -08004337 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004338 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004339 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4340
Oleg Nesterov15544202013-07-23 17:25:57 +02004341 iter->tr = tr;
4342 iter->trace_buffer = &tr->trace_buffer;
4343 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004344 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004345 filp->private_data = iter;
4346
Steven Rostedt107bad82008-05-12 21:21:01 +02004347 if (iter->trace->pipe_open)
4348 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004349
Arnd Bergmannb4447862010-07-07 23:40:11 +02004350 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004351out:
4352 mutex_unlock(&trace_types_lock);
4353 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004354
4355fail:
4356 kfree(iter->trace);
4357 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004358 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004359 mutex_unlock(&trace_types_lock);
4360 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004361}
4362
4363static int tracing_release_pipe(struct inode *inode, struct file *file)
4364{
4365 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004366 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004367
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004368 mutex_lock(&trace_types_lock);
4369
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004370 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004371 iter->trace->pipe_close(iter);
4372
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004373 mutex_unlock(&trace_types_lock);
4374
Rusty Russell44623442009-01-01 10:12:23 +10304375 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004376 mutex_destroy(&iter->mutex);
4377 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004378 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004379
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004380 trace_array_put(tr);
4381
Steven Rostedtb3806b42008-05-12 21:20:46 +02004382 return 0;
4383}
4384
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004385static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004386trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004387{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004388 /* Iterators are static, they should be filled or empty */
4389 if (trace_buffer_iter(iter, iter->cpu_file))
4390 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004391
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004392 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004393 /*
4394 * Always select as readable when in blocking mode
4395 */
4396 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004397 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004398 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004399 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004400}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004401
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004402static unsigned int
4403tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4404{
4405 struct trace_iterator *iter = filp->private_data;
4406
4407 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004408}
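/*
 * Editor's note: with TRACE_ITER_BLOCK set, poll() always reports the
 * pipe readable and any blocking happens inside read() instead;
 * otherwise readability is delegated to the ring buffer's own
 * ring_buffer_poll_wait().
 */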
4409
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004410/* Must be called with trace_types_lock mutex held. */
4411static int tracing_wait_pipe(struct file *filp)
4412{
4413 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004414 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004415
4416 while (trace_empty(iter)) {
4417
4418 if ((filp->f_flags & O_NONBLOCK)) {
4419 return -EAGAIN;
4420 }
4421
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004422 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004423 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004424 * We still block if tracing is disabled, but we have never
4425 * read anything. This allows a user to cat this file, and
4426 * then enable tracing. But after we have read something,
4427 * we give an EOF when tracing is again disabled.
4428 *
4429 * iter->pos will be 0 if we haven't read anything.
4430 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004431 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004432 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004433
4434 mutex_unlock(&iter->mutex);
4435
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004436 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004437
4438 mutex_lock(&iter->mutex);
4439
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004440 if (ret)
4441 return ret;
4442
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004443 if (signal_pending(current))
4444 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004445 }
4446
4447 return 1;
4448}
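/*
 * Editor's note: the loop above gives trace_pipe its semantics -- an
 * O_NONBLOCK reader gets -EAGAIN at once, a blocking reader sleeps in
 * wait_on_pipe() until data arrives, and EOF is only possible once
 * tracing has been turned off *and* the reader already consumed
 * something (iter->pos != 0).
 */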
4449
Steven Rostedtb3806b42008-05-12 21:20:46 +02004450/*
4451 * Consumer reader.
4452 */
4453static ssize_t
4454tracing_read_pipe(struct file *filp, char __user *ubuf,
4455 size_t cnt, loff_t *ppos)
4456{
4457 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004458 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004459 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004460
4461 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004462 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4463 if (sret != -EBUSY)
4464 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004465
Steven Rostedtf9520752009-03-02 14:04:40 -05004466 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004467
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004468 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004469 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004470 if (unlikely(iter->trace->name != tr->current_trace->name))
4471 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004472 mutex_unlock(&trace_types_lock);
4473
4474 /*
4475 * Avoid more than one consumer on a single file descriptor.
4476 * This is just a matter of trace coherency, the ring buffer itself
4477 * is protected.
4478 */
4479 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004480 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004481 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4482 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004483 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004484 }
4485
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004486waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004487 sret = tracing_wait_pipe(filp);
4488 if (sret <= 0)
4489 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004490
4491 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004492 if (trace_empty(iter)) {
4493 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004494 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004495 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004496
4497 if (cnt >= PAGE_SIZE)
4498 cnt = PAGE_SIZE - 1;
4499
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004500 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004501 memset(&iter->seq, 0,
4502 sizeof(struct trace_iterator) -
4503 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004504 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004505 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004506
Lai Jiangshan4f535962009-05-18 19:35:34 +08004507 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004508 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004509 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004510 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004511 int len = iter->seq.len;
4512
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004513 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004514 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004515 /* don't print partial lines */
4516 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004517 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004518 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004519 if (ret != TRACE_TYPE_NO_CONSUME)
4520 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004521
4522 if (iter->seq.len >= cnt)
4523 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004524
4525 /*
4526 * Setting the full flag means we reached the trace_seq buffer
4527 * size and we should leave by partial output condition above.
4528 * One of the trace_seq_* functions is not used properly.
4529 */
4530 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4531 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004532 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004533 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004534 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004535
Steven Rostedtb3806b42008-05-12 21:20:46 +02004536 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004537 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4538 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004539 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004540
4541 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004542 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004543 * entries, go back to wait for more entries.
4544 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004545 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004546 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004547
Steven Rostedt107bad82008-05-12 21:21:01 +02004548out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004549 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004550
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004551 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004552}
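/*
 * Illustrative usage (editor's sketch): because the read above consumes
 * entries, a plain
 *
 *   cat /sys/kernel/debug/tracing/trace_pipe
 *
 * streams events as they arrive and removes them from the buffer,
 * unlike the static "trace" file described in the README.
 */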
4553
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004554static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4555 unsigned int idx)
4556{
4557 __free_page(spd->pages[idx]);
4558}
4559
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004560static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004561 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004562 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004563 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004564 .steal = generic_pipe_buf_steal,
4565 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004566};
4567
Steven Rostedt34cd4992009-02-09 12:06:29 -05004568static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004569tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004570{
4571 size_t count;
4572 int ret;
4573
4574 /* Seq buffer is page-sized, exactly what we need. */
4575 for (;;) {
4576 count = iter->seq.len;
4577 ret = print_trace_line(iter);
4578 count = iter->seq.len - count;
4579 if (rem < count) {
4580 rem = 0;
4581 iter->seq.len -= count;
4582 break;
4583 }
4584 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4585 iter->seq.len -= count;
4586 break;
4587 }
4588
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004589 if (ret != TRACE_TYPE_NO_CONSUME)
4590 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004591 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004592 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004593 rem = 0;
4594 iter->ent = NULL;
4595 break;
4596 }
4597 }
4598
4599 return rem;
4600}
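/*
 * Editor's note: tracing_fill_pipe_page() greedily formats entries into
 * the page-sized seq buffer and rolls back the last entry when it would
 * exceed the remaining splice budget, so every page handed to splice
 * carries only whole, fully rendered lines.
 */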
4601
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004602static ssize_t tracing_splice_read_pipe(struct file *filp,
4603 loff_t *ppos,
4604 struct pipe_inode_info *pipe,
4605 size_t len,
4606 unsigned int flags)
4607{
Jens Axboe35f3d142010-05-20 10:43:18 +02004608 struct page *pages_def[PIPE_DEF_BUFFERS];
4609 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004610 struct trace_iterator *iter = filp->private_data;
4611 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004612 .pages = pages_def,
4613 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004614 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004615 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004616 .flags = flags,
4617 .ops = &tracing_pipe_buf_ops,
4618 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004619 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004620 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004621 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004622 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004623 unsigned int i;
4624
Jens Axboe35f3d142010-05-20 10:43:18 +02004625 if (splice_grow_spd(pipe, &spd))
4626 return -ENOMEM;
4627
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004628 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004629 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004630 if (unlikely(iter->trace->name != tr->current_trace->name))
4631 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004632 mutex_unlock(&trace_types_lock);
4633
4634 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004635
4636 if (iter->trace->splice_read) {
4637 ret = iter->trace->splice_read(iter, filp,
4638 ppos, pipe, len, flags);
4639 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004640 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004641 }
4642
4643 ret = tracing_wait_pipe(filp);
4644 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004645 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004646
Jason Wessel955b61e2010-08-05 09:22:23 -05004647 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004648 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004649 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004650 }
4651
Lai Jiangshan4f535962009-05-18 19:35:34 +08004652 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004653 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004654
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004655 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004656 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004657 spd.pages[i] = alloc_page(GFP_KERNEL);
4658 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004659 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004660
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004661 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004662
4663 /* Copy the data into the page, so we can start over. */
4664 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004665 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004666 iter->seq.len);
4667 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004668 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004669 break;
4670 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004671 spd.partial[i].offset = 0;
4672 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004673
Steven Rostedtf9520752009-03-02 14:04:40 -05004674 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004675 }
4676
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004677 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004678 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004679 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004680
4681 spd.nr_pages = i;
4682
Jens Axboe35f3d142010-05-20 10:43:18 +02004683 ret = splice_to_pipe(pipe, &spd);
4684out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004685 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004686 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004687
Steven Rostedt34cd4992009-02-09 12:06:29 -05004688out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004689 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004690 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004691}
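/*
 * Illustrative userspace sketch (editor's addition; fd names are
 * hypothetical): the splice path above moves trace data into a pipe
 * without bouncing through a user buffer:
 *
 *   int tfd = open("trace_pipe", O_RDONLY);
 *   splice(tfd, NULL, pipefd[1], NULL, 65536, 0);
 */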
4692
Steven Rostedta98a3c32008-05-12 21:20:59 +02004693static ssize_t
4694tracing_entries_read(struct file *filp, char __user *ubuf,
4695 size_t cnt, loff_t *ppos)
4696{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004697 struct inode *inode = file_inode(filp);
4698 struct trace_array *tr = inode->i_private;
4699 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004700 char buf[64];
4701 int r = 0;
4702 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004703
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004704 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004705
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004706 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004707 int cpu, buf_size_same;
4708 unsigned long size;
4709
4710 size = 0;
4711 buf_size_same = 1;
4712 /* check if all cpu sizes are same */
4713 for_each_tracing_cpu(cpu) {
4714 /* fill in the size from first enabled cpu */
4715 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004716 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4717 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004718 buf_size_same = 0;
4719 break;
4720 }
4721 }
4722
4723 if (buf_size_same) {
4724 if (!ring_buffer_expanded)
4725 r = sprintf(buf, "%lu (expanded: %lu)\n",
4726 size >> 10,
4727 trace_buf_size >> 10);
4728 else
4729 r = sprintf(buf, "%lu\n", size >> 10);
4730 } else
4731 r = sprintf(buf, "X\n");
4732 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004733 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004734
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004735 mutex_unlock(&trace_types_lock);
4736
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004737 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4738 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004739}
4740
4741static ssize_t
4742tracing_entries_write(struct file *filp, const char __user *ubuf,
4743 size_t cnt, loff_t *ppos)
4744{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004745 struct inode *inode = file_inode(filp);
4746 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004747 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004748 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004749
Peter Huewe22fe9b52011-06-07 21:58:27 +02004750 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4751 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004752 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004753
4754 /* must have at least 1 entry */
4755 if (!val)
4756 return -EINVAL;
4757
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004758 /* value is in KB */
4759 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004760 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004761 if (ret < 0)
4762 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004763
Jiri Olsacf8517c2009-10-23 19:36:16 -04004764 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004765
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004766 return cnt;
4767}
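/*
 * Illustrative usage (editor's sketch): buffer_size_kb takes kilobytes
 * per cpu, e.g.
 *
 *   echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *
 * while the per_cpu/cpuN/buffer_size_kb files resize one cpu's buffer
 * (tracing_get_cpu() distinguishes the two in the code above).
 */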

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function only makes sure that "echo" into the file does not
	 * report an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

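/*
 * trace_marker: lets userspace inject a TRACE_PRINT event into the
 * ring buffer. A plausible usage sketch:
 *
 *	# echo "hit the interesting spot" > /sys/kernel/debug/tracing/trace_marker
 *
 * The write path below avoids allocations and locks by pinning the
 * user pages and copying straight into a reserved ring-buffer event.
 */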
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory.
	 * It most likely is already there, because the process just
	 * referenced it, but there's no guarantee of that. By using
	 * get_user_pages_fast() and kmap_atomic()/kunmap_atomic()
	 * we can get access to the pages directly. We then write the
	 * data directly into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

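/*
 * Reading trace_clock (tracing_clock_show() above) prints all
 * available clocks with the current one in brackets, e.g. a sketch
 * of the output: "[local] global counter ...". Writing one of those
 * names switches the clock via tracing_set_clock() below.
 */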
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

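/*
 * Values written to the "snapshot" file map onto the switch below:
 * 0 frees the snapshot buffer (only valid for the all-cpus file),
 * 1 allocates it if needed and swaps it with the live buffer, and
 * any other value clears the snapshot's contents without freeing it.
 */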
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
		/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

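/*
 * Backing for per_cpu/cpuN/trace_pipe_raw (and, above, snapshot_raw):
 * reads hand back raw ring-buffer pages for binary consumers. A usage
 * sketch (tool choice is an example only) would be trace-cmd, or a
 * reader pulling page-sized chunks from trace_pipe_raw.
 */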
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			ret = wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (ret) {
				size = ret;
				goto out_unlock;
			}
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

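/*
 * A buffer_ref wraps one ring-buffer page handed to splice(). The
 * ref count lets the pipe code take extra references; the page is
 * returned to the ring buffer only when the last reference drops
 * (see buffer_pipe_buf_release() and buffer_spd_release() below).
 */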
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

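/*
 * Zero-copy path for trace_pipe_raw: whole ring-buffer pages are
 * linked into the pipe rather than copied, which is why both *ppos
 * and len must be page aligned (enforced below) and why lengths
 * under PAGE_SIZE are rejected outright.
 */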
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (ret)
			goto out;
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

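/*
 * per_cpu/cpuN/stats: a "key: value" summary of one cpu's buffer,
 * reporting entries, overruns, bytes, event timestamps and dropped
 * or read event counts, with timestamps rendered as sec.usec only
 * when the current trace clock counts in nanoseconds.
 */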
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf + r, (size - 1) - r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

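/*
 * Implements the "snapshot" command of set_ftrace_filter. Usage
 * sketch (the function name is an example only):
 *
 *	# echo 'do_fault:snapshot' > set_ftrace_filter
 *	# echo 'do_fault:snapshot:5' > set_ftrace_filter
 *	# echo '!do_fault:snapshot' > set_ftrace_filter
 *
 * i.e. snapshot on every hit, snapshot for the first 5 hits only,
 * and removal of the probe, matching the parsing below.
 */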
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob + 1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

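/*
 * Populates per_cpu/cpuN/ with the per-cpu counterparts of the
 * top-level files: trace_pipe, trace, trace_pipe_raw, stats,
 * buffer_size_kb and (when configured) snapshot/snapshot_raw.
 */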
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

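/*
 * The options/ directory exposes one 0/1 file per tracer option.
 * Usage sketch (the option name is an example only):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/options/sym-offset
 *
 * Tracer-specific options go through trace_options_fops below;
 * core trace_flags bits go through trace_options_core_fops.
 */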
struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

Steven Rostedt577b7852009-02-26 23:43:05 -05006077static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006078create_trace_option_file(struct trace_array *tr,
6079 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006080 struct tracer_flags *flags,
6081 struct tracer_opt *opt)
6082{
6083 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006084
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006085 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006086 if (!t_options)
6087 return;
6088
6089 topt->flags = flags;
6090 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006091 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006092
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006093 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006094 &trace_options_fops);
6095
Steven Rostedt577b7852009-02-26 23:43:05 -05006096}
6097
6098static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006099create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006100{
6101 struct trace_option_dentry *topts;
6102 struct tracer_flags *flags;
6103 struct tracer_opt *opts;
6104 int cnt;
6105
6106 if (!tracer)
6107 return NULL;
6108
6109 flags = tracer->flags;
6110
6111 if (!flags || !flags->opts)
6112 return NULL;
6113
6114 opts = flags->opts;
6115
6116 for (cnt = 0; opts[cnt].name; cnt++)
6117 ;
6118
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006119 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006120 if (!topts)
6121 return NULL;
6122
6123 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006124 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006125 &opts[cnt]);
6126
6127 return topts;
6128}
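
/*
 * The flags walked above come from the tracer itself. A minimal
 * sketch (the "myopt" name and bit are hypothetical; TRACER_OPT is
 * the helper from trace.h):
 *
 *	static struct tracer_opt my_tracer_opts[] = {
 *		{ TRACER_OPT(myopt, 0x1) },
 *		{ }	<- the NULL-name entry terminates the array
 *	};
 *	static struct tracer_flags my_tracer_flags = {
 *		.val = 0,
 *		.opts = my_tracer_opts,
 *	};
 *
 * The counting loop relies on that terminator, and kcalloc(cnt + 1,
 * ...) leaves a zeroed entry that destroy_trace_option_files() uses
 * as its own end marker.
 */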

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};
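
/*
 * These fops back the per-instance "tracing_on" file created in
 * init_tracer_debugfs() below. Illustrative usage (conventional
 * debugfs mount point assumed):
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on
 *
 * A write also calls the current tracer's ->start()/->stop() hooks,
 * so a tracer can quiesce more than just the ring buffer.
 */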

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
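
/*
 * allocate_snapshot is normally set from the kernel command line
 * (booting with "alloc_snapshot"), in which case the max_buffer
 * above starts out at full size rather than as a minimal placeholder
 * that is resized when a snapshot is first requested.
 */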

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}
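
/*
 * With the mkdir/rmdir hooks in place, trace instances are managed
 * with ordinary directory operations, e.g. (conventional debugfs
 * mount point assumed; "foo" is an arbitrary instance name):
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * Each instance gets its own ring buffer plus the per-instance files
 * set up by init_tracer_debugfs().
 */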

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}
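
/*
 * The calls above give each instance directory roughly this layout
 * (tracing_max_latency and snapshot appear only when the matching
 * config options are enabled):
 *
 *	available_tracers  current_tracer   trace       trace_pipe
 *	trace_options      tracing_cpumask  tracing_on  trace_marker
 *	buffer_size_kb     buffer_total_size_kb         free_buffer
 *	trace_clock        per_cpu/cpu0/ ...
 */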

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
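
/*
 * Both notifiers key off ftrace_dump_on_oops, which can be set at
 * boot ("ftrace_dump_on_oops" on the kernel command line) or at
 * run time, e.g.:
 *
 *	sysctl kernel.ftrace_dump_on_oops=1
 *
 * so that a crash automatically spills the trace buffer to the
 * console.
 */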

/*
 * printk is limited to a maximum of 1024 characters; we really don't
 * need it that big. Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define KERN_TRACE here so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* The buffer should already be NUL-terminated, but be paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate the start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but it is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
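
/*
 * ftrace_dump() may also be called directly by kernel code that has
 * hit a fatal state, e.g.:
 *
 *	ftrace_dump(DUMP_ALL);	dumps every CPU's buffer
 *	ftrace_dump(DUMP_ORIG);	dumps only the calling CPU
 *
 * As noted above, dumping turns tracing off; if the system survives,
 * re-enable it with "echo 1 > tracing_on".
 */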

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name points into an init section.
	 * This function is called in lateinit. If we did not find the
	 * boot tracer by then, clear the pointer out, to prevent later
	 * registration from accessing memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);