/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will peek into the ring buffer to count the
 * entries inserted during the selftest, but concurrent
 * insertions into the ring buffer, such as trace_printk(),
 * could occur at the same time, giving false positive or
 * negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurs.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. That is the only place that sets
 * it back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

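/*
 * For illustration (a summary, not extra parsing code): the handler
 * above accepts these boot command line forms:
 *
 *	ftrace_dump_on_oops		(dump all CPU buffers, DUMP_ALL)
 *	ftrace_dump_on_oops=orig_cpu	(dump the oopsing CPU only, DUMP_ORIG)
 */
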
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

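/*
 * For illustration: the clock name given here is matched later against
 * the trace_clocks[] table below (e.g. "local", "global", "counter"),
 * so a boot command line might carry:
 *
 *	trace_clock=global
 */
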
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

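/*
 * Illustrative usage (a sketch, not code from this file): callers pin
 * a trace instance for the duration of an operation:
 *
 *	if (trace_array_get(tr) < 0)
 *		return -ENODEV;
 *	...use tr...
 *	trace_array_put(tr);
 */
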
int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

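/*
 * Illustrative caller (a sketch of the common commit-time pattern,
 * not code from this file):
 *
 *	if (!filter_check_discard(file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 */
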
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been enabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, it is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

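/*
 * Illustrative reader side (a sketch): consumers bracket their ring
 * buffer accesses with these primitives, per cpu or for all cpus:
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	trace_access_unlock(cpu);
 */
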
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

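/*
 * Callers normally reach this through the trace_puts() macro, which
 * supplies the caller address and string length, e.g.:
 *
 *	trace_puts("reached the scheduler\n");
 */
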
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

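/*
 * Illustrative use (a sketch; "latency" and "max_seen" are hypothetical
 * variables): snapshot the live trace when a rare condition fires,
 * then keep tracing:
 *
 *	if (unlikely(latency > max_seen)) {
 *		max_seen = latency;
 *		tracing_snapshot();
 *	}
 */
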
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

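/*
 * For illustration: memparse() accepts K/M/G suffixes, so the boot
 * command line can size the buffers with e.g.:
 *
 *	trace_buf_size=1M
 */
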
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

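/*
 * For illustration: the value is given in microseconds and stored in
 * nanoseconds, so
 *
 *	tracing_thresh=100
 *
 * arms the latency tracers with a 100 usec threshold.
 */
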
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

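/*
 * Illustrative caller (a sketch; process_token() is a hypothetical
 * helper): a debugfs write handler pulls one token per call:
 *
 *	ret = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (ret >= 0 && trace_parser_loaded(&parser)) {
 *		parser.buffer[parser.idx] = 0;
 *		err = process_token(parser.buffer);
 *	}
 */
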
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

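/*
 * Illustrative registration (a sketch; the "example" callbacks are
 * hypothetical): a minimal tracer plugin needs little more than a
 * name plus init/reset callbacks:
 *
 *	static struct tracer example_tracer __read_mostly = {
 *		.name	= "example",
 *		.init	= example_tracer_init,
 *		.reset	= example_tracer_reset,
 *	};
 *
 *	static int __init example_tracer_register(void)
 *	{
 *		return register_tracer(&example_tracer);
 *	}
 */
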
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001281#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001282#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001283static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001284struct saved_cmdlines_buffer {
1285 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1286 unsigned *map_cmdline_to_pid;
1287 unsigned cmdline_num;
1288 int cmdline_idx;
1289 char *saved_cmdlines;
1290};
1291static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001292
Steven Rostedt25b0b442008-05-12 21:21:00 +02001293/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001294static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001295
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001296static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001297{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001298 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1299}
1300
1301static inline void set_cmdline(int idx, const char *cmdline)
1302{
1303 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1304}
1305
1306static int allocate_cmdlines_buffer(unsigned int val,
1307 struct saved_cmdlines_buffer *s)
1308{
1309 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1310 GFP_KERNEL);
1311 if (!s->map_cmdline_to_pid)
1312 return -ENOMEM;
1313
1314 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1315 if (!s->saved_cmdlines) {
1316 kfree(s->map_cmdline_to_pid);
1317 return -ENOMEM;
1318 }
1319
1320 s->cmdline_idx = 0;
1321 s->cmdline_num = val;
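	/*
	 * Filling with memset() works only because NO_CMDLINE_MAP is
	 * UINT_MAX: every byte of the fill pattern is 0xff, so each
	 * unsigned slot still ends up holding NO_CMDLINE_MAP.
	 */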
1322 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1323 sizeof(s->map_pid_to_cmdline));
1324 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1325 val * sizeof(*s->map_cmdline_to_pid));
1326
1327 return 0;
1328}
1329
1330static int trace_create_savedcmd(void)
1331{
1332 int ret;
1333
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001334 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001335 if (!savedcmd)
1336 return -ENOMEM;
1337
1338 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1339 if (ret < 0) {
1340 kfree(savedcmd);
1341 savedcmd = NULL;
1342 return -ENOMEM;
1343 }
1344
1345 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001346}
1347
Carsten Emdeb5130b12009-09-13 01:43:07 +02001348int is_tracing_stopped(void)
1349{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001350 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001351}
1352
Steven Rostedt0f048702008-11-05 16:05:44 -05001353/**
1354 * tracing_start - quick start of the tracer
1355 *
1356 * If tracing is enabled but was stopped by tracing_stop,
1357 * this will start the tracer back up.
1358 */
1359void tracing_start(void)
1360{
1361 struct ring_buffer *buffer;
1362 unsigned long flags;
1363
1364 if (tracing_disabled)
1365 return;
1366
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001367 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1368 if (--global_trace.stop_count) {
1369 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001370 /* Someone screwed up their debugging */
1371 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001372 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001373 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001374 goto out;
1375 }
1376
Steven Rostedta2f80712010-03-12 19:56:00 -05001377 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001378 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001379
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001380 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001381 if (buffer)
1382 ring_buffer_record_enable(buffer);
1383
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001384#ifdef CONFIG_TRACER_MAX_TRACE
1385 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001386 if (buffer)
1387 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001388#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001389
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001390 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001391
Steven Rostedt0f048702008-11-05 16:05:44 -05001392 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001393 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1394}
1395
1396static void tracing_start_tr(struct trace_array *tr)
1397{
1398 struct ring_buffer *buffer;
1399 unsigned long flags;
1400
1401 if (tracing_disabled)
1402 return;
1403
1404 /* If global, we need to also start the max tracer */
1405 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1406 return tracing_start();
1407
1408 raw_spin_lock_irqsave(&tr->start_lock, flags);
1409
1410 if (--tr->stop_count) {
1411 if (tr->stop_count < 0) {
1412 /* Someone screwed up their debugging */
1413 WARN_ON_ONCE(1);
1414 tr->stop_count = 0;
1415 }
1416 goto out;
1417 }
1418
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001419 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001420 if (buffer)
1421 ring_buffer_record_enable(buffer);
1422
1423 out:
1424 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001425}
1426
1427/**
1428 * tracing_stop - quick stop of the tracer
1429 *
1430 * Lightweight way to stop tracing. Use in conjunction with
1431 * tracing_start.
1432 */
1433void tracing_stop(void)
1434{
1435 struct ring_buffer *buffer;
1436 unsigned long flags;
1437
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001438 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1439 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001440 goto out;
1441
Steven Rostedta2f80712010-03-12 19:56:00 -05001442 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001443 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001444
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001445 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001446 if (buffer)
1447 ring_buffer_record_disable(buffer);
1448
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001449#ifdef CONFIG_TRACER_MAX_TRACE
1450 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001451 if (buffer)
1452 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001453#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001454
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001455 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001456
Steven Rostedt0f048702008-11-05 16:05:44 -05001457 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001458 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1459}
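/*
 * Illustrative sketch (not part of this file): debug code can bracket
 * a suspect window with the stop/start pair so the events leading up
 * to a detected problem stay in the ring buffer; detected_bad_state()
 * is a hypothetical check standing in for whatever the debugger wants
 * to catch:
 *
 *	if (detected_bad_state())
 *		tracing_stop();
 *	...inspect the trace via debugfs...
 *	tracing_start();
 */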
1460
1461static void tracing_stop_tr(struct trace_array *tr)
1462{
1463 struct ring_buffer *buffer;
1464 unsigned long flags;
1465
1466 /* If global, we need to also stop the max tracer */
1467 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1468 return tracing_stop();
1469
1470 raw_spin_lock_irqsave(&tr->start_lock, flags);
1471 if (tr->stop_count++)
1472 goto out;
1473
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001474 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001475 if (buffer)
1476 ring_buffer_record_disable(buffer);
1477
1478 out:
1479 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001480}
1481
Ingo Molnare309b412008-05-12 21:20:51 +02001482void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001483
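/*
 * The saved cmdline table is a small bidirectional cache:
 *
 *	map_pid_to_cmdline[pid]  -> slot in saved_cmdlines[]
 *	map_cmdline_to_pid[slot] -> pid currently owning that slot
 *
 * The reverse map exists so that recycling a slot for a new pid can
 * also invalidate the stale forward mapping of the old pid.
 */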
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001484static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001485{
Carsten Emdea635cf02009-03-18 09:00:41 +01001486 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001487
1488 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001489 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001490
1491 /*
1492 * It's not the end of the world if we don't get
1493 * the lock, but we also don't want to spin
1494 * nor do we want to disable interrupts,
1495 * so if we miss here, then better luck next time.
1496 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001497 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001498 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001499
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001500 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001501 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001502 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001503
Carsten Emdea635cf02009-03-18 09:00:41 +01001504 /*
1505 * Check whether the cmdline buffer at idx has a pid
1506 * mapped. We are going to overwrite that entry so we
1507 * need to clear the map_pid_to_cmdline. Otherwise we
1508 * would read the new comm for the old pid.
1509 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001510 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001511 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001512 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001513
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001514 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1515 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001516
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001517 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001518 }
1519
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001520 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001521
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001522 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001523
1524 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001525}
1526
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001527static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001528{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001529 unsigned map;
1530
Steven Rostedt4ca53082009-03-16 19:20:15 -04001531 if (!pid) {
1532 strcpy(comm, "<idle>");
1533 return;
1534 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001535
Steven Rostedt74bf4072010-01-25 15:11:53 -05001536 if (WARN_ON_ONCE(pid < 0)) {
1537 strcpy(comm, "<XXX>");
1538 return;
1539 }
1540
Steven Rostedt4ca53082009-03-16 19:20:15 -04001541 if (pid > PID_MAX_DEFAULT) {
1542 strcpy(comm, "<...>");
1543 return;
1544 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001545
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001546 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001547 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001548 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001549 else
1550 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001551}
1552
1553void trace_find_cmdline(int pid, char comm[])
1554{
1555 preempt_disable();
1556 arch_spin_lock(&trace_cmdline_lock);
1557
1558 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001559
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001560 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001561 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001562}
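/*
 * Typical use from output code (a sketch; the real callers live in
 * trace_output.c):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d", comm, entry->pid);
 */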
1563
Ingo Molnare309b412008-05-12 21:20:51 +02001564void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001565{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001566 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001567 return;
1568
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001569 if (!__this_cpu_read(trace_cmdline_save))
1570 return;
1571
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001572 if (trace_save_cmdline(tsk))
1573 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001574}
1575
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001576void
Steven Rostedt38697052008-10-01 13:14:09 -04001577tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1578 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001579{
1580 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001581
Steven Rostedt777e2082008-09-29 23:02:42 -04001582 entry->preempt_count = pc & 0xff;
1583 entry->pid = (tsk) ? tsk->pid : 0;
1584 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001585#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001586 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001587#else
1588 TRACE_FLAG_IRQS_NOSUPPORT |
1589#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001590 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1591 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001592 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1593 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001594}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001595EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
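/*
 * Sketch: these packed fields are what the latency output columns
 * later decode, along the lines of:
 *
 *	irqs-off:      entry->flags & TRACE_FLAG_IRQS_OFF
 *	hard/softirq:  entry->flags & (TRACE_FLAG_HARDIRQ | TRACE_FLAG_SOFTIRQ)
 *	preempt depth: entry->preempt_count
 */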
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001596
Steven Rostedte77405a2009-09-02 14:17:06 -04001597struct ring_buffer_event *
1598trace_buffer_lock_reserve(struct ring_buffer *buffer,
1599 int type,
1600 unsigned long len,
1601 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001602{
1603 struct ring_buffer_event *event;
1604
Steven Rostedte77405a2009-09-02 14:17:06 -04001605 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001606 if (event != NULL) {
1607 struct trace_entry *ent = ring_buffer_event_data(event);
1608
1609 tracing_generic_entry_update(ent, flags, pc);
1610 ent->type = type;
1611 }
1612
1613 return event;
1614}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001615
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001616void
1617__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1618{
1619 __this_cpu_write(trace_cmdline_save, true);
1620 ring_buffer_unlock_commit(buffer, event);
1621}
1622
Steven Rostedte77405a2009-09-02 14:17:06 -04001623static inline void
1624__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1625 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001626 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001627{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001628 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001629
Steven Rostedte77405a2009-09-02 14:17:06 -04001630 ftrace_trace_stack(buffer, flags, 6, pc);
1631 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001632}
1633
Steven Rostedte77405a2009-09-02 14:17:06 -04001634void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1635 struct ring_buffer_event *event,
1636 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001637{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001638 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001639}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001640EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001641
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001642static struct ring_buffer *temp_buffer;
1643
Steven Rostedtef5580d2009-02-27 19:38:04 -05001644struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001645trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1646 struct ftrace_event_file *ftrace_file,
1647 int type, unsigned long len,
1648 unsigned long flags, int pc)
1649{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001650 struct ring_buffer_event *entry;
1651
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001652 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001653 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001654 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001655 /*
1656 * If tracing is off, but we have triggers enabled
1657 * we still need to look at the event data. Use the temp_buffer
1658 * to store the trace event for the trigger to use. It's recursion
1659 * safe and will not be recorded anywhere.
1660 */
1661 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1662 *current_rb = temp_buffer;
1663 entry = trace_buffer_lock_reserve(*current_rb,
1664 type, len, flags, pc);
1665 }
1666 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001667}
1668EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1669
1670struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001671trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1672 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001673 unsigned long flags, int pc)
1674{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001675 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001676 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001677 type, len, flags, pc);
1678}
Steven Rostedt94487d62009-05-05 19:22:53 -04001679EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001680
Steven Rostedte77405a2009-09-02 14:17:06 -04001681void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1682 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001683 unsigned long flags, int pc)
1684{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001685 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001686}
Steven Rostedt94487d62009-05-05 19:22:53 -04001687EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001688
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001689void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1690 struct ring_buffer_event *event,
1691 unsigned long flags, int pc,
1692 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001693{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001694 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001695
1696 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1697 ftrace_trace_userstack(buffer, flags, pc);
1698}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001699EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001700
Steven Rostedte77405a2009-09-02 14:17:06 -04001701void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1702 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001703{
Steven Rostedte77405a2009-09-02 14:17:06 -04001704 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001705}
Steven Rostedt12acd472009-04-17 16:01:56 -04001706EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001707
Ingo Molnare309b412008-05-12 21:20:51 +02001708void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001709trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001710 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1711 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001712{
Tom Zanussie1112b42009-03-31 00:48:49 -05001713 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001714 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001715 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001716 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001717
Steven Rostedtd7690412008-10-01 00:29:53 -04001718 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001719 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001720 return;
1721
Steven Rostedte77405a2009-09-02 14:17:06 -04001722 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001723 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001724 if (!event)
1725 return;
1726 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001727 entry->ip = ip;
1728 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001729
Tom Zanussif306cc82013-10-24 08:34:17 -05001730 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001731 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001732}
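/*
 * trace_function() above is the canonical reserve/fill/commit shape
 * that every event writer in this file follows:
 *
 *	event = trace_buffer_lock_reserve(buffer, type, sizeof(*entry),
 *					  flags, pc);
 *	if (!event)
 *		return;			// buffer full or disabled
 *	entry = ring_buffer_event_data(event);
 *	// ...fill in the type-specific fields...
 *	__buffer_unlock_commit(buffer, event);
 */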
1733
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001734#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001735
1736#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1737struct ftrace_stack {
1738 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1739};
1740
1741static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1742static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1743
Steven Rostedte77405a2009-09-02 14:17:06 -04001744static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001745 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001746 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001747{
Tom Zanussie1112b42009-03-31 00:48:49 -05001748 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001749 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001750 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001751 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001752 int use_stack;
1753 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001754
1755 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001756 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001757
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001758 /*
1759 * Since events can happen in NMIs there's no safe way to
1760 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1761 * or NMI comes in, it will just have to use the default
1762 * FTRACE_STACK_ENTRIES.
1763 */
1764 preempt_disable_notrace();
1765
Shan Wei82146522012-11-19 13:21:01 +08001766 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001767 /*
1768 * We don't need any atomic variables, just a barrier.
1769 * If an interrupt comes in, we don't care, because it would
1770 * have exited and put the counter back to what we want.
1771 * We just need a barrier to keep gcc from moving things
1772 * around.
1773 */
1774 barrier();
1775 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001776 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001777 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1778
1779 if (regs)
1780 save_stack_trace_regs(regs, &trace);
1781 else
1782 save_stack_trace(&trace);
1783
1784 if (trace.nr_entries > size)
1785 size = trace.nr_entries;
1786 } else
1787 /* From now on, use_stack is a boolean */
1788 use_stack = 0;
1789
1790 size *= sizeof(unsigned long);
1791
1792 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1793 sizeof(*entry) + size, flags, pc);
1794 if (!event)
1795 goto out;
1796 entry = ring_buffer_event_data(event);
1797
1798 memset(&entry->caller, 0, size);
1799
1800 if (use_stack)
1801 memcpy(&entry->caller, trace.entries,
1802 trace.nr_entries * sizeof(unsigned long));
1803 else {
1804 trace.max_entries = FTRACE_STACK_ENTRIES;
1805 trace.entries = entry->caller;
1806 if (regs)
1807 save_stack_trace_regs(regs, &trace);
1808 else
1809 save_stack_trace(&trace);
1810 }
1811
1812 entry->size = trace.nr_entries;
1813
Tom Zanussif306cc82013-10-24 08:34:17 -05001814 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001815 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001816
1817 out:
1818 /* Again, don't let gcc optimize things here */
1819 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001820 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001821 preempt_enable_notrace();
1822
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001823}
1824
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001825void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1826 int skip, int pc, struct pt_regs *regs)
1827{
1828 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1829 return;
1830
1831 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1832}
1833
Steven Rostedte77405a2009-09-02 14:17:06 -04001834void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1835 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001836{
1837 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1838 return;
1839
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001840 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001841}
1842
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001843void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1844 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001845{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001846 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001847}
1848
Steven Rostedt03889382009-12-11 09:48:22 -05001849/**
1850 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001851 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001852 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001853void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001854{
1855 unsigned long flags;
1856
1857 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001858 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001859
1860 local_save_flags(flags);
1861
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001862 /*
1863 * Skip 3 more, which seems to get us to the caller of
1864 * this function.
1865 */
1866 skip += 3;
1867 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1868 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001869}
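/*
 * Example: drop this into a suspect code path to record the current
 * call chain into the trace buffer; skip=0 starts at the caller:
 *
 *	trace_dump_stack(0);
 */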
1870
Steven Rostedt91e86e52010-11-10 12:56:12 +01001871static DEFINE_PER_CPU(int, user_stack_count);
1872
Steven Rostedte77405a2009-09-02 14:17:06 -04001873void
1874ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001875{
Tom Zanussie1112b42009-03-31 00:48:49 -05001876 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001877 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001878 struct userstack_entry *entry;
1879 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001880
1881 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1882 return;
1883
Steven Rostedtb6345872010-03-12 20:03:30 -05001884 /*
1885	 * NMIs cannot handle page faults, even with fixups.
1886	 * Saving the user stack can (and often does) fault.
1887 */
1888 if (unlikely(in_nmi()))
1889 return;
1890
Steven Rostedt91e86e52010-11-10 12:56:12 +01001891 /*
1892 * prevent recursion, since the user stack tracing may
1893 * trigger other kernel events.
1894 */
1895 preempt_disable();
1896 if (__this_cpu_read(user_stack_count))
1897 goto out;
1898
1899 __this_cpu_inc(user_stack_count);
1900
Steven Rostedte77405a2009-09-02 14:17:06 -04001901 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001902 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001903 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001904 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001905 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001906
Steven Rostedt48659d32009-09-11 11:36:23 -04001907 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001908 memset(&entry->caller, 0, sizeof(entry->caller));
1909
1910 trace.nr_entries = 0;
1911 trace.max_entries = FTRACE_STACK_ENTRIES;
1912 trace.skip = 0;
1913 trace.entries = entry->caller;
1914
1915 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001916 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001917 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001918
Li Zefan1dbd1952010-12-09 15:47:56 +08001919 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001920 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001921 out:
1922 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001923}
1924
Hannes Eder4fd27352009-02-10 19:44:12 +01001925#ifdef UNUSED
1926static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001927{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001928 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001929}
Hannes Eder4fd27352009-02-10 19:44:12 +01001930#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001931
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001932#endif /* CONFIG_STACKTRACE */
1933
Steven Rostedt07d777f2011-09-22 14:01:55 -04001934/* created for use with alloc_percpu */
1935struct trace_buffer_struct {
1936 char buffer[TRACE_BUF_SIZE];
1937};
1938
1939static struct trace_buffer_struct *trace_percpu_buffer;
1940static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1941static struct trace_buffer_struct *trace_percpu_irq_buffer;
1942static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1943
1944/*
1945 * The buffer used is dependent on the context. There is a per cpu
1946 * buffer for normal context, softirq context, hard irq context and
1947 * for NMI context. This allows for lockless recording.
1948 *
1949 * Note, if the buffers failed to be allocated, then this returns NULL
1950 */
1951static char *get_trace_buf(void)
1952{
1953 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001954
1955 /*
1956 * If we have allocated per cpu buffers, then we do not
1957 * need to do any locking.
1958 */
1959 if (in_nmi())
1960 percpu_buffer = trace_percpu_nmi_buffer;
1961 else if (in_irq())
1962 percpu_buffer = trace_percpu_irq_buffer;
1963 else if (in_softirq())
1964 percpu_buffer = trace_percpu_sirq_buffer;
1965 else
1966 percpu_buffer = trace_percpu_buffer;
1967
1968 if (!percpu_buffer)
1969 return NULL;
1970
Shan Weid8a03492012-11-13 09:53:04 +08001971 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001972}
1973
1974static int alloc_percpu_trace_buffer(void)
1975{
1976 struct trace_buffer_struct *buffers;
1977 struct trace_buffer_struct *sirq_buffers;
1978 struct trace_buffer_struct *irq_buffers;
1979 struct trace_buffer_struct *nmi_buffers;
1980
1981 buffers = alloc_percpu(struct trace_buffer_struct);
1982 if (!buffers)
1983 goto err_warn;
1984
1985 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1986 if (!sirq_buffers)
1987 goto err_sirq;
1988
1989 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1990 if (!irq_buffers)
1991 goto err_irq;
1992
1993 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1994 if (!nmi_buffers)
1995 goto err_nmi;
1996
1997 trace_percpu_buffer = buffers;
1998 trace_percpu_sirq_buffer = sirq_buffers;
1999 trace_percpu_irq_buffer = irq_buffers;
2000 trace_percpu_nmi_buffer = nmi_buffers;
2001
2002 return 0;
2003
2004 err_nmi:
2005 free_percpu(irq_buffers);
2006 err_irq:
2007 free_percpu(sirq_buffers);
2008 err_sirq:
2009 free_percpu(buffers);
2010 err_warn:
2011 WARN(1, "Could not allocate percpu trace_printk buffer");
2012 return -ENOMEM;
2013}
2014
Steven Rostedt81698832012-10-11 10:15:05 -04002015static int buffers_allocated;
2016
Steven Rostedt07d777f2011-09-22 14:01:55 -04002017void trace_printk_init_buffers(void)
2018{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002019 if (buffers_allocated)
2020 return;
2021
2022 if (alloc_percpu_trace_buffer())
2023 return;
2024
Steven Rostedt2184db42014-05-28 13:14:40 -04002025 /* trace_printk() is for debug use only. Don't use it in production. */
2026
2027 pr_warning("\n**********************************************************\n");
2028 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2029 pr_warning("** **\n");
2030 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2031 pr_warning("** **\n");
2032 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2033 pr_warning("** unsafe for production use.                           **\n");
2034 pr_warning("** **\n");
2035 pr_warning("** If you see this message and you are not debugging **\n");
2036 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2037 pr_warning("** **\n");
2038 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2039 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002040
Steven Rostedtb382ede62012-10-10 21:44:34 -04002041 /* Expand the buffers to set size */
2042 tracing_update_buffers();
2043
Steven Rostedt07d777f2011-09-22 14:01:55 -04002044 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002045
2046 /*
2047 * trace_printk_init_buffers() can be called by modules.
2048 * If that happens, then we need to start cmdline recording
2049 * directly here. If the global_trace.buffer is already
2050 * allocated here, then this was called by module code.
2051 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002052 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002053 tracing_start_cmdline_record();
2054}
2055
2056void trace_printk_start_comm(void)
2057{
2058 /* Start tracing comms if trace printk is set */
2059 if (!buffers_allocated)
2060 return;
2061 tracing_start_cmdline_record();
2062}
2063
2064static void trace_printk_start_stop_comm(int enabled)
2065{
2066 if (!buffers_allocated)
2067 return;
2068
2069 if (enabled)
2070 tracing_start_cmdline_record();
2071 else
2072 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002073}
2074
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002075/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002076 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002077 *
2078 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002079int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002080{
Tom Zanussie1112b42009-03-31 00:48:49 -05002081 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002082 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002083 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002084 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002085 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002086 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002087 char *tbuffer;
2088 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002089
2090 if (unlikely(tracing_selftest_running || tracing_disabled))
2091 return 0;
2092
2093 /* Don't pollute graph traces with trace_vprintk internals */
2094 pause_graph_tracing();
2095
2096 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002097 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002098
Steven Rostedt07d777f2011-09-22 14:01:55 -04002099 tbuffer = get_trace_buf();
2100 if (!tbuffer) {
2101 len = 0;
2102 goto out;
2103 }
2104
2105 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2106
2107 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002108 goto out;
2109
Steven Rostedt07d777f2011-09-22 14:01:55 -04002110 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002111 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002112 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002113 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2114 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002115 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002116 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002117 entry = ring_buffer_event_data(event);
2118 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002119 entry->fmt = fmt;
2120
Steven Rostedt07d777f2011-09-22 14:01:55 -04002121 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002122 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002123 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002124 ftrace_trace_stack(buffer, flags, 6, pc);
2125 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002126
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002127out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002128 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002129 unpause_graph_tracing();
2130
2131 return len;
2132}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002133EXPORT_SYMBOL_GPL(trace_vbprintk);
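/*
 * A trace_printk() with a built-in constant format string ends up
 * here: only the raw arguments are stored at trace time and the
 * string is rendered later, when the buffer is read. For example:
 *
 *	trace_printk("processing %s: count=%d\n", name, count);
 */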
2134
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002135static int
2136__trace_array_vprintk(struct ring_buffer *buffer,
2137 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002138{
Tom Zanussie1112b42009-03-31 00:48:49 -05002139 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002140 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002141 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002142 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002143 unsigned long flags;
2144 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002145
2146 if (tracing_disabled || tracing_selftest_running)
2147 return 0;
2148
Steven Rostedt07d777f2011-09-22 14:01:55 -04002149 /* Don't pollute graph traces with trace_vprintk internals */
2150 pause_graph_tracing();
2151
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002152 pc = preempt_count();
2153 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002154
Steven Rostedt07d777f2011-09-22 14:01:55 -04002155
2156 tbuffer = get_trace_buf();
2157 if (!tbuffer) {
2158 len = 0;
2159 goto out;
2160 }
2161
2162 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2163 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002164 goto out;
2165
Steven Rostedt07d777f2011-09-22 14:01:55 -04002166 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002167 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002168 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002169 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002170 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002171 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002172 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002173 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002174
Steven Rostedt07d777f2011-09-22 14:01:55 -04002175 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002176 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002177 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002178 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002179 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002180 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002181 out:
2182 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002183 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002184
2185 return len;
2186}
Steven Rostedt659372d2009-09-03 19:11:07 -04002187
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002188int trace_array_vprintk(struct trace_array *tr,
2189 unsigned long ip, const char *fmt, va_list args)
2190{
2191 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2192}
2193
2194int trace_array_printk(struct trace_array *tr,
2195 unsigned long ip, const char *fmt, ...)
2196{
2197 int ret;
2198 va_list ap;
2199
2200 if (!(trace_flags & TRACE_ITER_PRINTK))
2201 return 0;
2202
2203 va_start(ap, fmt);
2204 ret = trace_array_vprintk(tr, ip, fmt, ap);
2205 va_end(ap);
2206 return ret;
2207}
2208
2209int trace_array_printk_buf(struct ring_buffer *buffer,
2210 unsigned long ip, const char *fmt, ...)
2211{
2212 int ret;
2213 va_list ap;
2214
2215 if (!(trace_flags & TRACE_ITER_PRINTK))
2216 return 0;
2217
2218 va_start(ap, fmt);
2219 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2220 va_end(ap);
2221 return ret;
2222}
2223
Steven Rostedt659372d2009-09-03 19:11:07 -04002224int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2225{
Steven Rostedta813a152009-10-09 01:41:35 -04002226 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002227}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002228EXPORT_SYMBOL_GPL(trace_vprintk);
2229
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002230static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002231{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002232 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2233
Steven Rostedt5a90f572008-09-03 17:42:51 -04002234 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002235 if (buf_iter)
2236 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002237}
2238
Ingo Molnare309b412008-05-12 21:20:51 +02002239static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002240peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2241 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002242{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002243 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002244 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002245
Steven Rostedtd7690412008-10-01 00:29:53 -04002246 if (buf_iter)
2247 event = ring_buffer_iter_peek(buf_iter, ts);
2248 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002249 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002250 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002251
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002252 if (event) {
2253 iter->ent_size = ring_buffer_event_length(event);
2254 return ring_buffer_event_data(event);
2255 }
2256 iter->ent_size = 0;
2257 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002258}
Steven Rostedtd7690412008-10-01 00:29:53 -04002259
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002260static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002261__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2262 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002263{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002264 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002265 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002266 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002267 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002268 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002269 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002270 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002271 int cpu;
2272
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002273 /*
2274 * If we are in a per_cpu trace file, don't bother by iterating over
2275 * all cpu and peek directly.
2276 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002277 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002278 if (ring_buffer_empty_cpu(buffer, cpu_file))
2279 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002280 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002281 if (ent_cpu)
2282 *ent_cpu = cpu_file;
2283
2284 return ent;
2285 }
2286
Steven Rostedtab464282008-05-12 21:21:00 +02002287 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002288
2289 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002290 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002291
Steven Rostedtbc21b472010-03-31 19:49:26 -04002292 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002293
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002294 /*
2295 * Pick the entry with the smallest timestamp:
2296 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002297 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002298 next = ent;
2299 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002300 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002301 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002302 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002303 }
2304 }
2305
Steven Rostedt12b5da32012-03-27 10:43:28 -04002306 iter->ent_size = next_size;
2307
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002308 if (ent_cpu)
2309 *ent_cpu = next_cpu;
2310
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002311 if (ent_ts)
2312 *ent_ts = next_ts;
2313
Steven Rostedtbc21b472010-03-31 19:49:26 -04002314 if (missing_events)
2315 *missing_events = next_lost;
2316
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002317 return next;
2318}
2319
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002320/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002321struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2322 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002323{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002324 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002325}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002326
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002327/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002328void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002329{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002330 iter->ent = __find_next_entry(iter, &iter->cpu,
2331 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002332
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002333 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002334 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002335
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002336 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002337}
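/*
 * Readers drive the merged per-cpu streams with a loop of this shape
 * (a sketch of what the pipe/seq_file code does with the iterator):
 *
 *	while (trace_find_next_entry_inc(iter))
 *		print_trace_line(iter);
 */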
2338
Ingo Molnare309b412008-05-12 21:20:51 +02002339static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002340{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002341 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002342 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002343}
2344
Ingo Molnare309b412008-05-12 21:20:51 +02002345static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002346{
2347 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002348 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002349 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002350
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002351 WARN_ON_ONCE(iter->leftover);
2352
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353 (*pos)++;
2354
2355 /* can't go backwards */
2356 if (iter->idx > i)
2357 return NULL;
2358
2359 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002360 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002361 else
2362 ent = iter;
2363
2364 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002365 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002366
2367 iter->pos = *pos;
2368
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002369 return ent;
2370}
2371
Jason Wessel955b61e2010-08-05 09:22:23 -05002372void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002373{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002374 struct ring_buffer_event *event;
2375 struct ring_buffer_iter *buf_iter;
2376 unsigned long entries = 0;
2377 u64 ts;
2378
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002379 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002380
Steven Rostedt6d158a82012-06-27 20:46:14 -04002381 buf_iter = trace_buffer_iter(iter, cpu);
2382 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002383 return;
2384
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002385 ring_buffer_iter_reset(buf_iter);
2386
2387 /*
2388 * We could have the case with the max latency tracers
2389 * that a reset never took place on a cpu. This is evident
2390 * from the timestamp being before the start of the buffer.
2391 */
2392 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002393 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002394 break;
2395 entries++;
2396 ring_buffer_read(buf_iter, NULL);
2397 }
2398
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002399 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002400}
2401
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002402/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002403 * The current tracer is copied to avoid taking a global lock
2404 * all around.
2405 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002406static void *s_start(struct seq_file *m, loff_t *pos)
2407{
2408 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002409 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002410 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002411 void *p = NULL;
2412 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002413 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002414
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002415 /*
2416 * copy the tracer to avoid using a global lock all around.
2417 * iter->trace is a copy of current_trace, the pointer to the
2418 * name may be used instead of a strcmp(), as iter->trace->name
2419 * will point to the same string as current_trace->name.
2420 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002422 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2423 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002424 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002425
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002426#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002427 if (iter->snapshot && iter->trace->use_max_tr)
2428 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002429#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002430
2431 if (!iter->snapshot)
2432 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002433
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002434 if (*pos != iter->pos) {
2435 iter->ent = NULL;
2436 iter->cpu = 0;
2437 iter->idx = -1;
2438
Steven Rostedtae3b5092013-01-23 15:22:59 -05002439 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002440 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002441 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002442 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002443 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002444
Lai Jiangshanac91d852010-03-02 17:54:50 +08002445 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002446 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2447 ;
2448
2449 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002450 /*
2451 * If we overflowed the seq_file before, then we want
2452 * to just reuse the trace_seq buffer again.
2453 */
2454 if (iter->leftover)
2455 p = iter;
2456 else {
2457 l = *pos - 1;
2458 p = s_next(m, p, &l);
2459 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002460 }
2461
Lai Jiangshan4f535962009-05-18 19:35:34 +08002462 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002463 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002464 return p;
2465}
2466
2467static void s_stop(struct seq_file *m, void *p)
2468{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002469 struct trace_iterator *iter = m->private;
2470
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002471#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002472 if (iter->snapshot && iter->trace->use_max_tr)
2473 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002474#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002475
2476 if (!iter->snapshot)
2477 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002478
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002479 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002480 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002481}
2482
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}

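/*
 * Worked example (illustrative numbers): with one CPU holding 100
 * entries, 40 overruns and no skipped entries, the loop above yields
 * *entries = 100 and *total = 100 + 40 = 140, which the header below
 * reports as "entries-in-buffer/entries-written: 100/140".
 */
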
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

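/*
 * Example of the latency header this emits (values are illustrative,
 * constructed from the seq_printf() formats above):
 *
 *	# irqsoff latency trace v1.1.5 on 3.19.0
 *	# --------------------------------------------------------------------
 *	# latency: 57 us, #6/6, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 *	#    -----------------
 *	#    | task: bash-2042 (uid:0 nice:0 policy:0 rt_prio:0)
 *	#    -----------------
 */
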
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}

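/*
 * Note the fixed precedence in print_trace_line(): lost-event warnings
 * come first, then a tracer-specific print_line() hook, then the
 * printk-msg-only shortcuts (bputs/bprint/print), and finally the
 * bin/hex/raw/default formatters in that order, so exactly one
 * formatter renders any given entry.
 */
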
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                      Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif

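/*
 * Typical snapshot session from the shell, mirroring the help text
 * emitted above (illustrative; the debugfs mount point may differ):
 *
 *	# echo 1 > /sys/kernel/debug/tracing/snapshot	(allocate + snapshot)
 *	# cat /sys/kernel/debug/tracing/snapshot	(read the frozen copy)
 *	# echo 2 > /sys/kernel/debug/tracing/snapshot	(clear, keep buffer)
 *	# echo 0 > /sys/kernel/debug/tracing/snapshot	(free the buffer)
 */
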
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}

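/*
 * Worked example of the encoding above: trace_create_cpu_file() stores
 * cpu + 1 in i_cdev, so CPU 0 is stored as (void *)1 and decodes back
 * to 0 here, while a NULL i_cdev (the top-level, non-per-cpu files)
 * decodes to RING_BUFFER_ALL_CPUS. The +1 bias keeps "CPU 0"
 * distinguishable from "no CPU recorded".
 */
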
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

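/*
 * __tracing_open() pairs with tracing_release() further down: the
 * tracing_stop_tr() done here for a non-snapshot open is undone by
 * tracing_start_tr() on release, so the trace is only paused while a
 * reader actually holds the "trace" file open.
 */
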
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

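/*
 * The O_TRUNC branch above is what makes the documented clearing idiom
 * work (illustrative shell session; debugfs mount point may differ):
 *
 *	# echo > /sys/kernel/debug/tracing/trace		(clear all CPUs)
 *	# echo > /sys/kernel/debug/tracing/per_cpu/cpu1/trace	(clear cpu1 only)
 *
 * The shell opens the file O_WRONLY|O_TRUNC, so the buffer is reset at
 * open time; the write itself lands in tracing_write_stub().
 */
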
/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}

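/*
 * The t_* iterator above backs "available_tracers": t_show() separates
 * names with spaces and terminates the list with a newline, e.g.
 * (the exact set depends on the kernel configuration):
 *
 *	# cat /sys/kernel/debug/tracing/available_tracers
 *	blk function_graph wakeup function nop
 */
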
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
	if (count - len < 2) {
		count = -EINVAL;
		goto out_err;
	}
	len += sprintf(mask_str + len, "\n");
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}

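/*
 * Example (illustrative): on a 4-CPU machine,
 *
 *	# echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 *
 * parses as the hex mask 0x3, i.e. CPUs {0,1}; the loop above then
 * bumps the per-cpu "disabled" counters and calls
 * ring_buffer_record_disable_cpu() for CPUs 2 and 3, so only CPUs 0
 * and 1 keep recording events.
 */
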
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}

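/*
 * Worked example of the parsing above: writing "nooverwrite" strips the
 * "no" prefix (neg = 1, cmp = "overwrite"), matches the global option
 * table, and ends up calling set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 0).
 * An unknown name such as "nosuchthing" falls through to
 * set_tracer_option() and ultimately returns -EINVAL.
 */
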
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

Ingo Molnar7bd2f242008-05-12 21:20:45 +02003609static const char readme_msg[] =
3610 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003611 "# echo 0 > tracing_on : quick way to disable tracing\n"
3612 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3613 " Important files:\n"
3614 " trace\t\t\t- The static contents of the buffer\n"
3615 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3616 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3617 " current_tracer\t- function and latency tracers\n"
3618 " available_tracers\t- list of configured tracers for current_tracer\n"
3619 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3620 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3621 " trace_clock\t\t-change the clock used to order events\n"
3622 " local: Per cpu clock but may not be synced across CPUs\n"
3623 " global: Synced across CPUs but slows tracing down.\n"
3624 " counter: Not a clock, but just an increment\n"
3625 " uptime: Jiffy counter from time of boot\n"
3626 " perf: Same clock that perf events use\n"
3627#ifdef CONFIG_X86_64
3628 " x86-tsc: TSC cycle counter\n"
3629#endif
3630 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3631 " tracing_cpumask\t- Limit which CPUs to trace\n"
3632 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3633 "\t\t\t Remove sub-buffer with rmdir\n"
3634 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003635 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3636 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003637 " saved_cmdlines_size\t- echo the number of comm-pid entries to store\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003638#ifdef CONFIG_DYNAMIC_FTRACE
3639 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003640 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3641 "\t\t\t functions\n"
3642 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3643 "\t modules: Can select a group via module\n"
3644 "\t Format: :mod:<module-name>\n"
3645 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3646 "\t triggers: a command to perform when function is hit\n"
3647 "\t Format: <function>:<trigger>[:count]\n"
3648 "\t trigger: traceon, traceoff\n"
3649 "\t\t enable_event:<system>:<event>\n"
3650 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003651#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003652 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003653#endif
3654#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003655 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003656#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003657 "\t\t dump\n"
3658 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003659 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3660 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3661 "\t The first one will disable tracing every time do_fault is hit\n"
3662 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3663 "\t The first time do trap is hit and it disables tracing, the\n"
3664 "\t counter will decrement to 2. If tracing is already disabled,\n"
3665 "\t the counter will not decrement. It only decrements when the\n"
3666 "\t trigger did work\n"
3667 "\t To remove trigger without count:\n"
3668 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3669 "\t To remove trigger with a count:\n"
3670 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003671 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003672 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3673 "\t modules: Can select a group via module command :mod:\n"
3674 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003675#endif /* CONFIG_DYNAMIC_FTRACE */
3676#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003677 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3678 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003679#endif
3680#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3681 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003682 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003683 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3684#endif
3685#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003686 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3687 "\t\t\t snapshot buffer. Read the contents for more\n"
3688 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003689#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003690#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003691 " stack_trace\t\t- Shows the max stack trace when active\n"
3692 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003693 "\t\t\t Write into this file to reset the max size (trigger a\n"
3694 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003695#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003696 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3697 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003698#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003699#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003700 " events/\t\t- Directory containing all trace event subsystems:\n"
3701 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3702 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003703 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3704 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003705 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003706 " events/<system>/<event>/\t- Directory containing control files for\n"
3707 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003708 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3709 " filter\t\t- If set, only events passing filter are traced\n"
3710 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003711 "\t Format: <trigger>[:count][if <filter>]\n"
3712 "\t trigger: traceon, traceoff\n"
3713 "\t enable_event:<system>:<event>\n"
3714 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003715#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003716 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003717#endif
3718#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003719 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003720#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003721 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3722 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3723 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3724 "\t events/block/block_unplug/trigger\n"
3725 "\t The first disables tracing every time block_unplug is hit.\n"
3726 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3727 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3728 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3729 "\t Like function triggers, the counter is only decremented if it\n"
3730 "\t enabled or disabled tracing.\n"
3731 "\t To remove a trigger without a count:\n"
3732 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3733 "\t To remove a trigger with a count:\n"
3734 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3735 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003736;
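/*
 * A concrete walk-through of the trigger syntax documented above, as
 * a user-space sketch (the mount point and the traced function are
 * assumptions):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *cmd = "do_fault:traceoff:3";
 *		int fd = open("/sys/kernel/tracing/set_ftrace_filter",
 *			      O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, cmd, strlen(cmd));
 *		close(fd);
 *		return 0;
 *	}
 *
 * This stops tracing the first three times do_fault() is hit.  Per
 * the '!' forms above, writing "!do_fault:traceoff:0" to the same
 * file removes the trigger again.
 */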
3737
3738static ssize_t
3739tracing_readme_read(struct file *filp, char __user *ubuf,
3740 size_t cnt, loff_t *ppos)
3741{
3742 return simple_read_from_buffer(ubuf, cnt, ppos,
3743 readme_msg, strlen(readme_msg));
3744}
3745
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003746static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003747 .open = tracing_open_generic,
3748 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003749 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003750};
3751
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003752static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003753{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003754 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003755
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003756 if (*pos || m->count)
3757 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003758
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003759 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003760
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003761 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3762 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003763 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003764 continue;
3765
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003766 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003767 }
3768
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003769 return NULL;
3770}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003771
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003772static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3773{
3774 void *v;
3775 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003776
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003777 preempt_disable();
3778 arch_spin_lock(&trace_cmdline_lock);
3779
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003780 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003781 while (l <= *pos) {
3782 v = saved_cmdlines_next(m, v, &l);
3783 if (!v)
3784 return NULL;
3785 }
3786
3787 return v;
3788}
3789
3790static void saved_cmdlines_stop(struct seq_file *m, void *v)
3791{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003792 arch_spin_unlock(&trace_cmdline_lock);
3793 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003794}
3795
3796static int saved_cmdlines_show(struct seq_file *m, void *v)
3797{
3798 char buf[TASK_COMM_LEN];
3799 unsigned int *pid = v;
3800
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003801 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003802 seq_printf(m, "%d %s\n", *pid, buf);
3803 return 0;
3804}
3805
3806static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3807 .start = saved_cmdlines_start,
3808 .next = saved_cmdlines_next,
3809 .stop = saved_cmdlines_stop,
3810 .show = saved_cmdlines_show,
3811};
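/*
 * For reference, the seq_file core drives the ops above roughly like
 * this (a simplified sketch of fs/seq_file.c with error handling and
 * buffer management left out):
 *
 *	p = ops->start(m, &pos);
 *	while (p) {
 *		ops->show(m, p);
 *		p = ops->next(m, p, &pos);
 *	}
 *	ops->stop(m, p);
 *
 * start/stop bracket every batch of show calls, which is why
 * saved_cmdlines_start() takes trace_cmdline_lock and disables
 * preemption, and saved_cmdlines_stop() is the only place that
 * releases them.
 */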
3812
3813static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3814{
3815 if (tracing_disabled)
3816 return -ENODEV;
3817
3818 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003819}
3820
3821static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003822 .open = tracing_saved_cmdlines_open,
3823 .read = seq_read,
3824 .llseek = seq_lseek,
3825 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003826};
3827
3828static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003829tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3830 size_t cnt, loff_t *ppos)
3831{
3832 char buf[64];
3833 int r;
3834
3835 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003836 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003837 arch_spin_unlock(&trace_cmdline_lock);
3838
3839 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3840}
3841
3842static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3843{
3844 kfree(s->saved_cmdlines);
3845 kfree(s->map_cmdline_to_pid);
3846 kfree(s);
3847}
3848
3849static int tracing_resize_saved_cmdlines(unsigned int val)
3850{
3851 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3852
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003853 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003854 if (!s)
3855 return -ENOMEM;
3856
3857 if (allocate_cmdlines_buffer(val, s) < 0) {
3858 kfree(s);
3859 return -ENOMEM;
3860 }
3861
3862 arch_spin_lock(&trace_cmdline_lock);
3863 savedcmd_temp = savedcmd;
3864 savedcmd = s;
3865 arch_spin_unlock(&trace_cmdline_lock);
3866 free_saved_cmdlines_buffer(savedcmd_temp);
3867
3868 return 0;
3869}
3870
3871static ssize_t
3872tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3873 size_t cnt, loff_t *ppos)
3874{
3875 unsigned long val;
3876 int ret;
3877
3878 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3879 if (ret)
3880 return ret;
3881
 3882 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3883 if (!val || val > PID_MAX_DEFAULT)
3884 return -EINVAL;
3885
3886 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3887 if (ret < 0)
3888 return ret;
3889
3890 *ppos += cnt;
3891
3892 return cnt;
3893}
3894
3895static const struct file_operations tracing_saved_cmdlines_size_fops = {
3896 .open = tracing_open_generic,
3897 .read = tracing_saved_cmdlines_size_read,
3898 .write = tracing_saved_cmdlines_size_write,
3899};
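/*
 * User-space sketch of resizing the cmdline cache (path assumed, as
 * in the earlier examples):
 *
 *	int fd = open("/sys/kernel/tracing/saved_cmdlines_size", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1024", 4);
 *		close(fd);
 *	}
 *
 * The write handler above rejects 0 and anything above
 * PID_MAX_DEFAULT (32768); on success the old buffer is swapped out
 * under trace_cmdline_lock and freed.
 */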
3900
3901static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003902tracing_set_trace_read(struct file *filp, char __user *ubuf,
3903 size_t cnt, loff_t *ppos)
3904{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003905 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003906 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003907 int r;
3908
3909 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003910 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003911 mutex_unlock(&trace_types_lock);
3912
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003913 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003914}
3915
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003916int tracer_init(struct tracer *t, struct trace_array *tr)
3917{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003918 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003919 return t->init(tr);
3920}
3921
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003922static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003923{
3924 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003925
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003926 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003927 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003928}
3929
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003930#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003931/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003932static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3933 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003934{
3935 int cpu, ret = 0;
3936
3937 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3938 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003939 ret = ring_buffer_resize(trace_buf->buffer,
3940 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003941 if (ret < 0)
3942 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003943 per_cpu_ptr(trace_buf->data, cpu)->entries =
3944 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003945 }
3946 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003947 ret = ring_buffer_resize(trace_buf->buffer,
3948 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003949 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003950 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3951 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003952 }
3953
3954 return ret;
3955}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003956#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003957
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003958static int __tracing_resize_ring_buffer(struct trace_array *tr,
3959 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003960{
3961 int ret;
3962
3963 /*
 3964 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04003965 * we use the size that was given, and we can forget about
3966 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003967 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003968 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003969
Steven Rostedtb382ede62012-10-10 21:44:34 -04003970 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003971 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003972 return 0;
3973
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003974 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003975 if (ret < 0)
3976 return ret;
3977
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003978#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003979 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3980 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003981 goto out;
3982
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003983 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003984 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003985 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3986 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003987 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003988 /*
3989 * AARGH! We are left with different
3990 * size max buffer!!!!
3991 * The max buffer is our "snapshot" buffer.
3992 * When a tracer needs a snapshot (one of the
3993 * latency tracers), it swaps the max buffer
 3994 * with the saved snapshot. We succeeded in
 3995 * updating the size of the main buffer, but failed to
3996 * update the size of the max buffer. But when we tried
3997 * to reset the main buffer to the original size, we
3998 * failed there too. This is very unlikely to
3999 * happen, but if it does, warn and kill all
4000 * tracing.
4001 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004002 WARN_ON(1);
4003 tracing_disabled = 1;
4004 }
4005 return ret;
4006 }
4007
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004008 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004009 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004010 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004011 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004012
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004013 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004014#endif /* CONFIG_TRACER_MAX_TRACE */
4015
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004016 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004017 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004018 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004019 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004020
4021 return ret;
4022}
4023
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004024static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4025 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004026{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004027 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004028
4029 mutex_lock(&trace_types_lock);
4030
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004031 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4032 /* make sure, this cpu is enabled in the mask */
4033 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4034 ret = -EINVAL;
4035 goto out;
4036 }
4037 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004038
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004039 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004040 if (ret < 0)
4041 ret = -ENOMEM;
4042
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004043out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004044 mutex_unlock(&trace_types_lock);
4045
4046 return ret;
4047}
4048
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004049
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004050/**
 4051 * tracing_update_buffers - used by the tracing facility to expand ring buffers
 4052 *
 4053 * To save memory when tracing is never used on a system that has it
 4054 * configured in, the ring buffers are set to a minimum size. But once
 4055 * a user starts to use the tracing facility, they need to grow
 4056 * to their default size.
4057 *
4058 * This function is to be called when a tracer is about to be used.
4059 */
4060int tracing_update_buffers(void)
4061{
4062 int ret = 0;
4063
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004064 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004065 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004066 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004067 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004068 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004069
4070 return ret;
4071}
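/*
 * Typical call pattern (the caller below is hypothetical): any path
 * that is about to turn tracing on should expand the buffers first.
 *
 *	static int my_enable_path(void)
 *	{
 *		int ret = tracing_update_buffers();
 *
 *		if (ret < 0)
 *			return ret;
 *		...actually enable the tracer or event here...
 *		return 0;
 *	}
 *
 * Once ring_buffer_expanded is set, later calls reduce to a cheap
 * check under trace_types_lock.
 */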
4072
Steven Rostedt577b7852009-02-26 23:43:05 -05004073struct trace_option_dentry;
4074
4075static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004076create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004077
4078static void
4079destroy_trace_option_files(struct trace_option_dentry *topts);
4080
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004081/*
4082 * Used to clear out the tracer before deletion of an instance.
4083 * Must have trace_types_lock held.
4084 */
4085static void tracing_set_nop(struct trace_array *tr)
4086{
4087 if (tr->current_trace == &nop_trace)
4088 return;
4089
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004090 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004091
4092 if (tr->current_trace->reset)
4093 tr->current_trace->reset(tr);
4094
4095 tr->current_trace = &nop_trace;
4096}
4097
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004098static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004099{
Steven Rostedt577b7852009-02-26 23:43:05 -05004100 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004101 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004102#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004103 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004104#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004105 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004106
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004107 mutex_lock(&trace_types_lock);
4108
Steven Rostedt73c51622009-03-11 13:42:01 -04004109 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004110 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004111 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004112 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004113 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004114 ret = 0;
4115 }
4116
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004117 for (t = trace_types; t; t = t->next) {
4118 if (strcmp(t->name, buf) == 0)
4119 break;
4120 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004121 if (!t) {
4122 ret = -EINVAL;
4123 goto out;
4124 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004125 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004126 goto out;
4127
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004128 /* Some tracers are only allowed for the top level buffer */
4129 if (!trace_ok_for_array(t, tr)) {
4130 ret = -EINVAL;
4131 goto out;
4132 }
4133
Steven Rostedt9f029e82008-11-12 15:24:24 -05004134 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004135
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004136 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004137
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004138 if (tr->current_trace->reset)
4139 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004140
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004141 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004142 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004143
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004144#ifdef CONFIG_TRACER_MAX_TRACE
4145 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004146
4147 if (had_max_tr && !t->use_max_tr) {
4148 /*
4149 * We need to make sure that the update_max_tr sees that
4150 * current_trace changed to nop_trace to keep it from
4151 * swapping the buffers after we resize it.
 4152 * update_max_tr() is called with interrupts disabled,
 4153 * so a synchronize_sched() is sufficient.
4154 */
4155 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004156 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004157 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004158#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004159 /* Currently, only the top instance has options */
4160 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4161 destroy_trace_option_files(topts);
4162 topts = create_trace_option_files(tr, t);
4163 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004164
4165#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004166 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004167 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004168 if (ret < 0)
4169 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004170 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004171#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004172
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004173 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004174 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004175 if (ret)
4176 goto out;
4177 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004178
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004179 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004180 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004181 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004182 out:
4183 mutex_unlock(&trace_types_lock);
4184
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004185 return ret;
4186}
4187
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004188static ssize_t
4189tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4190 size_t cnt, loff_t *ppos)
4191{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004192 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004193 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004194 int i;
4195 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004196 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004197
Steven Rostedt60063a62008-10-28 10:44:24 -04004198 ret = cnt;
4199
Li Zefanee6c2c12009-09-18 14:06:47 +08004200 if (cnt > MAX_TRACER_SIZE)
4201 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004202
4203 if (copy_from_user(&buf, ubuf, cnt))
4204 return -EFAULT;
4205
4206 buf[cnt] = 0;
4207
4208 /* strip ending whitespace. */
4209 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4210 buf[i] = 0;
4211
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004212 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004213 if (err)
4214 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004215
Jiri Olsacf8517c2009-10-23 19:36:16 -04004216 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004217
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004218 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004219}
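/*
 * User-space sketch for the handler above (path assumed): selecting
 * a tracer is a single write, and the trailing newline that a shell
 * "echo" adds is removed by the whitespace-stripping loop above.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		write(fd, "nop\n", 4);
 *		close(fd);
 *		return 0;
 *	}
 *
 * An unknown tracer name makes tracing_set_tracer() fail with
 * -EINVAL, which comes back as the write(2) error.
 */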
4220
4221static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004222tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4223 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004224{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004225 char buf[64];
4226 int r;
4227
Steven Rostedtcffae432008-05-12 21:21:00 +02004228 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004229 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004230 if (r > sizeof(buf))
4231 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004232 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004233}
4234
4235static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004236tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4237 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004238{
Hannes Eder5e398412009-02-10 19:44:34 +01004239 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004240 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004241
Peter Huewe22fe9b52011-06-07 21:58:27 +02004242 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4243 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004244 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004245
4246 *ptr = val * 1000;
4247
4248 return cnt;
4249}
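/*
 * Worked example of the unit handling above (these two helpers back
 * both tracing_thresh and tracing_max_latency): the files speak
 * microseconds while the kernel stores nanoseconds.  Writing "100"
 * stores 100 * 1000 = 100000 in *ptr; reading converts back with
 * nsecs_to_usecs() and prints "100".  The value -1 is passed through
 * unconverted and means "unset".
 */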
4250
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004251static ssize_t
4252tracing_thresh_read(struct file *filp, char __user *ubuf,
4253 size_t cnt, loff_t *ppos)
4254{
4255 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4256}
4257
4258static ssize_t
4259tracing_thresh_write(struct file *filp, const char __user *ubuf,
4260 size_t cnt, loff_t *ppos)
4261{
4262 struct trace_array *tr = filp->private_data;
4263 int ret;
4264
4265 mutex_lock(&trace_types_lock);
4266 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4267 if (ret < 0)
4268 goto out;
4269
4270 if (tr->current_trace->update_thresh) {
4271 ret = tr->current_trace->update_thresh(tr);
4272 if (ret < 0)
4273 goto out;
4274 }
4275
4276 ret = cnt;
4277out:
4278 mutex_unlock(&trace_types_lock);
4279
4280 return ret;
4281}
4282
4283static ssize_t
4284tracing_max_lat_read(struct file *filp, char __user *ubuf,
4285 size_t cnt, loff_t *ppos)
4286{
4287 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4288}
4289
4290static ssize_t
4291tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4292 size_t cnt, loff_t *ppos)
4293{
4294 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4295}
4296
Steven Rostedtb3806b42008-05-12 21:20:46 +02004297static int tracing_open_pipe(struct inode *inode, struct file *filp)
4298{
Oleg Nesterov15544202013-07-23 17:25:57 +02004299 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004300 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004301 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004302
4303 if (tracing_disabled)
4304 return -ENODEV;
4305
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004306 if (trace_array_get(tr) < 0)
4307 return -ENODEV;
4308
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004309 mutex_lock(&trace_types_lock);
4310
Steven Rostedtb3806b42008-05-12 21:20:46 +02004311 /* create a buffer to store the information to pass to userspace */
4312 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004313 if (!iter) {
4314 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004315 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004316 goto out;
4317 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004318
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004319 trace_seq_init(&iter->seq);
4320
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004321 /*
4322 * We make a copy of the current tracer to avoid concurrent
4323 * changes on it while we are reading.
4324 */
4325 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4326 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004327 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004328 goto fail;
4329 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004330 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004331
4332 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4333 ret = -ENOMEM;
4334 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304335 }
4336
Steven Rostedta3097202008-11-07 22:36:02 -05004337 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304338 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004339
Steven Rostedt112f38a72009-06-01 15:16:05 -04004340 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4341 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4342
David Sharp8be07092012-11-13 12:18:22 -08004343 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004344 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004345 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4346
Oleg Nesterov15544202013-07-23 17:25:57 +02004347 iter->tr = tr;
4348 iter->trace_buffer = &tr->trace_buffer;
4349 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004350 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004351 filp->private_data = iter;
4352
Steven Rostedt107bad82008-05-12 21:21:01 +02004353 if (iter->trace->pipe_open)
4354 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004355
Arnd Bergmannb4447862010-07-07 23:40:11 +02004356 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004357out:
4358 mutex_unlock(&trace_types_lock);
4359 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004360
4361fail:
4362 kfree(iter->trace);
4363 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004364 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004365 mutex_unlock(&trace_types_lock);
4366 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004367}
4368
4369static int tracing_release_pipe(struct inode *inode, struct file *file)
4370{
4371 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004372 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004373
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004374 mutex_lock(&trace_types_lock);
4375
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004376 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004377 iter->trace->pipe_close(iter);
4378
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004379 mutex_unlock(&trace_types_lock);
4380
Rusty Russell44623442009-01-01 10:12:23 +10304381 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004382 mutex_destroy(&iter->mutex);
4383 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004384 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004385
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004386 trace_array_put(tr);
4387
Steven Rostedtb3806b42008-05-12 21:20:46 +02004388 return 0;
4389}
4390
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004391static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004392trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004393{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004394 /* Iterators are static, they should be filled or empty */
4395 if (trace_buffer_iter(iter, iter->cpu_file))
4396 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004397
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004398 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004399 /*
4400 * Always select as readable when in blocking mode
4401 */
4402 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004403 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004404 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004405 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004406}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004407
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004408static unsigned int
4409tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4410{
4411 struct trace_iterator *iter = filp->private_data;
4412
4413 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004414}
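/*
 * Sketch of a user-space consumer driving the poll support above
 * (path assumed):
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		int fd = open("/sys/kernel/tracing/trace_pipe",
 *			      O_RDONLY | O_NONBLOCK);
 *		struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *		if (fd < 0)
 *			return 1;
 *		while (poll(&pfd, 1, -1) > 0) {
 *			if (pfd.revents & POLLIN)
 *				read(fd, buf, sizeof(buf));
 *		}
 *		return 0;
 *	}
 *
 * Note that with TRACE_ITER_BLOCK set, trace_poll() above reports
 * "always readable", so a wakeup does not guarantee data.
 */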
4415
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004416/* Must be called with trace_types_lock mutex held. */
4417static int tracing_wait_pipe(struct file *filp)
4418{
4419 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004420 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004421
4422 while (trace_empty(iter)) {
4423
4424 if ((filp->f_flags & O_NONBLOCK)) {
4425 return -EAGAIN;
4426 }
4427
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004428 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004429 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004430 * We still block if tracing is disabled, but we have never
4431 * read anything. This allows a user to cat this file, and
4432 * then enable tracing. But after we have read something,
4433 * we give an EOF when tracing is again disabled.
4434 *
4435 * iter->pos will be 0 if we haven't read anything.
4436 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004437 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004438 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004439
4440 mutex_unlock(&iter->mutex);
4441
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004442 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004443
4444 mutex_lock(&iter->mutex);
4445
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004446 if (ret)
4447 return ret;
4448
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004449 if (signal_pending(current))
4450 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004451 }
4452
4453 return 1;
4454}
4455
Steven Rostedtb3806b42008-05-12 21:20:46 +02004456/*
4457 * Consumer reader.
4458 */
4459static ssize_t
4460tracing_read_pipe(struct file *filp, char __user *ubuf,
4461 size_t cnt, loff_t *ppos)
4462{
4463 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004464 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004465 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004466
4467 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004468 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4469 if (sret != -EBUSY)
4470 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004471
Steven Rostedtf9520752009-03-02 14:04:40 -05004472 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004473
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004474 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004475 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004476 if (unlikely(iter->trace->name != tr->current_trace->name))
4477 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004478 mutex_unlock(&trace_types_lock);
4479
4480 /*
4481 * Avoid more than one consumer on a single file descriptor
 4482 * This is just a matter of trace coherency; the ring buffer itself
4483 * is protected.
4484 */
4485 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004486 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004487 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4488 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004489 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004490 }
4491
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004492waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004493 sret = tracing_wait_pipe(filp);
4494 if (sret <= 0)
4495 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004496
4497 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004498 if (trace_empty(iter)) {
4499 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004500 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004501 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004502
4503 if (cnt >= PAGE_SIZE)
4504 cnt = PAGE_SIZE - 1;
4505
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004506 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004507 memset(&iter->seq, 0,
4508 sizeof(struct trace_iterator) -
4509 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004510 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004511 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004512
Lai Jiangshan4f535962009-05-18 19:35:34 +08004513 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004514 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004515 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004516 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004517 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004518
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004519 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004520 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004521 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004522 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004523 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004524 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004525 if (ret != TRACE_TYPE_NO_CONSUME)
4526 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004527
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004528 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02004529 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004530
4531 /*
4532 * Setting the full flag means we reached the trace_seq buffer
 4533 * size and we should have left via the partial-output condition above.
4534 * One of the trace_seq_* functions is not used properly.
4535 */
4536 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4537 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004538 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004539 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004540 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004541
Steven Rostedtb3806b42008-05-12 21:20:46 +02004542 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004543 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004544 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05004545 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004546
4547 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004548 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004549 * entries, go back to wait for more entries.
4550 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004551 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004552 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004553
Steven Rostedt107bad82008-05-12 21:21:01 +02004554out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004555 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004556
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004557 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004558}
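/*
 * Minimal consuming reader for the function above (a sketch; the
 * mount point is an assumption).  Unlike the "trace" file, every
 * byte handed out here is consumed from the ring buffer:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		return 0;
 *	}
 *
 * Each read blocks in tracing_wait_pipe() while the buffer is empty,
 * and concurrent readers of a single file descriptor are serialized
 * on iter->mutex above.
 */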
4559
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004560static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4561 unsigned int idx)
4562{
4563 __free_page(spd->pages[idx]);
4564}
4565
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004566static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004567 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004568 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004569 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004570 .steal = generic_pipe_buf_steal,
4571 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004572};
4573
Steven Rostedt34cd4992009-02-09 12:06:29 -05004574static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004575tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004576{
4577 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004578 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004579 int ret;
4580
4581 /* Seq buffer is page-sized, exactly what we need. */
4582 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004583 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004584 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004585
4586 if (trace_seq_has_overflowed(&iter->seq)) {
4587 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004588 break;
4589 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004590
4591 /*
4592 * This should not be hit, because it should only
4593 * be set if the iter->seq overflowed. But check it
4594 * anyway to be safe.
4595 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05004596 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004597 iter->seq.seq.len = save_len;
4598 break;
4599 }
4600
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004601 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004602 if (rem < count) {
4603 rem = 0;
4604 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004605 break;
4606 }
4607
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004608 if (ret != TRACE_TYPE_NO_CONSUME)
4609 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004610 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004611 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004612 rem = 0;
4613 iter->ent = NULL;
4614 break;
4615 }
4616 }
4617
4618 return rem;
4619}
4620
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004621static ssize_t tracing_splice_read_pipe(struct file *filp,
4622 loff_t *ppos,
4623 struct pipe_inode_info *pipe,
4624 size_t len,
4625 unsigned int flags)
4626{
Jens Axboe35f3d142010-05-20 10:43:18 +02004627 struct page *pages_def[PIPE_DEF_BUFFERS];
4628 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004629 struct trace_iterator *iter = filp->private_data;
4630 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004631 .pages = pages_def,
4632 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004633 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004634 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004635 .flags = flags,
4636 .ops = &tracing_pipe_buf_ops,
4637 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004638 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004639 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004640 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004641 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004642 unsigned int i;
4643
Jens Axboe35f3d142010-05-20 10:43:18 +02004644 if (splice_grow_spd(pipe, &spd))
4645 return -ENOMEM;
4646
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004647 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004648 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004649 if (unlikely(iter->trace->name != tr->current_trace->name))
4650 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004651 mutex_unlock(&trace_types_lock);
4652
4653 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004654
4655 if (iter->trace->splice_read) {
4656 ret = iter->trace->splice_read(iter, filp,
4657 ppos, pipe, len, flags);
4658 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004659 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004660 }
4661
4662 ret = tracing_wait_pipe(filp);
4663 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004664 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004665
Jason Wessel955b61e2010-08-05 09:22:23 -05004666 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004667 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004668 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004669 }
4670
Lai Jiangshan4f535962009-05-18 19:35:34 +08004671 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004672 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004673
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004674 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004675 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004676 spd.pages[i] = alloc_page(GFP_KERNEL);
4677 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004678 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004679
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004680 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004681
4682 /* Copy the data into the page, so we can start over. */
4683 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004684 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004685 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004686 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004687 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004688 break;
4689 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004690 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004691 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004692
Steven Rostedtf9520752009-03-02 14:04:40 -05004693 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004694 }
4695
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004696 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004697 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004698 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004699
4700 spd.nr_pages = i;
4701
Jens Axboe35f3d142010-05-20 10:43:18 +02004702 ret = splice_to_pipe(pipe, &spd);
4703out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004704 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004705 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004706
Steven Rostedt34cd4992009-02-09 12:06:29 -05004707out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004708 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004709 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004710}
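/*
 * The splice path above can be exercised from user space roughly
 * like this (a sketch; needs _GNU_SOURCE for splice(2), error
 * handling trimmed, path assumed):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int tfd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *		int out = open("trace.out", O_WRONLY | O_CREAT, 0644);
 *		int p[2];
 *		ssize_t n;
 *
 *		if (tfd < 0 || out < 0 || pipe(p) < 0)
 *			return 1;
 *		while ((n = splice(tfd, NULL, p[1], NULL, 4096, 0)) > 0)
 *			splice(p[0], NULL, out, NULL, n, 0);
 *		return 0;
 *	}
 *
 * splice(2) requires one side to be a pipe, hence the intermediate
 * pipe: pages move trace_pipe -> pipe -> file without being copied
 * through a user-space buffer.
 */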
4711
Steven Rostedta98a3c32008-05-12 21:20:59 +02004712static ssize_t
4713tracing_entries_read(struct file *filp, char __user *ubuf,
4714 size_t cnt, loff_t *ppos)
4715{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004716 struct inode *inode = file_inode(filp);
4717 struct trace_array *tr = inode->i_private;
4718 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004719 char buf[64];
4720 int r = 0;
4721 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004722
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004723 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004724
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004725 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004726 int cpu, buf_size_same;
4727 unsigned long size;
4728
4729 size = 0;
4730 buf_size_same = 1;
4731 /* check if all cpu sizes are same */
4732 for_each_tracing_cpu(cpu) {
4733 /* fill in the size from first enabled cpu */
4734 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004735 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4736 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004737 buf_size_same = 0;
4738 break;
4739 }
4740 }
4741
4742 if (buf_size_same) {
4743 if (!ring_buffer_expanded)
4744 r = sprintf(buf, "%lu (expanded: %lu)\n",
4745 size >> 10,
4746 trace_buf_size >> 10);
4747 else
4748 r = sprintf(buf, "%lu\n", size >> 10);
4749 } else
4750 r = sprintf(buf, "X\n");
4751 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004752 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004753
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004754 mutex_unlock(&trace_types_lock);
4755
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004756 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4757 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004758}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
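
/*
 * The written value is in KB.  For example (illustrative session,
 * assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo 4096 > /sys/kernel/debug/tracing/buffer_size_kb
 *
 * resizes every per-cpu buffer to 4 MB, while the same write to
 * per_cpu/cpu0/buffer_size_kb resizes only CPU 0's buffer.
 */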

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
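
/*
 * buffer_total_size_kb is simply the sum across all tracing CPUs:
 * e.g. four CPUs at 1408 KB each read back as "5632".
 */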

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function exists only so that an "echo" into the file does
	 * not return an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}
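
/*
 * Closing free_buffer is what actually frees the ring buffer, so a
 * typical (illustrative) use is:
 *
 *   # echo > /sys/kernel/debug/tracing/free_buffer
 *
 * The echo opens, writes, and closes the file; the release shrinks
 * every per-cpu buffer to zero, after stopping the tracer first when
 * the disable_on_free option is set.
 */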

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory.
	 * It most likely already is resident, since the caller just
	 * referenced it, but there is no guarantee of that.  By using
	 * get_user_pages_fast() and kmap_atomic/kunmap_atomic() we
	 * can access the pages directly and write the data straight
	 * into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
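
#if 0	/* Illustrative userspace counterpart, not part of this file. */
/*
 * A minimal sketch of writing a marker from userspace, assuming
 * debugfs is mounted at /sys/kernel/debug.  The trailing '\n' is
 * optional; tracing_mark_write() above appends one if it is missing.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int trace_mark(const char *msg)
{
	int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, msg, strlen(msg)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif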

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
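
/*
 * Example (illustrative): reading trace_clock brackets the current
 * clock, e.g. "[local] global counter ...", and writing a name
 * selects it:
 *
 *   # echo global > /sys/kernel/debug/tracing/trace_clock
 *
 * Note that the write empties both the trace and snapshot buffers,
 * since timestamps taken with different clocks cannot be compared.
 */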

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
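
/*
 * In short (illustrative session): "echo 1 > snapshot" allocates the
 * spare buffer if needed and swaps it with the live buffer,
 * "echo 0 > snapshot" frees the spare buffer again, and any other
 * value just clears the snapshot's contents:
 *
 *   # echo 1 > /sys/kernel/debug/tracing/snapshot
 *   # cat /sys/kernel/debug/tracing/snapshot
 */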

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			ret = wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (ret) {
				size = ret;
				goto out_unlock;
			}
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}
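
/*
 * trace_pipe_raw hands out ring-buffer pages verbatim: each read fills
 * a spare page via ring_buffer_read_page() and copies it to userspace
 * in binary form, so consumers (e.g. trace-cmd) typically read in
 * PAGE_SIZE chunks and parse the page format themselves.
 */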

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (ret)
			goto out;
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
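
/*
 * A zero-copy path: whole ring-buffer pages are handed to the pipe by
 * reference (buffer_ref) rather than copied, which is why both *ppos
 * and len above must be page aligned.  An illustrative consumer would
 * splice() from per_cpu/cpuN/trace_pipe_raw into a pipe and on into a
 * file to capture raw trace data.
 */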

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
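
/*
 * per_cpu/cpuN/stats output, one "key: value" per line as built above:
 * entries, overrun, commit overrun, bytes, oldest event ts, now ts,
 * dropped events, and read events.  "overrun" counts events lost to
 * buffer wrap-around; "commit overrun" counts events lost because a
 * nested write filled the buffer before the outer commit finished.
 */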

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf + r, (size - 1) - r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob + 1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}
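
/*
 * Illustrative use of the command registered below, via
 * set_ftrace_filter (a trailing count limits how many snapshots are
 * taken before the probe goes quiet):
 *
 *   # echo 'schedule:snapshot:1' > /sys/kernel/debug/tracing/set_ftrace_filter
 *   # echo '!schedule:snapshot' > /sys/kernel/debug/tracing/set_ftrace_filter
 */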

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
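
/*
 * The resulting layout, for example on a two-CPU system:
 *
 *   /sys/kernel/debug/tracing/per_cpu/cpu0/{trace,trace_pipe,
 *       trace_pipe_raw,stats,buffer_size_kb,snapshot,snapshot_raw}
 *   /sys/kernel/debug/tracing/per_cpu/cpu1/...
 */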

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt	*opt;
	struct tracer_flags	*flags;
	struct trace_array	*tr;
	struct dentry		*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};
6010
Steven Rostedta8259072009-02-26 22:19:12 -05006011static ssize_t
6012trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6013 loff_t *ppos)
6014{
6015 long index = (long)filp->private_data;
6016 char *buf;
6017
6018 if (trace_flags & (1 << index))
6019 buf = "1\n";
6020 else
6021 buf = "0\n";
6022
6023 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6024}
6025
6026static ssize_t
6027trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6028 loff_t *ppos)
6029{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006030 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05006031 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05006032 unsigned long val;
6033 int ret;
6034
Peter Huewe22fe9b52011-06-07 21:58:27 +02006035 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6036 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05006037 return ret;
6038
Zhaoleif2d84b62009-08-07 18:55:48 +08006039 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05006040 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006041
6042 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006043 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006044 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05006045
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006046 if (ret < 0)
6047 return ret;
6048
Steven Rostedta8259072009-02-26 22:19:12 -05006049 *ppos += cnt;
6050
6051 return cnt;
6052}
6053
Steven Rostedta8259072009-02-26 22:19:12 -05006054static const struct file_operations trace_options_core_fops = {
6055 .open = tracing_open_generic,
6056 .read = trace_options_core_read,
6057 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006058 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05006059};
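/*
 * Illustrative usage: the core options work the same way but toggle bits
 * in the global trace_flags, e.g. (option name taken from the
 * trace_options table defined elsewhere in this file):
 *
 *	echo 0 > /sys/kernel/debug/tracing/options/sym-userobj
 */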
6060
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006061struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006062 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006063 struct dentry *parent,
6064 void *data,
6065 const struct file_operations *fops)
6066{
6067 struct dentry *ret;
6068
6069 ret = debugfs_create_file(name, mode, parent, data, fops);
6070 if (!ret)
6071 pr_warning("Could not create debugfs '%s' entry\n", name);
6072
6073 return ret;
6074}
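/*
 * Minimal usage sketch (illustration only; "my_stats" and "my_stats_fops"
 * are hypothetical, not defined in this file):
 *
 *	struct dentry *d;
 *
 *	d = trace_create_file("my_stats", 0444, d_tracer,
 *			      NULL, &my_stats_fops);
 *
 * A NULL return means the pr_warning() above already fired; most callers
 * in this file simply carry on without the file.
 */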
6075
6076
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006077static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006078{
6079 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006080
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006081 if (tr->options)
6082 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006083
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006084 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006085 if (!d_tracer)
6086 return NULL;
6087
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006088 tr->options = debugfs_create_dir("options", d_tracer);
6089 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05006090 pr_warning("Could not create debugfs directory 'options'\n");
6091 return NULL;
6092 }
6093
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006094 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006095}
6096
Steven Rostedt577b7852009-02-26 23:43:05 -05006097static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006098create_trace_option_file(struct trace_array *tr,
6099 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006100 struct tracer_flags *flags,
6101 struct tracer_opt *opt)
6102{
6103 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006104
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006105 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006106 if (!t_options)
6107 return;
6108
6109 topt->flags = flags;
6110 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006111 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006112
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006113 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006114 &trace_options_fops);
6115
Steven Rostedt577b7852009-02-26 23:43:05 -05006116}
6117
6118static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006119create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05006120{
6121 struct trace_option_dentry *topts;
6122 struct tracer_flags *flags;
6123 struct tracer_opt *opts;
6124 int cnt;
6125
6126 if (!tracer)
6127 return NULL;
6128
6129 flags = tracer->flags;
6130
6131 if (!flags || !flags->opts)
6132 return NULL;
6133
6134 opts = flags->opts;
6135
6136 for (cnt = 0; opts[cnt].name; cnt++)
6137 ;
6138
Steven Rostedt0cfe8242009-02-27 10:51:10 -05006139 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05006140 if (!topts)
6141 return NULL;
6142
6143 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006144 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05006145 &opts[cnt]);
6146
6147 return topts;
6148}
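/*
 * The kcalloc() of cnt + 1 entries above leaves a zeroed sentinel at the
 * end of the array; destroy_trace_option_files() below relies on that
 * NULL ->opt to find the end without a separate count.
 */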
6149
6150static void
6151destroy_trace_option_files(struct trace_option_dentry *topts)
6152{
6153 int cnt;
6154
6155 if (!topts)
6156 return;
6157
Fabian Frederick3f4d8f72014-06-26 19:14:31 +02006158 for (cnt = 0; topts[cnt].opt; cnt++)
6159 debugfs_remove(topts[cnt].entry);
Steven Rostedt577b7852009-02-26 23:43:05 -05006160
6161 kfree(topts);
6162}
6163
Steven Rostedta8259072009-02-26 22:19:12 -05006164static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006165create_trace_option_core_file(struct trace_array *tr,
6166 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006167{
6168 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006169
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006170 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006171 if (!t_options)
6172 return NULL;
6173
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006174 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05006175 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006176}
6177
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006178static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006179{
6180 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006181 int i;
6182
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006183 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006184 if (!t_options)
6185 return;
6186
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006187 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006188 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05006189}
6190
Steven Rostedt499e5472012-02-22 15:50:28 -05006191static ssize_t
6192rb_simple_read(struct file *filp, char __user *ubuf,
6193 size_t cnt, loff_t *ppos)
6194{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006195 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006196 char buf[64];
6197 int r;
6198
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006199 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006200 r = sprintf(buf, "%d\n", r);
6201
6202 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6203}
6204
6205static ssize_t
6206rb_simple_write(struct file *filp, const char __user *ubuf,
6207 size_t cnt, loff_t *ppos)
6208{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006209 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006210 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05006211 unsigned long val;
6212 int ret;
6213
6214 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6215 if (ret)
6216 return ret;
6217
6218 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006219 mutex_lock(&trace_types_lock);
6220 if (val) {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006221 tracer_tracing_on(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006222 if (tr->current_trace->start)
6223 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006224 } else {
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006225 tracer_tracing_off(tr);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006226 if (tr->current_trace->stop)
6227 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05006228 }
6229 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05006230 }
6231
6232 (*ppos)++;
6233
6234 return cnt;
6235}
6236
6237static const struct file_operations rb_simple_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006238 .open = tracing_open_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006239 .read = rb_simple_read,
6240 .write = rb_simple_write,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04006241 .release = tracing_release_generic_tr,
Steven Rostedt499e5472012-02-22 15:50:28 -05006242 .llseek = default_llseek,
6243};
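/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug): the
 * "tracing_on" file created from these ops gates recording without
 * tearing down the ring buffer:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	(pause recording)
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	(resume)
 */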
6244
Steven Rostedt277ba042012-08-03 16:10:49 -04006245struct dentry *trace_instance_dir;
6246
6247static void
6248init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
6249
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006250static int
6251allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006252{
6253 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006254
6255 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
6256
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006257 buf->tr = tr;
6258
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006259 buf->buffer = ring_buffer_alloc(size, rb_flags);
6260 if (!buf->buffer)
6261 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006262
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006263 buf->data = alloc_percpu(struct trace_array_cpu);
6264 if (!buf->data) {
6265 ring_buffer_free(buf->buffer);
6266 return -ENOMEM;
6267 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006268
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006269 /* Allocate the first page for all buffers */
6270 set_buffer_entries(&tr->trace_buffer,
6271 ring_buffer_size(tr->trace_buffer.buffer, 0));
6272
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006273 return 0;
6274}
6275
6276static int allocate_trace_buffers(struct trace_array *tr, int size)
6277{
6278 int ret;
6279
6280 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
6281 if (ret)
6282 return ret;
6283
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006284#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006285 ret = allocate_trace_buffer(tr, &tr->max_buffer,
6286 allocate_snapshot ? size : 1);
6287 if (WARN_ON(ret)) {
6288 ring_buffer_free(tr->trace_buffer.buffer);
6289 free_percpu(tr->trace_buffer.data);
6290 return -ENOMEM;
6291 }
6292 tr->allocated_snapshot = allocate_snapshot;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006293
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006294 /*
6295 * Only the top level trace array gets its snapshot allocated
6296 * from the kernel command line.
6297 */
6298 allocate_snapshot = false;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006299#endif
6300 return 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006301}
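/*
 * Note: unless a snapshot was requested on the kernel command line, the
 * max_buffer starts out at the minimum size (a size of 1 allocates the
 * smallest buffer the ring buffer supports) and is only expanded when a
 * snapshot is actually taken, so idle instances pay almost nothing for it.
 */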
6302
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006303static void free_trace_buffer(struct trace_buffer *buf)
6304{
6305 if (buf->buffer) {
6306 ring_buffer_free(buf->buffer);
6307 buf->buffer = NULL;
6308 free_percpu(buf->data);
6309 buf->data = NULL;
6310 }
6311}
6312
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006313static void free_trace_buffers(struct trace_array *tr)
6314{
6315 if (!tr)
6316 return;
6317
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006318 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006319
6320#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006321 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006322#endif
6323}
6324
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006325static int new_instance_create(const char *name)
6326{
Steven Rostedt277ba042012-08-03 16:10:49 -04006327 struct trace_array *tr;
6328 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04006329
6330 mutex_lock(&trace_types_lock);
6331
6332 ret = -EEXIST;
6333 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6334 if (tr->name && strcmp(tr->name, name) == 0)
6335 goto out_unlock;
6336 }
6337
6338 ret = -ENOMEM;
6339 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
6340 if (!tr)
6341 goto out_unlock;
6342
6343 tr->name = kstrdup(name, GFP_KERNEL);
6344 if (!tr->name)
6345 goto out_free_tr;
6346
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006347 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
6348 goto out_free_tr;
6349
6350 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
6351
Steven Rostedt277ba042012-08-03 16:10:49 -04006352 raw_spin_lock_init(&tr->start_lock);
6353
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05006354 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6355
Steven Rostedt277ba042012-08-03 16:10:49 -04006356 tr->current_trace = &nop_trace;
6357
6358 INIT_LIST_HEAD(&tr->systems);
6359 INIT_LIST_HEAD(&tr->events);
6360
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006361 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04006362 goto out_free_tr;
6363
Steven Rostedt277ba042012-08-03 16:10:49 -04006364 tr->dir = debugfs_create_dir(name, trace_instance_dir);
6365 if (!tr->dir)
6366 goto out_free_tr;
6367
6368 ret = event_trace_add_tracer(tr->dir, tr);
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006369 if (ret) {
6370 debugfs_remove_recursive(tr->dir);
Steven Rostedt277ba042012-08-03 16:10:49 -04006371 goto out_free_tr;
Alexander Z Lam609e85a2013-07-10 17:34:34 -07006372 }
Steven Rostedt277ba042012-08-03 16:10:49 -04006373
6374 init_tracer_debugfs(tr, tr->dir);
6375
6376 list_add(&tr->list, &ftrace_trace_arrays);
6377
6378 mutex_unlock(&trace_types_lock);
6379
6380 return 0;
6381
6382 out_free_tr:
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006383 free_trace_buffers(tr);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006384 free_cpumask_var(tr->tracing_cpumask);
Steven Rostedt277ba042012-08-03 16:10:49 -04006385 kfree(tr->name);
6386 kfree(tr);
6387
6388 out_unlock:
6389 mutex_unlock(&trace_types_lock);
6390
6391 return ret;
6392
6393}
6394
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006395static int instance_delete(const char *name)
6396{
6397 struct trace_array *tr;
6398 int found = 0;
6399 int ret;
6400
6401 mutex_lock(&trace_types_lock);
6402
6403 ret = -ENODEV;
6404 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
6405 if (tr->name && strcmp(tr->name, name) == 0) {
6406 found = 1;
6407 break;
6408 }
6409 }
6410 if (!found)
6411 goto out_unlock;
6412
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05006413 ret = -EBUSY;
6414 if (tr->ref)
6415 goto out_unlock;
6416
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006417 list_del(&tr->list);
6418
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05006419 tracing_set_nop(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006420 event_trace_del_tracer(tr);
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05006421 ftrace_destroy_function_files(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006422 debugfs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)a9fcaaa2014-06-06 23:17:28 -04006423 free_trace_buffers(tr);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006424
6425 kfree(tr->name);
6426 kfree(tr);
6427
6428 ret = 0;
6429
6430 out_unlock:
6431 mutex_unlock(&trace_types_lock);
6432
6433 return ret;
6434}
6435
Steven Rostedt277ba042012-08-03 16:10:49 -04006436static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
6437{
6438 struct dentry *parent;
6439 int ret;
6440
6441 /* Paranoid: Make sure the parent is the "instances" directory */
6442 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6443 if (WARN_ON_ONCE(parent != trace_instance_dir))
6444 return -ENOENT;
6445
6446 /*
6447 * The inode mutex is locked, but debugfs_create_dir() will also
6448	 * take the mutex. As the instances directory cannot be destroyed
6449	 * or changed in any other way, it is safe to unlock it and
6450	 * let the dentry try. If two users try to make the same dir at
6451	 * the same time, then new_instance_create() will determine the
6452 * winner.
6453 */
6454 mutex_unlock(&inode->i_mutex);
6455
6456 ret = new_instance_create(dentry->d_iname);
6457
6458 mutex_lock(&inode->i_mutex);
6459
6460 return ret;
6461}
6462
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006463static int instance_rmdir(struct inode *inode, struct dentry *dentry)
6464{
6465 struct dentry *parent;
6466 int ret;
6467
6468 /* Paranoid: Make sure the parent is the "instances" directory */
6469 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
6470 if (WARN_ON_ONCE(parent != trace_instance_dir))
6471 return -ENOENT;
6472
6473 /* The caller did a dget() on dentry */
6474 mutex_unlock(&dentry->d_inode->i_mutex);
6475
6476 /*
6477	 * The inode mutex is locked, but debugfs_remove_recursive() will
6478	 * also take the mutex. As the instances directory cannot be
6479	 * destroyed or changed in any other way, it is safe to unlock it
6480	 * and let the dentry try. If two users try to remove the same dir
6481	 * at the same time, then instance_delete() will determine the
6482	 * winner.
6483 */
6484 mutex_unlock(&inode->i_mutex);
6485
6486 ret = instance_delete(dentry->d_iname);
6487
6488 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
6489 mutex_lock(&dentry->d_inode->i_mutex);
6490
6491 return ret;
6492}
6493
Steven Rostedt277ba042012-08-03 16:10:49 -04006494static const struct inode_operations instance_dir_inode_operations = {
6495 .lookup = simple_lookup,
6496 .mkdir = instance_mkdir,
Steven Rostedt0c8916c2012-08-07 16:14:16 -04006497 .rmdir = instance_rmdir,
Steven Rostedt277ba042012-08-03 16:10:49 -04006498};
6499
6500static __init void create_trace_instances(struct dentry *d_tracer)
6501{
6502 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
6503 if (WARN_ON(!trace_instance_dir))
6504 return;
6505
6506 /* Hijack the dir inode operations, to allow mkdir */
6507 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
6508}
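/*
 * Illustrative usage (assuming debugfs at /sys/kernel/debug): with the
 * mkdir/rmdir hooks above, trace instances are managed with ordinary
 * directory operations:
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 *	echo 1 > /sys/kernel/debug/tracing/instances/foo/tracing_on
 *	rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * The rmdir fails with -EBUSY while the instance still has references.
 */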
6509
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006510static void
6511init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
6512{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05006513 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006514
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05006515 trace_create_file("available_tracers", 0444, d_tracer,
6516 tr, &show_traces_fops);
6517
6518 trace_create_file("current_tracer", 0644, d_tracer,
6519 tr, &set_tracer_fops);
6520
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006521 trace_create_file("tracing_cpumask", 0644, d_tracer,
6522 tr, &tracing_cpumask_fops);
6523
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006524 trace_create_file("trace_options", 0644, d_tracer,
6525 tr, &tracing_iter_fops);
6526
6527 trace_create_file("trace", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006528 tr, &tracing_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006529
6530 trace_create_file("trace_pipe", 0444, d_tracer,
Oleg Nesterov15544202013-07-23 17:25:57 +02006531 tr, &tracing_pipe_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006532
6533 trace_create_file("buffer_size_kb", 0644, d_tracer,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006534 tr, &tracing_entries_fops);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006535
6536 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
6537 tr, &tracing_total_entries_fops);
6538
Wang YanQing238ae932013-05-26 16:52:01 +08006539 trace_create_file("free_buffer", 0200, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006540 tr, &tracing_free_buffer_fops);
6541
6542 trace_create_file("trace_marker", 0220, d_tracer,
6543 tr, &tracing_mark_fops);
6544
6545 trace_create_file("trace_clock", 0644, d_tracer, tr,
6546 &trace_clock_fops);
6547
6548 trace_create_file("tracing_on", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006549 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006550
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05006551#ifdef CONFIG_TRACER_MAX_TRACE
6552 trace_create_file("tracing_max_latency", 0644, d_tracer,
6553 &tr->max_latency, &tracing_max_lat_fops);
6554#endif
6555
Steven Rostedt (Red Hat)591dffd2014-01-10 16:17:45 -05006556 if (ftrace_create_function_files(tr, d_tracer))
6557 WARN(1, "Could not allocate function filter files");
6558
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006559#ifdef CONFIG_TRACER_SNAPSHOT
6560 trace_create_file("snapshot", 0644, d_tracer,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006561 tr, &snapshot_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05006562#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05006563
6564 for_each_tracing_cpu(cpu)
6565 tracing_init_debugfs_percpu(tr, cpu);
6566
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006567}
6568
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006569static __init int tracer_init_debugfs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006570{
6571 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006572
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08006573 trace_access_lock_init();
6574
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006575 d_tracer = tracing_init_dentry();
Namhyung Kimed6f1c92013-04-10 09:18:12 +09006576 if (!d_tracer)
6577 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006578
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006579 init_tracer_debugfs(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006580
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006581 trace_create_file("tracing_thresh", 0644, d_tracer,
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04006582 &global_trace, &tracing_thresh_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006583
Li Zefan339ae5d2009-04-17 10:34:30 +08006584 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006585 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02006586
Avadh Patel69abe6a2009-04-10 16:04:48 -04006587 trace_create_file("saved_cmdlines", 0444, d_tracer,
6588 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03006589
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006590 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
6591 NULL, &tracing_saved_cmdlines_size_fops);
6592
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006593#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006594 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
6595 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006596#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006597
Steven Rostedt277ba042012-08-03 16:10:49 -04006598 create_trace_instances(d_tracer);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09006599
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006600 create_trace_options_dir(&global_trace);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006601
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006602 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006603}
6604
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006605static int trace_panic_handler(struct notifier_block *this,
6606 unsigned long event, void *unused)
6607{
Steven Rostedt944ac422008-10-23 19:26:08 -04006608 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006609 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006610 return NOTIFY_OK;
6611}
6612
6613static struct notifier_block trace_panic_notifier = {
6614 .notifier_call = trace_panic_handler,
6615 .next = NULL,
6616 .priority = 150 /* priority: INT_MAX >= x >= 0 */
6617};
6618
6619static int trace_die_handler(struct notifier_block *self,
6620 unsigned long val,
6621 void *data)
6622{
6623 switch (val) {
6624 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04006625 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006626 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006627 break;
6628 default:
6629 break;
6630 }
6631 return NOTIFY_OK;
6632}
6633
6634static struct notifier_block trace_die_notifier = {
6635 .notifier_call = trace_die_handler,
6636 .priority = 200
6637};
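/*
 * Both notifiers act only when ftrace_dump_on_oops is non-zero. That is
 * typically set with the "ftrace_dump_on_oops" kernel command line
 * parameter or (assuming the usual sysctl wiring) via
 * /proc/sys/kernel/ftrace_dump_on_oops, where 1 dumps every CPU's buffer
 * and 2 dumps only the buffer of the CPU that oopsed.
 */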
6638
6639/*
6640	 * printk is limited to a max of 1024 bytes; we really don't need it that big.
6641 * Nothing should be printing 1000 characters anyway.
6642 */
6643#define TRACE_MAX_PRINT 1000
6644
6645/*
6646	 * Define KERN_TRACE here so that we have one place to modify
6647 * it if we decide to change what log level the ftrace dump
6648 * should be at.
6649 */
Steven Rostedt428aee12009-01-14 12:24:42 -05006650#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006651
Jason Wessel955b61e2010-08-05 09:22:23 -05006652void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006653trace_printk_seq(struct trace_seq *s)
6654{
6655 /* Probably should print a warning here. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006656 if (s->seq.len >= TRACE_MAX_PRINT)
6657 s->seq.len = TRACE_MAX_PRINT;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006658
6659	/* should be NUL-terminated, but we are paranoid. */
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04006660 s->buffer[s->seq.len] = 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006661
6662 printk(KERN_TRACE "%s", s->buffer);
6663
Steven Rostedtf9520752009-03-02 14:04:40 -05006664 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006665}
6666
Jason Wessel955b61e2010-08-05 09:22:23 -05006667void trace_init_global_iter(struct trace_iterator *iter)
6668{
6669 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006670 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05006671 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006672 iter->trace_buffer = &global_trace.trace_buffer;
Cody P Schaferb2f974d2013-10-23 11:49:57 -07006673
6674 if (iter->trace && iter->trace->open)
6675 iter->trace->open(iter);
6676
6677 /* Annotate start of buffers if we had overruns */
6678 if (ring_buffer_overruns(iter->trace_buffer->buffer))
6679 iter->iter_flags |= TRACE_FILE_ANNOTATE;
6680
6681 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
6682 if (trace_clocks[iter->tr->clock_id].in_ns)
6683 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
Jason Wessel955b61e2010-08-05 09:22:23 -05006684}
6685
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006686void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006687{
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006688 /* use static because iter can be a bit big for the stack */
6689 static struct trace_iterator iter;
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006690 static atomic_t dump_running;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006691 unsigned int old_userobj;
Steven Rostedtd7690412008-10-01 00:29:53 -04006692 unsigned long flags;
6693 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006694
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006695 /* Only allow one dump user at a time. */
6696 if (atomic_inc_return(&dump_running) != 1) {
6697 atomic_dec(&dump_running);
6698 return;
Steven Rostedte0a413f2011-09-29 21:26:16 -04006699 }
6700
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006701 /*
6702 * Always turn off tracing when we dump.
6703 * We don't need to show trace output of what happens
6704 * between multiple crashes.
6705 *
6706 * If the user does a sysrq-z, then they can re-enable
6707 * tracing with echo 1 > tracing_on.
6708 */
6709 tracing_off();
6710
6711 local_irq_save(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006712
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08006713 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05006714 trace_init_global_iter(&iter);
6715
Steven Rostedtd7690412008-10-01 00:29:53 -04006716 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006717 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04006718 }
6719
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006720 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
6721
Török Edwinb54d3de2008-11-22 13:28:48 +02006722 /* don't look at user memory in panic mode */
6723 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
6724
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006725 switch (oops_dump_mode) {
6726 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05006727 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006728 break;
6729 case DUMP_ORIG:
6730 iter.cpu_file = raw_smp_processor_id();
6731 break;
6732 case DUMP_NONE:
6733 goto out_enable;
6734 default:
6735 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05006736 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006737 }
6738
6739 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006740
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006741 /* Did function tracer already get disabled? */
6742 if (ftrace_is_dead()) {
6743 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
6744 printk("# MAY BE MISSING FUNCTION EVENTS\n");
6745 }
6746
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006747 /*
6748	 * We need to stop all tracing on all CPUs to read
6749	 * the next buffer. This is a bit expensive, but is
6750	 * not done often. We fill all we can read,
6751 * and then release the locks again.
6752 */
6753
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006754 while (!trace_empty(&iter)) {
6755
6756 if (!cnt)
6757 printk(KERN_TRACE "---------------------------------\n");
6758
6759 cnt++;
6760
6761 /* reset all but tr, trace, and overruns */
6762 memset(&iter.seq, 0,
6763 sizeof(struct trace_iterator) -
6764 offsetof(struct trace_iterator, seq));
6765 iter.iter_flags |= TRACE_FILE_LAT_FMT;
6766 iter.pos = -1;
6767
Jason Wessel955b61e2010-08-05 09:22:23 -05006768 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08006769 int ret;
6770
6771 ret = print_trace_line(&iter);
6772 if (ret != TRACE_TYPE_NO_CONSUME)
6773 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006774 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05006775 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006776
6777 trace_printk_seq(&iter.seq);
6778 }
6779
6780 if (!cnt)
6781 printk(KERN_TRACE " (ftrace buffer empty)\n");
6782 else
6783 printk(KERN_TRACE "---------------------------------\n");
6784
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02006785 out_enable:
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006786 trace_flags |= old_userobj;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006787
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006788 for_each_tracing_cpu(cpu) {
6789 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006790 }
Steven Rostedt (Red Hat)7fe70b52013-03-15 13:10:35 -04006791 atomic_dec(&dump_running);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04006792 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006793}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07006794EXPORT_SYMBOL_GPL(ftrace_dump);
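/*
 * As an exported GPL symbol, ftrace_dump() can also be called directly
 * from other kernel code. An illustrative (hypothetical) error path:
 *
 *	if (WARN_ON(broken_state))
 *		ftrace_dump(DUMP_ORIG);
 *
 * DUMP_ORIG limits the dump to the buffer of the calling CPU, which
 * keeps the console output manageable on large machines.
 */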
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01006795
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006796__init static int tracer_alloc_buffers(void)
6797{
Steven Rostedt73c51622009-03-11 13:42:01 -04006798 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306799 int ret = -ENOMEM;
6800
David Sharp750912f2010-12-08 13:46:47 -08006801
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306802 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
6803 goto out;
6804
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006805 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306806 goto out_free_buffer_mask;
6807
Steven Rostedt07d777f2011-09-22 14:01:55 -04006808 /* Only allocate trace_printk buffers if a trace_printk exists */
6809 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04006810 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04006811 trace_printk_init_buffers();
6812
Steven Rostedt73c51622009-03-11 13:42:01 -04006813 /* To save memory, keep the ring buffer size to its minimum */
6814 if (ring_buffer_expanded)
6815 ring_buf_size = trace_buf_size;
6816 else
6817 ring_buf_size = 1;
6818
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306819 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006820 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006821
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006822 raw_spin_lock_init(&global_trace.start_lock);
6823
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04006824 /* Used for event triggers */
6825 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
6826 if (!temp_buffer)
6827 goto out_free_cpumask;
6828
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006829 if (trace_create_savedcmd() < 0)
6830 goto out_free_temp_buffer;
6831
Steven Rostedtab464282008-05-12 21:21:00 +02006832 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006833 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006834 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
6835 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006836 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006837 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04006838
Steven Rostedt499e5472012-02-22 15:50:28 -05006839 if (global_trace.buffer_disabled)
6840 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006841
Steven Rostedte1e232c2014-02-10 23:38:46 -05006842 if (trace_boot_clock) {
6843 ret = tracing_set_clock(&global_trace, trace_boot_clock);
6844 if (ret < 0)
6845 pr_warning("Trace clock %s not defined, going back to default\n",
6846 trace_boot_clock);
6847 }
6848
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04006849 /*
6850 * register_tracer() might reference current_trace, so it
6851 * needs to be set before we register anything. This is
6852 * just a bootstrap of current_trace anyway.
6853 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006854 global_trace.current_trace = &nop_trace;
6855
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05006856 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
6857
Steven Rostedt (Red Hat)4104d322014-01-10 17:01:58 -05006858 ftrace_init_global_array_ops(&global_trace);
6859
Steven Rostedt (Red Hat)ca164312013-05-23 11:51:10 -04006860 register_tracer(&nop_trace);
6861
Steven Rostedt60a11772008-05-12 21:20:44 +02006862 /* All seems OK, enable tracing */
6863 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04006864
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006865 atomic_notifier_chain_register(&panic_notifier_list,
6866 &trace_panic_notifier);
6867
6868 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01006869
Steven Rostedtae63b312012-05-03 23:09:03 -04006870 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
6871
6872 INIT_LIST_HEAD(&global_trace.systems);
6873 INIT_LIST_HEAD(&global_trace.events);
6874 list_add(&global_trace.list, &ftrace_trace_arrays);
6875
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04006876 while (trace_boot_options) {
6877 char *option;
6878
6879 option = strsep(&trace_boot_options, ",");
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006880 trace_set_options(&global_trace, option);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04006881 }
6882
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006883 register_snapshot_cmd();
6884
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01006885 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04006886
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09006887out_free_savedcmd:
6888 free_saved_cmdlines_buffer(savedcmd);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04006889out_free_temp_buffer:
6890 ring_buffer_free(temp_buffer);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306891out_free_cpumask:
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07006892 free_cpumask_var(global_trace.tracing_cpumask);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10306893out_free_buffer_mask:
6894 free_cpumask_var(tracing_buffer_mask);
6895out:
6896 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006897}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006898
6899__init static int clear_boot_tracer(void)
6900{
6901 /*
6902	 * The default bootup tracer name lives in an init section and is
6903	 * freed after boot. This function is called at late_initcall time;
6904	 * if the boot tracer was never registered by then, clear the
6905	 * pointer out to prevent a later registration from accessing
6906	 * memory that is about to be freed.
6907 */
6908 if (!default_bootup_tracer)
6909 return 0;
6910
6911 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6912 default_bootup_tracer);
6913 default_bootup_tracer = NULL;
6914
6915 return 0;
6916}
6917
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006918early_initcall(tracer_alloc_buffers);
6919fs_initcall(tracer_init_debugfs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006920late_initcall(clear_boot_tracer);