/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring-buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops.
 * Set it to 1 if you want to dump the buffers of all CPUs, or to 2
 * if you want to dump only the buffer of the CPU that triggered the
 * oops.
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning=", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

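/*
 * Illustrative sketch (not compiled): the boot parameters wired up by the
 * __setup() calls above can be combined on the kernel command line, e.g.:
 *
 *	ftrace=function_graph trace_options=sym-addr trace_clock=global \
 *	alloc_snapshot ftrace_dump_on_oops=orig_cpu traceoff_on_warning=1
 *
 * This selects the function_graph tracer at boot, turns on the sym-addr
 * option, switches the trace clock, pre-allocates the snapshot buffer,
 * dumps only the oopsing CPU's buffer on an oops, and stops tracing on
 * the first WARN*(). Which tracers and options actually exist depends on
 * the kernel configuration.
 */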

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

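/*
 * Worked example: ns2usecs() rounds to the nearest microsecond instead of
 * truncating. For nsec = 1499 it returns (1499 + 500) / 1000 = 1, while
 * for nsec = 1500 it returns (1500 + 500) / 1000 = 2.
 */
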
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptors of the pages in memory are used to hold the
 * linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct ftrace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway, this can be
 * configured at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * Serialize access to the ring buffer.
 *
 * The ring buffer serializes readers, but only as low-level protection.
 * The validity of the events (returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow another process
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) the page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

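/*
 * Usage sketch (illustrative only, mirroring the readers later in this
 * file): a consumer of a single cpu buffer takes the per-cpu side of the
 * lock, while a reader of all buffers passes RING_BUFFER_ALL_CPUS and
 * takes the write side:
 *
 *	trace_access_lock(cpu_file);
 *	event = ring_buffer_consume(buffer, cpu_file, &ts, &lost_events);
 *	trace_access_unlock(cpu_file);
 *
 * Two consumers can never race on the same cpu buffer, but different cpu
 * buffers may still be consumed in parallel.
 */
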
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->str			= str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

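/*
 * Note (sketch of the intended callers): __trace_puts() and
 * __trace_bputs() are normally reached through the trace_puts() macro
 * (see linux/kernel.h), which uses __builtin_constant_p() to pick
 * __trace_bputs() for string literals, recording just the pointer, and
 * falls back to __trace_puts() for runtime strings:
 *
 *	trace_puts("reached the slow path\n");
 *
 * The return value is the number of bytes written, or 1 for the
 * pointer-only variant.
 */
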
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
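
/*
 * Usage sketch (illustrative): a debugging patch that wants to freeze a
 * copy of the trace at an interesting point, while tracing continues,
 * could do:
 *
 *	if (tracing_alloc_snapshot() == 0)
 *		tracing_snapshot();
 *
 * tracing_alloc_snapshot() may sleep; tracing_snapshot() itself is safe
 * in atomic context but is ignored from NMI (see above). The frozen copy
 * is then readable from /sys/kernel/debug/tracing/snapshot.
 */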
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

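/*
 * Usage sketch (illustrative): tracing_on()/tracing_off() are exported so
 * that the code under inspection can bracket the interesting window
 * itself, e.g.:
 *
 *	tracing_on();
 *	suspect_function();	(a hypothetical function under test)
 *	tracing_off();
 *
 * leaving the ring buffer contents frozen for later reading.
 */
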
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

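/*
 * Note on units (worked example): set_buf_size() parses its argument with
 * memparse(), so suffixes such as K, M and G work; "trace_buf_size=4M"
 * requests a 4 MiB buffer. set_tracing_thresh() takes microseconds and
 * stores nanoseconds, so "tracing_thresh=100" sets tracing_thresh to
 * 100000.
 */
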
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	ARCH_TRACE_CLOCKS
};

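/*
 * Usage sketch (illustrative): the entries above back the trace_clock
 * debugfs file; the current selection is shown in brackets and can be
 * changed at runtime. On a build with no extra ARCH_TRACE_CLOCKS this
 * looks roughly like:
 *
 *	# cat /sys/kernel/debug/tracing/trace_clock
 *	[local] global counter uptime perf mono
 *	# echo global > /sys/kernel/debug/tracing/trace_clock
 */
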
/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found, the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

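/*
 * Usage sketch (illustrative): a debugfs write handler can loop over
 * trace_get_user() to consume one whitespace-separated token per call,
 * with parser->cont carrying a partial token across writes:
 *
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		err = handle_token(parser.buffer);
 *
 * trace_parser_loaded() is the trace.h helper that reports whether a
 * token is present in parser.buffer; handle_token() is hypothetical.
 */
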
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001279#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001280#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001281static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001282struct saved_cmdlines_buffer {
1283 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1284 unsigned *map_cmdline_to_pid;
1285 unsigned cmdline_num;
1286 int cmdline_idx;
1287 char *saved_cmdlines;
1288};
1289static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001290
Steven Rostedt25b0b442008-05-12 21:21:00 +02001291/* temporary disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001292static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001293
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001294static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001295{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001296 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1297}
1298
1299static inline void set_cmdline(int idx, const char *cmdline)
1300{
1301 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1302}
1303
1304static int allocate_cmdlines_buffer(unsigned int val,
1305 struct saved_cmdlines_buffer *s)
1306{
1307 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1308 GFP_KERNEL);
1309 if (!s->map_cmdline_to_pid)
1310 return -ENOMEM;
1311
1312 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1313 if (!s->saved_cmdlines) {
1314 kfree(s->map_cmdline_to_pid);
1315 return -ENOMEM;
1316 }
1317
1318 s->cmdline_idx = 0;
1319 s->cmdline_num = val;
1320 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1321 sizeof(s->map_pid_to_cmdline));
1322 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1323 val * sizeof(*s->map_cmdline_to_pid));
1324
1325 return 0;
1326}
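
/*
 * Back-of-the-envelope sizing for the default val of 128, assuming
 * the usual values TASK_COMM_LEN == 16, PID_MAX_DEFAULT == 32768 and
 * a 4-byte unsigned (all assumptions, defined elsewhere):
 *
 *	map_cmdline_to_pid:	128 * sizeof(unsigned)	 =   512 bytes
 *	saved_cmdlines:		128 * TASK_COMM_LEN	 =  2048 bytes
 *	map_pid_to_cmdline:	32769 * sizeof(unsigned) ~= 128 KB (static)
 *
 * The pid-indexed map inside the struct dominates the footprint.
 */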
1327
1328static int trace_create_savedcmd(void)
1329{
1330 int ret;
1331
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001332 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001333 if (!savedcmd)
1334 return -ENOMEM;
1335
1336 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1337 if (ret < 0) {
1338 kfree(savedcmd);
1339 savedcmd = NULL;
1340 return -ENOMEM;
1341 }
1342
1343 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001344}
1345
Carsten Emdeb5130b12009-09-13 01:43:07 +02001346int is_tracing_stopped(void)
1347{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001348 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001349}
1350
Steven Rostedt0f048702008-11-05 16:05:44 -05001351/**
1352 * tracing_start - quick start of the tracer
1353 *
1354 * If tracing is enabled but was stopped by tracing_stop,
1355 * this will start the tracer back up.
1356 */
1357void tracing_start(void)
1358{
1359 struct ring_buffer *buffer;
1360 unsigned long flags;
1361
1362 if (tracing_disabled)
1363 return;
1364
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001365 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1366 if (--global_trace.stop_count) {
1367 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001368 /* Someone screwed up their debugging */
1369 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001370 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001371 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001372 goto out;
1373 }
1374
Steven Rostedta2f80712010-03-12 19:56:00 -05001375 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001376 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001377
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001378 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001379 if (buffer)
1380 ring_buffer_record_enable(buffer);
1381
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001382#ifdef CONFIG_TRACER_MAX_TRACE
1383 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001384 if (buffer)
1385 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001386#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001387
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001388 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001389
Steven Rostedt0f048702008-11-05 16:05:44 -05001390 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001391 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1392}
1393
1394static void tracing_start_tr(struct trace_array *tr)
1395{
1396 struct ring_buffer *buffer;
1397 unsigned long flags;
1398
1399 if (tracing_disabled)
1400 return;
1401
1402 /* If global, we need to also start the max tracer */
1403 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1404 return tracing_start();
1405
1406 raw_spin_lock_irqsave(&tr->start_lock, flags);
1407
1408 if (--tr->stop_count) {
1409 if (tr->stop_count < 0) {
1410 /* Someone screwed up their debugging */
1411 WARN_ON_ONCE(1);
1412 tr->stop_count = 0;
1413 }
1414 goto out;
1415 }
1416
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001417 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001418 if (buffer)
1419 ring_buffer_record_enable(buffer);
1420
1421 out:
1422 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001423}
1424
1425/**
1426 * tracing_stop - quick stop of the tracer
1427 *
1428 * Lightweight way to stop tracing. Use in conjunction with
1429 * tracing_start.
1430 */
1431void tracing_stop(void)
1432{
1433 struct ring_buffer *buffer;
1434 unsigned long flags;
1435
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001436 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1437 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001438 goto out;
1439
Steven Rostedta2f80712010-03-12 19:56:00 -05001440 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001441 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001442
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001443 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001444 if (buffer)
1445 ring_buffer_record_disable(buffer);
1446
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001447#ifdef CONFIG_TRACER_MAX_TRACE
1448 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001449 if (buffer)
1450 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001451#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001452
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001453 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001454
Steven Rostedt0f048702008-11-05 16:05:44 -05001455 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001456 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1457}
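
/*
 * Illustrative pairing (not code from this file): tracing_stop() and
 * tracing_start() nest through stop_count, so a caller can bracket a
 * region it does not want traced:
 *
 *	tracing_stop();
 *	do_untraced_work();	(hypothetical helper)
 *	tracing_start();
 *
 * Only when the outermost tracing_start() drops stop_count back to
 * zero are the ring buffers actually re-enabled.
 */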
1458
1459static void tracing_stop_tr(struct trace_array *tr)
1460{
1461 struct ring_buffer *buffer;
1462 unsigned long flags;
1463
1464 /* If global, we need to also stop the max tracer */
1465 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1466 return tracing_stop();
1467
1468 raw_spin_lock_irqsave(&tr->start_lock, flags);
1469 if (tr->stop_count++)
1470 goto out;
1471
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001472 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001473 if (buffer)
1474 ring_buffer_record_disable(buffer);
1475
1476 out:
1477 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001478}
1479
Ingo Molnare309b412008-05-12 21:20:51 +02001480void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001481
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001482static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001483{
Carsten Emdea635cf02009-03-18 09:00:41 +01001484 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001485
1486 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001487 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001488
1489 /*
1490 * It's not the end of the world if we don't get
1491 * the lock, but we also don't want to spin
1492 * nor do we want to disable interrupts,
1493 * so if we miss here, then better luck next time.
1494 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001495 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001496 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001497
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001498 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001499 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001500 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001501
Carsten Emdea635cf02009-03-18 09:00:41 +01001502 /*
1503 * Check whether the cmdline buffer at idx has a pid
1504 * mapped. We are going to overwrite that entry so we
1505 * need to clear the map_pid_to_cmdline. Otherwise we
1506 * would read the new comm for the old pid.
1507 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001508 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001509 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001510 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001511
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001512 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1513 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001514
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001515 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001516 }
1517
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001518 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001519
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001520 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001521
1522 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001523}
1524
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001525static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001526{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001527 unsigned map;
1528
Steven Rostedt4ca53082009-03-16 19:20:15 -04001529 if (!pid) {
1530 strcpy(comm, "<idle>");
1531 return;
1532 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001533
Steven Rostedt74bf4072010-01-25 15:11:53 -05001534 if (WARN_ON_ONCE(pid < 0)) {
1535 strcpy(comm, "<XXX>");
1536 return;
1537 }
1538
Steven Rostedt4ca53082009-03-16 19:20:15 -04001539 if (pid > PID_MAX_DEFAULT) {
1540 strcpy(comm, "<...>");
1541 return;
1542 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001543
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001544 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001545 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001546 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001547 else
1548 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001549}
1550
1551void trace_find_cmdline(int pid, char comm[])
1552{
1553 preempt_disable();
1554 arch_spin_lock(&trace_cmdline_lock);
1555
1556 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001557
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001558 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001559 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001560}
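
/*
 * A minimal usage sketch (illustrative; m and entry stand in for a
 * real caller's state in the output code): resolving a recorded pid
 * back to a comm:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	seq_printf(m, "%16s-%-5d", comm, entry->pid);
 *
 * The buffer must hold at least TASK_COMM_LEN bytes; pids that were
 * never recorded come back as "<...>".
 */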
1561
Ingo Molnare309b412008-05-12 21:20:51 +02001562void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001563{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001564 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001565 return;
1566
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001567 if (!__this_cpu_read(trace_cmdline_save))
1568 return;
1569
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001570 if (trace_save_cmdline(tsk))
1571 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001572}
1573
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001574void
Steven Rostedt38697052008-10-01 13:14:09 -04001575tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1576 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001577{
1578 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001579
Steven Rostedt777e2082008-09-29 23:02:42 -04001580 entry->preempt_count = pc & 0xff;
1581 entry->pid = (tsk) ? tsk->pid : 0;
1582 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001583#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001584 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001585#else
1586 TRACE_FLAG_IRQS_NOSUPPORT |
1587#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1589 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001590 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1591 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001592}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001593EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001594
Steven Rostedte77405a2009-09-02 14:17:06 -04001595struct ring_buffer_event *
1596trace_buffer_lock_reserve(struct ring_buffer *buffer,
1597 int type,
1598 unsigned long len,
1599 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001600{
1601 struct ring_buffer_event *event;
1602
Steven Rostedte77405a2009-09-02 14:17:06 -04001603 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001604 if (event != NULL) {
1605 struct trace_entry *ent = ring_buffer_event_data(event);
1606
1607 tracing_generic_entry_update(ent, flags, pc);
1608 ent->type = type;
1609 }
1610
1611 return event;
1612}
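
/*
 * Every event below follows the same reserve/fill/commit pattern.
 * A hedged sketch of a caller (the field names match the TRACE_FN
 * case implemented later in this file):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;				(ring buffer refused us)
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;				(type-specific payload)
 *	__buffer_unlock_commit(buffer, event);
 */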
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001613
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001614void
1615__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1616{
1617 __this_cpu_write(trace_cmdline_save, true);
1618 ring_buffer_unlock_commit(buffer, event);
1619}
1620
Steven Rostedte77405a2009-09-02 14:17:06 -04001621static inline void
1622__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1623 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001624 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001625{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001626 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001627
Steven Rostedte77405a2009-09-02 14:17:06 -04001628 ftrace_trace_stack(buffer, flags, 6, pc);
1629 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001630}
1631
Steven Rostedte77405a2009-09-02 14:17:06 -04001632void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1633 struct ring_buffer_event *event,
1634 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001635{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001636 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001637}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001638EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001639
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001640static struct ring_buffer *temp_buffer;
1641
Steven Rostedtef5580d2009-02-27 19:38:04 -05001642struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001643trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
1644 struct ftrace_event_file *ftrace_file,
1645 int type, unsigned long len,
1646 unsigned long flags, int pc)
1647{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001648 struct ring_buffer_event *entry;
1649
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001650 *current_rb = ftrace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001651 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001652 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001653 /*
1654	 * If tracing is off, but we have triggers enabled,
1655	 * we still need to look at the event data. Use the temp_buffer
1656	 * to store the trace event for the trigger to use. It's recursion
1657	 * safe and will not be recorded anywhere.
1658 */
1659 if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
1660 *current_rb = temp_buffer;
1661 entry = trace_buffer_lock_reserve(*current_rb,
1662 type, len, flags, pc);
1663 }
1664 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001665}
1666EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1667
1668struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001669trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1670 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001671 unsigned long flags, int pc)
1672{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001673 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001674 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001675 type, len, flags, pc);
1676}
Steven Rostedt94487d62009-05-05 19:22:53 -04001677EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001678
Steven Rostedte77405a2009-09-02 14:17:06 -04001679void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
1680 struct ring_buffer_event *event,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001681 unsigned long flags, int pc)
1682{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001683 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001684}
Steven Rostedt94487d62009-05-05 19:22:53 -04001685EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001686
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001687void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1688 struct ring_buffer_event *event,
1689 unsigned long flags, int pc,
1690 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001691{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001692 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001693
1694 ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
1695 ftrace_trace_userstack(buffer, flags, pc);
1696}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001697EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001698
Steven Rostedte77405a2009-09-02 14:17:06 -04001699void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1700 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001701{
Steven Rostedte77405a2009-09-02 14:17:06 -04001702 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001703}
Steven Rostedt12acd472009-04-17 16:01:56 -04001704EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001705
Ingo Molnare309b412008-05-12 21:20:51 +02001706void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001707trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001708 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1709 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001710{
Tom Zanussie1112b42009-03-31 00:48:49 -05001711 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001712 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001713 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001714 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001715
Steven Rostedtd7690412008-10-01 00:29:53 -04001716 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001717 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001718 return;
1719
Steven Rostedte77405a2009-09-02 14:17:06 -04001720 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001721 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001722 if (!event)
1723 return;
1724 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001725 entry->ip = ip;
1726 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001727
Tom Zanussif306cc82013-10-24 08:34:17 -05001728 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001729 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001730}
1731
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001732#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001733
1734#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1735struct ftrace_stack {
1736 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1737};
1738
1739static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1740static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1741
Steven Rostedte77405a2009-09-02 14:17:06 -04001742static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001743 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001744 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001745{
Tom Zanussie1112b42009-03-31 00:48:49 -05001746 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001747 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001748 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001749 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001750 int use_stack;
1751 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001752
1753 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001754 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001755
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001756 /*
1757	 * Since events can happen in NMIs, there's no safe way to
1758	 * use the per-cpu ftrace_stacks. We reserve it and if an interrupt
1759 * or NMI comes in, it will just have to use the default
1760 * FTRACE_STACK_SIZE.
1761 */
1762 preempt_disable_notrace();
1763
Shan Wei82146522012-11-19 13:21:01 +08001764 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001765 /*
1766 * We don't need any atomic variables, just a barrier.
1767 * If an interrupt comes in, we don't care, because it would
1768 * have exited and put the counter back to what we want.
1769 * We just need a barrier to keep gcc from moving things
1770 * around.
1771 */
1772 barrier();
1773 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001774 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001775 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1776
1777 if (regs)
1778 save_stack_trace_regs(regs, &trace);
1779 else
1780 save_stack_trace(&trace);
1781
1782 if (trace.nr_entries > size)
1783 size = trace.nr_entries;
1784 } else
1785 /* From now on, use_stack is a boolean */
1786 use_stack = 0;
1787
1788 size *= sizeof(unsigned long);
1789
1790 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1791 sizeof(*entry) + size, flags, pc);
1792 if (!event)
1793 goto out;
1794 entry = ring_buffer_event_data(event);
1795
1796 memset(&entry->caller, 0, size);
1797
1798 if (use_stack)
1799 memcpy(&entry->caller, trace.entries,
1800 trace.nr_entries * sizeof(unsigned long));
1801 else {
1802 trace.max_entries = FTRACE_STACK_ENTRIES;
1803 trace.entries = entry->caller;
1804 if (regs)
1805 save_stack_trace_regs(regs, &trace);
1806 else
1807 save_stack_trace(&trace);
1808 }
1809
1810 entry->size = trace.nr_entries;
1811
Tom Zanussif306cc82013-10-24 08:34:17 -05001812 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001813 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001814
1815 out:
1816 /* Again, don't let gcc optimize things here */
1817 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001818 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001819 preempt_enable_notrace();
1820
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001821}
1822
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001823void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1824 int skip, int pc, struct pt_regs *regs)
1825{
1826 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1827 return;
1828
1829 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1830}
1831
Steven Rostedte77405a2009-09-02 14:17:06 -04001832void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1833 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001834{
1835 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1836 return;
1837
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001838 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001839}
1840
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001841void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1842 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001843{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001844 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001845}
1846
Steven Rostedt03889382009-12-11 09:48:22 -05001847/**
1848 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001849 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001850 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001851void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001852{
1853 unsigned long flags;
1854
1855 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001856 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001857
1858 local_save_flags(flags);
1859
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001860 /*
1861 * Skip 3 more, seems to get us at the caller of
1862 * this function.
1863 */
1864 skip += 3;
1865 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1866 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001867}
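
/*
 * Illustrative usage (not part of this file): record the current
 * backtrace while debugging a suspect code path:
 *
 *	trace_dump_stack(0);
 *
 * A wrapper that calls this on someone else's behalf would pass 1 so
 * its own frame is skipped from the recorded trace.
 */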
1868
Steven Rostedt91e86e52010-11-10 12:56:12 +01001869static DEFINE_PER_CPU(int, user_stack_count);
1870
Steven Rostedte77405a2009-09-02 14:17:06 -04001871void
1872ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001873{
Tom Zanussie1112b42009-03-31 00:48:49 -05001874 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001875 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001876 struct userstack_entry *entry;
1877 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001878
1879 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1880 return;
1881
Steven Rostedtb6345872010-03-12 20:03:30 -05001882 /*
1883	 * NMIs cannot handle page faults, even with fixups.
1884	 * Saving the user stack can (and often does) fault.
1885 */
1886 if (unlikely(in_nmi()))
1887 return;
1888
Steven Rostedt91e86e52010-11-10 12:56:12 +01001889 /*
1890	 * Prevent recursion, since the user stack tracing may
1891 * trigger other kernel events.
1892 */
1893 preempt_disable();
1894 if (__this_cpu_read(user_stack_count))
1895 goto out;
1896
1897 __this_cpu_inc(user_stack_count);
1898
Steven Rostedte77405a2009-09-02 14:17:06 -04001899 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001900 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001901 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001902 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001903 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001904
Steven Rostedt48659d32009-09-11 11:36:23 -04001905 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001906 memset(&entry->caller, 0, sizeof(entry->caller));
1907
1908 trace.nr_entries = 0;
1909 trace.max_entries = FTRACE_STACK_ENTRIES;
1910 trace.skip = 0;
1911 trace.entries = entry->caller;
1912
1913 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001914 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001915 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001916
Li Zefan1dbd1952010-12-09 15:47:56 +08001917 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001918 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001919 out:
1920 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001921}
1922
Hannes Eder4fd27352009-02-10 19:44:12 +01001923#ifdef UNUSED
1924static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001925{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001926 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001927}
Hannes Eder4fd27352009-02-10 19:44:12 +01001928#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001929
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001930#endif /* CONFIG_STACKTRACE */
1931
Steven Rostedt07d777f2011-09-22 14:01:55 -04001932/* created for use with alloc_percpu */
1933struct trace_buffer_struct {
1934 char buffer[TRACE_BUF_SIZE];
1935};
1936
1937static struct trace_buffer_struct *trace_percpu_buffer;
1938static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1939static struct trace_buffer_struct *trace_percpu_irq_buffer;
1940static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1941
1942/*
1943 * The buffer used depends on the context. There is a per-CPU
1944 * buffer for normal context, softirq context, hard irq context and
1945 * for NMI context. This allows for lockless recording.
1946 *
1947 * Note, if the buffers failed to be allocated, then this returns NULL
1948 */
1949static char *get_trace_buf(void)
1950{
1951 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001952
1953 /*
1954 * If we have allocated per cpu buffers, then we do not
1955 * need to do any locking.
1956 */
1957 if (in_nmi())
1958 percpu_buffer = trace_percpu_nmi_buffer;
1959 else if (in_irq())
1960 percpu_buffer = trace_percpu_irq_buffer;
1961 else if (in_softirq())
1962 percpu_buffer = trace_percpu_sirq_buffer;
1963 else
1964 percpu_buffer = trace_percpu_buffer;
1965
1966 if (!percpu_buffer)
1967 return NULL;
1968
Shan Weid8a03492012-11-13 09:53:04 +08001969 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001970}
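
/*
 * The four buffers work because these contexts strictly nest:
 * normal -> softirq -> hardirq -> NMI. Worst-case sketch, each level
 * interrupting the previous one mid-trace_printk():
 *
 *	normal:		trace_percpu_buffer
 *	  softirq:	trace_percpu_sirq_buffer
 *	    hardirq:	trace_percpu_irq_buffer
 *	      NMI:	trace_percpu_nmi_buffer
 *
 * Each level writes a different per-cpu buffer, so none can corrupt
 * a half-written buffer of the context it interrupted.
 */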
1971
1972static int alloc_percpu_trace_buffer(void)
1973{
1974 struct trace_buffer_struct *buffers;
1975 struct trace_buffer_struct *sirq_buffers;
1976 struct trace_buffer_struct *irq_buffers;
1977 struct trace_buffer_struct *nmi_buffers;
1978
1979 buffers = alloc_percpu(struct trace_buffer_struct);
1980 if (!buffers)
1981 goto err_warn;
1982
1983 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1984 if (!sirq_buffers)
1985 goto err_sirq;
1986
1987 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1988 if (!irq_buffers)
1989 goto err_irq;
1990
1991 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1992 if (!nmi_buffers)
1993 goto err_nmi;
1994
1995 trace_percpu_buffer = buffers;
1996 trace_percpu_sirq_buffer = sirq_buffers;
1997 trace_percpu_irq_buffer = irq_buffers;
1998 trace_percpu_nmi_buffer = nmi_buffers;
1999
2000 return 0;
2001
2002 err_nmi:
2003 free_percpu(irq_buffers);
2004 err_irq:
2005 free_percpu(sirq_buffers);
2006 err_sirq:
2007 free_percpu(buffers);
2008 err_warn:
2009 WARN(1, "Could not allocate percpu trace_printk buffer");
2010 return -ENOMEM;
2011}
2012
Steven Rostedt81698832012-10-11 10:15:05 -04002013static int buffers_allocated;
2014
Steven Rostedt07d777f2011-09-22 14:01:55 -04002015void trace_printk_init_buffers(void)
2016{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002017 if (buffers_allocated)
2018 return;
2019
2020 if (alloc_percpu_trace_buffer())
2021 return;
2022
Steven Rostedt2184db42014-05-28 13:14:40 -04002023 /* trace_printk() is for debug use only. Don't use it in production. */
2024
2025 pr_warning("\n**********************************************************\n");
2026 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2027 pr_warning("** **\n");
2028 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2029 pr_warning("** **\n");
2030 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2031 pr_warning("** unsafe for produciton use. **\n");
2032 pr_warning("** **\n");
2033 pr_warning("** If you see this message and you are not debugging **\n");
2034 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2035 pr_warning("** **\n");
2036 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2037 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002038
Steven Rostedtb382ede62012-10-10 21:44:34 -04002039 /* Expand the buffers to set size */
2040 tracing_update_buffers();
2041
Steven Rostedt07d777f2011-09-22 14:01:55 -04002042 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002043
2044 /*
2045 * trace_printk_init_buffers() can be called by modules.
2046 * If that happens, then we need to start cmdline recording
2047 * directly here. If the global_trace.buffer is already
2048 * allocated here, then this was called by module code.
2049 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002050 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002051 tracing_start_cmdline_record();
2052}
2053
2054void trace_printk_start_comm(void)
2055{
2056 /* Start tracing comms if trace printk is set */
2057 if (!buffers_allocated)
2058 return;
2059 tracing_start_cmdline_record();
2060}
2061
2062static void trace_printk_start_stop_comm(int enabled)
2063{
2064 if (!buffers_allocated)
2065 return;
2066
2067 if (enabled)
2068 tracing_start_cmdline_record();
2069 else
2070 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002071}
2072
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002073/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002074 * trace_vbprintk - write a binary printk message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002075 *
 * @ip: address of the caller, recorded with the event
 * @fmt: the printk-style format string
 * @args: arguments for @fmt
2076 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002077int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002078{
Tom Zanussie1112b42009-03-31 00:48:49 -05002079 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002080 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002081 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002082 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002083 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002084 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002085 char *tbuffer;
2086 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002087
2088 if (unlikely(tracing_selftest_running || tracing_disabled))
2089 return 0;
2090
2091 /* Don't pollute graph traces with trace_vprintk internals */
2092 pause_graph_tracing();
2093
2094 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002095 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002096
Steven Rostedt07d777f2011-09-22 14:01:55 -04002097 tbuffer = get_trace_buf();
2098 if (!tbuffer) {
2099 len = 0;
2100 goto out;
2101 }
2102
2103 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2104
2105 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002106 goto out;
2107
Steven Rostedt07d777f2011-09-22 14:01:55 -04002108 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002109 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002110 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002111 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2112 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002113 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002114 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002115 entry = ring_buffer_event_data(event);
2116 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002117 entry->fmt = fmt;
2118
Steven Rostedt07d777f2011-09-22 14:01:55 -04002119 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002120 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002121 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002122 ftrace_trace_stack(buffer, flags, 6, pc);
2123 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002124
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002125out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002126 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002127 unpause_graph_tracing();
2128
2129 return len;
2130}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002131EXPORT_SYMBOL_GPL(trace_vbprintk);
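
/*
 * Hedged sketch of the fast path this enables (trace_printk() itself
 * is defined elsewhere): with a constant format such as
 *
 *	trace_printk("read %d bytes from %s\n", len, name);
 *
 * only the format pointer and the binary-encoded arguments are copied
 * into the ring buffer; the string is formatted later, at read time.
 */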
2132
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002133static int
2134__trace_array_vprintk(struct ring_buffer *buffer,
2135 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002136{
Tom Zanussie1112b42009-03-31 00:48:49 -05002137 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002138 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002139 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002140 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002141 unsigned long flags;
2142 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002143
2144 if (tracing_disabled || tracing_selftest_running)
2145 return 0;
2146
Steven Rostedt07d777f2011-09-22 14:01:55 -04002147 /* Don't pollute graph traces with trace_vprintk internals */
2148 pause_graph_tracing();
2149
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002150 pc = preempt_count();
2151 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002152
Steven Rostedt07d777f2011-09-22 14:01:55 -04002153
2154 tbuffer = get_trace_buf();
2155 if (!tbuffer) {
2156 len = 0;
2157 goto out;
2158 }
2159
2160 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2161 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002162 goto out;
2163
Steven Rostedt07d777f2011-09-22 14:01:55 -04002164 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002165 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002166 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002167 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002168 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002169 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002170 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002171 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002172
Steven Rostedt07d777f2011-09-22 14:01:55 -04002173 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002174 entry->buf[len] = '\0';
Tom Zanussif306cc82013-10-24 08:34:17 -05002175 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002176 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002177 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002178 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002179 out:
2180 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002181 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002182
2183 return len;
2184}
Steven Rostedt659372d2009-09-03 19:11:07 -04002185
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002186int trace_array_vprintk(struct trace_array *tr,
2187 unsigned long ip, const char *fmt, va_list args)
2188{
2189 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2190}
2191
2192int trace_array_printk(struct trace_array *tr,
2193 unsigned long ip, const char *fmt, ...)
2194{
2195 int ret;
2196 va_list ap;
2197
2198 if (!(trace_flags & TRACE_ITER_PRINTK))
2199 return 0;
2200
2201 va_start(ap, fmt);
2202 ret = trace_array_vprintk(tr, ip, fmt, ap);
2203 va_end(ap);
2204 return ret;
2205}
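
/*
 * Illustrative only (tr and state are hypothetical):
 *
 *	trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
 *
 * writes into that instance's buffer rather than the global one, and
 * honors the PRINTK trace flag just like trace_printk().
 */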
2206
2207int trace_array_printk_buf(struct ring_buffer *buffer,
2208 unsigned long ip, const char *fmt, ...)
2209{
2210 int ret;
2211 va_list ap;
2212
2213 if (!(trace_flags & TRACE_ITER_PRINTK))
2214 return 0;
2215
2216 va_start(ap, fmt);
2217 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2218 va_end(ap);
2219 return ret;
2220}
2221
Steven Rostedt659372d2009-09-03 19:11:07 -04002222int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2223{
Steven Rostedta813a152009-10-09 01:41:35 -04002224 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002225}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002226EXPORT_SYMBOL_GPL(trace_vprintk);
2227
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002228static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002229{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002230 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2231
Steven Rostedt5a90f572008-09-03 17:42:51 -04002232 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002233 if (buf_iter)
2234 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002235}
2236
Ingo Molnare309b412008-05-12 21:20:51 +02002237static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002238peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2239 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002240{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002241 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002242 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002243
Steven Rostedtd7690412008-10-01 00:29:53 -04002244 if (buf_iter)
2245 event = ring_buffer_iter_peek(buf_iter, ts);
2246 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002247 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002248 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002249
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002250 if (event) {
2251 iter->ent_size = ring_buffer_event_length(event);
2252 return ring_buffer_event_data(event);
2253 }
2254 iter->ent_size = 0;
2255 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002256}
Steven Rostedtd7690412008-10-01 00:29:53 -04002257
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002258static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002259__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2260 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002261{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002262 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002263 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002264 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002265 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002266 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002267 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002268 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002269 int cpu;
2270
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002271 /*
2272	 * If we are in a per_cpu trace file, don't bother iterating over
2273	 * all cpus; just peek at that cpu directly.
2274 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002275 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002276 if (ring_buffer_empty_cpu(buffer, cpu_file))
2277 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002278 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002279 if (ent_cpu)
2280 *ent_cpu = cpu_file;
2281
2282 return ent;
2283 }
2284
Steven Rostedtab464282008-05-12 21:21:00 +02002285 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002286
2287 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002288 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002289
Steven Rostedtbc21b472010-03-31 19:49:26 -04002290 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002291
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002292 /*
2293 * Pick the entry with the smallest timestamp:
2294 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002295 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002296 next = ent;
2297 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002298 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002299 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002300 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002301 }
2302 }
2303
Steven Rostedt12b5da32012-03-27 10:43:28 -04002304 iter->ent_size = next_size;
2305
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002306 if (ent_cpu)
2307 *ent_cpu = next_cpu;
2308
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002309 if (ent_ts)
2310 *ent_ts = next_ts;
2311
Steven Rostedtbc21b472010-03-31 19:49:26 -04002312 if (missing_events)
2313 *missing_events = next_lost;
2314
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002315 return next;
2316}
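
/*
 * Conceptually this is one step of a k-way merge over the per-cpu
 * buffers, keyed on timestamp. Sketch of the loop above (pseudocode,
 * not code from this file):
 *
 *	for each tracing cpu with a non-empty buffer:
 *		peek the oldest unconsumed event and its ts
 *	return the event with the smallest ts (and its cpu)
 *
 * Repeated calls therefore hand back one globally time-ordered
 * stream of events to the iterator.
 */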
2317
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002318/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002319struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2320 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002321{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002322 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002323}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002324
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002325/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002326void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002327{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002328 iter->ent = __find_next_entry(iter, &iter->cpu,
2329 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002330
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002331 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002332 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002333
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002334 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002335}
2336
Ingo Molnare309b412008-05-12 21:20:51 +02002337static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002338{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002339 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002340 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002341}
2342
Ingo Molnare309b412008-05-12 21:20:51 +02002343static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002344{
2345 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002346 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002347 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002348
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002349 WARN_ON_ONCE(iter->leftover);
2350
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002351 (*pos)++;
2352
2353 /* can't go backwards */
2354 if (iter->idx > i)
2355 return NULL;
2356
2357 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002358 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002359 else
2360 ent = iter;
2361
2362 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002363 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002364
2365 iter->pos = *pos;
2366
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002367 return ent;
2368}
2369
Jason Wessel955b61e2010-08-05 09:22:23 -05002370void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002371{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002372 struct ring_buffer_event *event;
2373 struct ring_buffer_iter *buf_iter;
2374 unsigned long entries = 0;
2375 u64 ts;
2376
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002377 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002378
Steven Rostedt6d158a82012-06-27 20:46:14 -04002379 buf_iter = trace_buffer_iter(iter, cpu);
2380 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002381 return;
2382
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002383 ring_buffer_iter_reset(buf_iter);
2384
2385 /*
2386	 * With the max latency tracers, a reset may never have taken
2387	 * place on a cpu. This is evident from timestamps that precede
2388	 * the start of the buffer.
2389 */
2390 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002391 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002392 break;
2393 entries++;
2394 ring_buffer_read(buf_iter, NULL);
2395 }
2396
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002397 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002398}
2399
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002400/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002401 * The current tracer is copied to avoid global locking
2402 * all around.
2403 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002404static void *s_start(struct seq_file *m, loff_t *pos)
2405{
2406 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002407 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002408 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002409 void *p = NULL;
2410 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002411 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002412
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002413 /*
2414	 * Copy the tracer to avoid using a global lock all around.
2415	 * iter->trace is a copy of current_trace; the name pointer can
2416	 * be compared instead of using strcmp(), as iter->trace->name
2417	 * will point to the same string as current_trace->name.
2418 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002419 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002420 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2421 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002422 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002423
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002424#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002425 if (iter->snapshot && iter->trace->use_max_tr)
2426 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002427#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002428
2429 if (!iter->snapshot)
2430 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002431
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002432 if (*pos != iter->pos) {
2433 iter->ent = NULL;
2434 iter->cpu = 0;
2435 iter->idx = -1;
2436
Steven Rostedtae3b5092013-01-23 15:22:59 -05002437 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002438 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002439 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002440 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002441 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442
Lai Jiangshanac91d852010-03-02 17:54:50 +08002443 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002444 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2445 ;
2446
2447 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002448 /*
2449 * If we overflowed the seq_file before, then we want
2450 * to just reuse the trace_seq buffer again.
2451 */
2452 if (iter->leftover)
2453 p = iter;
2454 else {
2455 l = *pos - 1;
2456 p = s_next(m, p, &l);
2457 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002458 }
2459
Lai Jiangshan4f535962009-05-18 19:35:34 +08002460 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002461 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002462 return p;
2463}
2464
2465static void s_stop(struct seq_file *m, void *p)
2466{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002467 struct trace_iterator *iter = m->private;
2468
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002469#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002470 if (iter->snapshot && iter->trace->use_max_tr)
2471 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002472#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002473
2474 if (!iter->snapshot)
2475 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002476
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002477 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002478 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002479}
2480
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002481static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002482get_total_entries(struct trace_buffer *buf,
2483 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002484{
2485 unsigned long count;
2486 int cpu;
2487
2488 *total = 0;
2489 *entries = 0;
2490
2491 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002492 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002493 /*
2494	 * If this buffer has skipped entries, then we hold all
2495	 * entries for the trace and we need to ignore the
2496	 * ones before the buffer's start timestamp.
2497 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002498 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2499 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002500 /* total is the same as the entries */
2501 *total += count;
2502 } else
2503 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002504 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002505 *entries += count;
2506 }
2507}
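
/*
 * Worked example with assumed numbers (not from a real trace): if a
 * cpu's buffer holds 1000 entries and 500 older ones were overwritten,
 * ring_buffer_entries_cpu() gives 1000 and ring_buffer_overrun_cpu()
 * gives 500, so entries gains 1000 and total gains 1500. If that cpu
 * instead had skipped_entries set by a latency-tracer reset, those are
 * subtracted first and total equals entries for that cpu.
 */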
2508
Ingo Molnare309b412008-05-12 21:20:51 +02002509static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002510{
Michael Ellermana6168352008-08-20 16:36:11 -07002511 seq_puts(m, "#                  _------=> CPU#            \n");
2512 seq_puts(m, "#                 / _-----=> irqs-off        \n");
2513 seq_puts(m, "#                | / _----=> need-resched    \n");
2514 seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2515 seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002516 seq_puts(m, "#                |||| /     delay            \n");
2517 seq_puts(m, "#  cmd     pid   ||||| time  |   caller     \n");
2518 seq_puts(m, "#     \\   /      |||||  \\    |   /          \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002519}
2520
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002521static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002522{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002523 unsigned long total;
2524 unsigned long entries;
2525
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002526 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002527 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2528 entries, total, num_online_cpus());
2529 seq_puts(m, "#\n");
2530}
2531
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002532static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002533{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002534 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002535 seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002536 seq_puts(m, "#              | |       |          |         |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002537}
2538
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002539static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002540{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002541 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002542 seq_puts(m, "#                              _-----=> irqs-off\n");
2543 seq_puts(m, "#                             / _----=> need-resched\n");
2544 seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2545 seq_puts(m, "#                            || / _--=> preempt-depth\n");
2546 seq_puts(m, "#                            ||| /     delay\n");
2547 seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2548 seq_puts(m, "#              | |       |   ||||       |         |\n");
2549}
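/*
 * Editor's note: a line rendered under the header above looks roughly
 * like the following (values are illustrative, not from a real trace):
 *
 *           <idle>-0     [002] d.s.  2440.603828: some_event: ...
 *
 * i.e. TASK-PID, the CPU number, the four latency flag columns
 * (irqs-off, need-resched, hardirq/softirq, preempt-depth), the
 * timestamp in seconds, then the event text.
 */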
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002550
Jiri Olsa62b915f2010-04-02 19:01:22 +02002551void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002552print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2553{
2554 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002555 struct trace_buffer *buf = iter->trace_buffer;
2556 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002557 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002558 unsigned long entries;
2559 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002560 const char *name = "preemption";
2561
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002562 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002563
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002564 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002565
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002566 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002567 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002568 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002569 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002570 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002571 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002572 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002573 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002574 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002575 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002576#if defined(CONFIG_PREEMPT_NONE)
2577 "server",
2578#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2579 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002580#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002581 "preempt",
2582#else
2583 "unknown",
2584#endif
2585 /* These are reserved for later use */
2586 0, 0, 0, 0);
2587#ifdef CONFIG_SMP
2588 seq_printf(m, " #P:%d)\n", num_online_cpus());
2589#else
2590 seq_puts(m, ")\n");
2591#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002592 seq_puts(m, "# -----------------\n");
2593 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002594 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002595 data->comm, data->pid,
2596 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002597 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002598 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002599
2600 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002601 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002602 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2603 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002604 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002605 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2606 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002607 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002608 }
2609
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002610 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002611}
2612
Steven Rostedta3097202008-11-07 22:36:02 -05002613static void test_cpu_buff_start(struct trace_iterator *iter)
2614{
2615 struct trace_seq *s = &iter->seq;
2616
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002617 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2618 return;
2619
2620 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2621 return;
2622
Rusty Russell44623442009-01-01 10:12:23 +10302623 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002624 return;
2625
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002626 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002627 return;
2628
Rusty Russell44623442009-01-01 10:12:23 +10302629 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002630
2631 /* Don't print started cpu buffer for the first entry of the trace */
2632 if (iter->idx > 1)
2633 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2634 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002635}
2636
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002637static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002638{
Steven Rostedt214023c2008-05-12 21:20:46 +02002639 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002640 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002641 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002642 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002643
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002644 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002645
Steven Rostedta3097202008-11-07 22:36:02 -05002646 test_cpu_buff_start(iter);
2647
Steven Rostedtf633cef2008-12-23 23:24:13 -05002648 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002649
2650 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002651 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2652 if (!trace_print_lat_context(iter))
2653 goto partial;
2654 } else {
2655 if (!trace_print_context(iter))
2656 goto partial;
2657 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002658 }
2659
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002660 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002661 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002662
2663 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2664 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002665
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002666 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002667partial:
2668 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002669}
2670
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002671static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002672{
2673 struct trace_seq *s = &iter->seq;
2674 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002675 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002676
2677 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002678
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002679 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002680 if (!trace_seq_printf(s, "%d %d %llu ",
2681 entry->pid, iter->cpu, iter->ts))
2682 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002683 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002684
Steven Rostedtf633cef2008-12-23 23:24:13 -05002685 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002686 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002687 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002688
2689 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2690 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002691
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002692 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002693partial:
2694 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002695}
2696
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002697static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002698{
2699 struct trace_seq *s = &iter->seq;
2700 unsigned char newline = '\n';
2701 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002702 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002703
2704 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002705
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002706 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2707 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2708 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2709 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2710 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002711
Steven Rostedtf633cef2008-12-23 23:24:13 -05002712 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002713 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002714 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002715 if (ret != TRACE_TYPE_HANDLED)
2716 return ret;
2717 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002718
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002719 SEQ_PUT_FIELD_RET(s, newline);
2720
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002721 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002722}
2723
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002724static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002725{
2726 struct trace_seq *s = &iter->seq;
2727 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002728 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002729
2730 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002731
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002732 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2733 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002734 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002735 SEQ_PUT_FIELD_RET(s, iter->ts);
2736 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002737
Steven Rostedtf633cef2008-12-23 23:24:13 -05002738 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002739 return event ? event->funcs->binary(iter, 0, event) :
2740 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002741}
2742
Jiri Olsa62b915f2010-04-02 19:01:22 +02002743int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002744{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002745 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002746 int cpu;
2747
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002748 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002749 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002750 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002751 buf_iter = trace_buffer_iter(iter, cpu);
2752 if (buf_iter) {
2753 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002754 return 0;
2755 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002756 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002757 return 0;
2758 }
2759 return 1;
2760 }
2761
Steven Rostedtab464282008-05-12 21:21:00 +02002762 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002763 buf_iter = trace_buffer_iter(iter, cpu);
2764 if (buf_iter) {
2765 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002766 return 0;
2767 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002768 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002769 return 0;
2770 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002771 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002772
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002773 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002774}
2775
Lai Jiangshan4f535962009-05-18 19:35:34 +08002776/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002777enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002778{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002779 enum print_line_t ret;
2780
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002781 if (iter->lost_events &&
2782 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2783 iter->cpu, iter->lost_events))
2784 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002785
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002786 if (iter->trace && iter->trace->print_line) {
2787 ret = iter->trace->print_line(iter);
2788 if (ret != TRACE_TYPE_UNHANDLED)
2789 return ret;
2790 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002791
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002792 if (iter->ent->type == TRACE_BPUTS &&
2793 trace_flags & TRACE_ITER_PRINTK &&
2794 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2795 return trace_print_bputs_msg_only(iter);
2796
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002797 if (iter->ent->type == TRACE_BPRINT &&
2798 trace_flags & TRACE_ITER_PRINTK &&
2799 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002800 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002801
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002802 if (iter->ent->type == TRACE_PRINT &&
2803 trace_flags & TRACE_ITER_PRINTK &&
2804 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002805 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002806
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002807 if (trace_flags & TRACE_ITER_BIN)
2808 return print_bin_fmt(iter);
2809
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002810 if (trace_flags & TRACE_ITER_HEX)
2811 return print_hex_fmt(iter);
2812
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002813 if (trace_flags & TRACE_ITER_RAW)
2814 return print_raw_fmt(iter);
2815
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002816 return print_trace_fmt(iter);
2817}
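/*
 * Editor's note: the checks above give each event a strict rendering
 * precedence -- the lost-events banner first, then a tracer's own
 * ->print_line() hook, then the *_msg_only short forms for the printk
 * style events, then the mutually exclusive bin/hex/raw option flags,
 * and finally the default human-readable format.  Only the first
 * matching form is emitted.
 */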
2818
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002819void trace_latency_header(struct seq_file *m)
2820{
2821 struct trace_iterator *iter = m->private;
2822
2823 /* print nothing if the buffers are empty */
2824 if (trace_empty(iter))
2825 return;
2826
2827 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2828 print_trace_header(m, iter);
2829
2830 if (!(trace_flags & TRACE_ITER_VERBOSE))
2831 print_lat_help_header(m);
2832}
2833
Jiri Olsa62b915f2010-04-02 19:01:22 +02002834void trace_default_header(struct seq_file *m)
2835{
2836 struct trace_iterator *iter = m->private;
2837
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002838 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2839 return;
2840
Jiri Olsa62b915f2010-04-02 19:01:22 +02002841 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2842 /* print nothing if the buffers are empty */
2843 if (trace_empty(iter))
2844 return;
2845 print_trace_header(m, iter);
2846 if (!(trace_flags & TRACE_ITER_VERBOSE))
2847 print_lat_help_header(m);
2848 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002849 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2850 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002851 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002852 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002853 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002854 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002855 }
2856}
2857
Steven Rostedte0a413f2011-09-29 21:26:16 -04002858static void test_ftrace_alive(struct seq_file *m)
2859{
2860 if (!ftrace_is_dead())
2861 return;
2862 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2863 seq_printf(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2864}
2865
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002866#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002867static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002868{
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002869 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2870 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2871 seq_printf(m, "#                      Takes a snapshot of the main buffer.\n");
Wang YanQingb9be6d02013-09-14 12:59:16 +08002872 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002873 seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2874 seq_printf(m, "#                       is not a '0' or '1')\n");
2875}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002876
2877static void show_snapshot_percpu_help(struct seq_file *m)
2878{
2879 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2880#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2881 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2882 seq_printf(m, "#                      Takes a snapshot of the main buffer for this cpu.\n");
2883#else
2884 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2885 seq_printf(m, "#                     Must use main snapshot file to allocate.\n");
2886#endif
2887 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2888 seq_printf(m, "#                      (Doesn't have to be '2'; works with any number that\n");
2889 seq_printf(m, "#                       is not a '0' or '1')\n");
2890}
2891
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002892static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2893{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002894 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002895 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2896 else
2897 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2898
2899 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002900 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2901 show_snapshot_main_help(m);
2902 else
2903 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002904}
2905#else
2906/* Should never be called */
2907static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2908#endif
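/*
 * Illustrative user-space sketch (not kernel code, compiled out here):
 * exercising the snapshot file documented by the help text above.  The
 * tracefs path is an assumption -- it may be /sys/kernel/tracing or
 * /sys/kernel/debug/tracing depending on the system.
 */
#if 0
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/tracing/snapshot";
	char line[256];
	FILE *f;

	f = fopen(path, "w");
	if (!f)
		return 1;
	fputs("1\n", f);	/* allocate and take a snapshot */
	fclose(f);

	f = fopen(path, "r");	/* read back the frozen buffer */
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif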
2909
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002910static int s_show(struct seq_file *m, void *v)
2911{
2912 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002913 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002914
2915 if (iter->ent == NULL) {
2916 if (iter->tr) {
2917 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2918 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002919 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002920 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002921 if (iter->snapshot && trace_empty(iter))
2922 print_snapshot_help(m, iter);
2923 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002924 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002925 else
2926 trace_default_header(m);
2927
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002928 } else if (iter->leftover) {
2929 /*
2930 * If we filled the seq_file buffer earlier, we
2931 * want to just show it now.
2932 */
2933 ret = trace_print_seq(m, &iter->seq);
2934
2935 /* ret should this time be zero, but you never know */
2936 iter->leftover = ret;
2937
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002938 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002939 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002940 ret = trace_print_seq(m, &iter->seq);
2941 /*
2942 * If we overflow the seq_file buffer, then it will
2943 * ask us for this data again at start up.
2944 * Use that instead.
2945 * ret is 0 if seq_file write succeeded.
2946 * -1 otherwise.
2947 */
2948 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002949 }
2950
2951 return 0;
2952}
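/*
 * Editor's note: iter->leftover is the recovery handshake with
 * s_start() above -- when trace_print_seq() could not flush the whole
 * trace_seq, the non-zero return is stashed so that the next s_start()
 * replays the same iterator instead of consuming a fresh entry.
 */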
2953
Oleg Nesterov649e9c72013-07-23 17:25:54 +02002954/*
2955 * Should be used after trace_array_get(), trace_types_lock
2956 * ensures that i_cdev was already initialized.
2957 */
2958static inline int tracing_get_cpu(struct inode *inode)
2959{
2960 if (inode->i_cdev) /* See trace_create_cpu_file() */
2961 return (long)inode->i_cdev - 1;
2962 return RING_BUFFER_ALL_CPUS;
2963}
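/*
 * Editor's note: the matching encode lives in trace_create_cpu_file()
 * (not part of this hunk), which stores "cpu + 1" in i_cdev so that a
 * NULL i_cdev keeps meaning "not a per-cpu file", i.e.
 * RING_BUFFER_ALL_CPUS.  The subtraction above simply removes that
 * bias.
 */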
2964
James Morris88e9d342009-09-22 16:43:43 -07002965static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002966 .start = s_start,
2967 .next = s_next,
2968 .stop = s_stop,
2969 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002970};
2971
Ingo Molnare309b412008-05-12 21:20:51 +02002972static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02002973__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002974{
Oleg Nesterov6484c712013-07-23 17:26:10 +02002975 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002976 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002977 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002978
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002979 if (tracing_disabled)
2980 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002981
Jiri Olsa50e18b92012-04-25 10:23:39 +02002982 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002983 if (!iter)
2984 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002985
Steven Rostedt6d158a82012-06-27 20:46:14 -04002986 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2987 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002988 if (!iter->buffer_iter)
2989 goto release;
2990
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002991 /*
2992 * We make a copy of the current tracer to avoid concurrent
2993 * changes on it while we are reading.
2994 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002995 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002996 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002997 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002998 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002999
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003000 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003001
Li Zefan79f55992009-06-15 14:58:26 +08003002 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003003 goto fail;
3004
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003005 iter->tr = tr;
3006
3007#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003008 /* Currently only the top directory has a snapshot */
3009 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003010 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003011 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003012#endif
3013 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003014 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003015 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003016 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003017 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003018
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003019 /* Notify the tracer early; before we stop tracing. */
3020 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003021 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003022
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003023 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003024 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003025 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3026
David Sharp8be07092012-11-13 12:18:22 -08003027 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003028 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003029 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3030
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003031 /* stop the trace while dumping if we are not opening "snapshot" */
3032 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003033 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003034
Steven Rostedtae3b5092013-01-23 15:22:59 -05003035 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003036 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003037 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003038 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003039 }
3040 ring_buffer_read_prepare_sync();
3041 for_each_tracing_cpu(cpu) {
3042 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003043 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003044 }
3045 } else {
3046 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003047 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003048 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003049 ring_buffer_read_prepare_sync();
3050 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003051 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003052 }
3053
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003054 mutex_unlock(&trace_types_lock);
3055
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003056 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003057
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003058 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003059 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003060 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003061 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003062release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003063 seq_release_private(inode, file);
3064 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003065}
3066
3067int tracing_open_generic(struct inode *inode, struct file *filp)
3068{
Steven Rostedt60a11772008-05-12 21:20:44 +02003069 if (tracing_disabled)
3070 return -ENODEV;
3071
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003072 filp->private_data = inode->i_private;
3073 return 0;
3074}
3075
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003076bool tracing_is_disabled(void)
3077{
3078 return (tracing_disabled) ? true : false;
3079}
3080
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003081/*
3082 * Open and update trace_array ref count.
3083 * Must have the current trace_array passed to it.
3084 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003085static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003086{
3087 struct trace_array *tr = inode->i_private;
3088
3089 if (tracing_disabled)
3090 return -ENODEV;
3091
3092 if (trace_array_get(tr) < 0)
3093 return -ENODEV;
3094
3095 filp->private_data = inode->i_private;
3096
3097 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003098}
3099
Hannes Eder4fd27352009-02-10 19:44:12 +01003100static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003101{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003102 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003103 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003104 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003105 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003106
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003107 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003108 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003109 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003110 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003111
Oleg Nesterov6484c712013-07-23 17:26:10 +02003112 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003113 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003114 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003115
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003116 for_each_tracing_cpu(cpu) {
3117 if (iter->buffer_iter[cpu])
3118 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3119 }
3120
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003121 if (iter->trace && iter->trace->close)
3122 iter->trace->close(iter);
3123
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003124 if (!iter->snapshot)
3125 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003126 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003127
3128 __trace_array_put(tr);
3129
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003130 mutex_unlock(&trace_types_lock);
3131
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003132 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003133 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003134 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003135 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003136 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003137
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003138 return 0;
3139}
3140
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003141static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3142{
3143 struct trace_array *tr = inode->i_private;
3144
3145 trace_array_put(tr);
3146 return 0;
3147}
3148
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003149static int tracing_single_release_tr(struct inode *inode, struct file *file)
3150{
3151 struct trace_array *tr = inode->i_private;
3152
3153 trace_array_put(tr);
3154
3155 return single_release(inode, file);
3156}
3157
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003158static int tracing_open(struct inode *inode, struct file *file)
3159{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003160 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003161 struct trace_iterator *iter;
3162 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003163
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003164 if (trace_array_get(tr) < 0)
3165 return -ENODEV;
3166
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003167 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003168 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3169 int cpu = tracing_get_cpu(inode);
3170
3171 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003172 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003173 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003174 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003175 }
3176
3177 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003178 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003179 if (IS_ERR(iter))
3180 ret = PTR_ERR(iter);
3181 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3182 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3183 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003184
3185 if (ret < 0)
3186 trace_array_put(tr);
3187
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003188 return ret;
3189}
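/*
 * Illustrative user-space sketch (not kernel code, compiled out here):
 * the FMODE_WRITE + O_TRUNC branch above is what the common
 * "echo > trace" idiom hits; the same effect from C might look like
 * this, assuming the usual debugfs mount point:
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int clear_trace_buffer(void)
{
	int fd = open("/sys/kernel/debug/tracing/trace",
		      O_WRONLY | O_TRUNC);

	if (fd < 0)
		return -1;
	return close(fd);	/* buffer was reset at open() time */
}
#endif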
3190
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003191/*
3192 * Some tracers are not suitable for instance buffers.
3193 * A tracer is always available for the global array (toplevel)
3194 * or if it explicitly states that it is.
3195 */
3196static bool
3197trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3198{
3199 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3200}
3201
3202/* Find the next tracer that this trace array may use */
3203static struct tracer *
3204get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3205{
3206 while (t && !trace_ok_for_array(t, tr))
3207 t = t->next;
3208
3209 return t;
3210}
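/*
 * Editor's note: t_start()/t_next() below walk the global trace_types
 * list through this filter, so the available_tracers file of an
 * instance only ever lists tracers that pass trace_ok_for_array().
 */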
3211
Ingo Molnare309b412008-05-12 21:20:51 +02003212static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003213t_next(struct seq_file *m, void *v, loff_t *pos)
3214{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003215 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003216 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003217
3218 (*pos)++;
3219
3220 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003221 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003222
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003223 return t;
3224}
3225
3226static void *t_start(struct seq_file *m, loff_t *pos)
3227{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003228 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003229 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003230 loff_t l = 0;
3231
3232 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003233
3234 t = get_tracer_for_array(tr, trace_types);
3235 for (; t && l < *pos; t = t_next(m, t, &l))
3236 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003237
3238 return t;
3239}
3240
3241static void t_stop(struct seq_file *m, void *p)
3242{
3243 mutex_unlock(&trace_types_lock);
3244}
3245
3246static int t_show(struct seq_file *m, void *v)
3247{
3248 struct tracer *t = v;
3249
3250 if (!t)
3251 return 0;
3252
3253 seq_printf(m, "%s", t->name);
3254 if (t->next)
3255 seq_putc(m, ' ');
3256 else
3257 seq_putc(m, '\n');
3258
3259 return 0;
3260}
3261
James Morris88e9d342009-09-22 16:43:43 -07003262static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003263 .start = t_start,
3264 .next = t_next,
3265 .stop = t_stop,
3266 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003267};
3268
3269static int show_traces_open(struct inode *inode, struct file *file)
3270{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003271 struct trace_array *tr = inode->i_private;
3272 struct seq_file *m;
3273 int ret;
3274
Steven Rostedt60a11772008-05-12 21:20:44 +02003275 if (tracing_disabled)
3276 return -ENODEV;
3277
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003278 ret = seq_open(file, &show_traces_seq_ops);
3279 if (ret)
3280 return ret;
3281
3282 m = file->private_data;
3283 m->private = tr;
3284
3285 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003286}
3287
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003288static ssize_t
3289tracing_write_stub(struct file *filp, const char __user *ubuf,
3290 size_t count, loff_t *ppos)
3291{
3292 return count;
3293}
3294
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003295loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003296{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003297 int ret;
3298
Slava Pestov364829b2010-11-24 15:13:16 -08003299 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003300 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003301 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003302 file->f_pos = ret = 0;
3303
3304 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003305}
3306
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003307static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003308 .open = tracing_open,
3309 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003310 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003311 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003312 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003313};
3314
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003315static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003316 .open = show_traces_open,
3317 .read = seq_read,
3318 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003319 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003320};
3321
Ingo Molnar36dfe922008-05-12 21:20:52 +02003322/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003323 * The tracer itself will not take this lock, but still we want
3324 * to provide a consistent cpumask to user-space:
3325 */
3326static DEFINE_MUTEX(tracing_cpumask_update_lock);
3327
3328/*
3329 * Temporary storage for the character representation of the
3330 * CPU bitmask (and one more byte for the newline):
3331 */
3332static char mask_str[NR_CPUS + 1];
3333
Ingo Molnarc7078de2008-05-12 21:20:52 +02003334static ssize_t
3335tracing_cpumask_read(struct file *filp, char __user *ubuf,
3336 size_t count, loff_t *ppos)
3337{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003338 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003339 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003340
3341 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003342
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003343 len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003344 if (count - len < 2) {
3345 count = -EINVAL;
3346 goto out_err;
3347 }
3348 len += sprintf(mask_str + len, "\n");
3349 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3350
3351out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003352 mutex_unlock(&tracing_cpumask_update_lock);
3353
3354 return count;
3355}
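/*
 * Editor's note: mask_str is one static buffer shared by every reader,
 * so the scnprintf-then-copy sequence above is only safe because it
 * runs under tracing_cpumask_update_lock.
 */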
3356
3357static ssize_t
3358tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3359 size_t count, loff_t *ppos)
3360{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003361 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303362 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003363 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303364
3365 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3366 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003367
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303368 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003369 if (err)
3370 goto err_unlock;
3371
Li Zefan215368e2009-06-15 10:56:42 +08003372 mutex_lock(&tracing_cpumask_update_lock);
3373
Steven Rostedta5e25882008-12-02 15:34:05 -05003374 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003375 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003376 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003377 /*
3378 * Increase/decrease the disabled counter if we are
3379 * about to flip a bit in the cpumask:
3380 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003381 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303382 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003383 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3384 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003385 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003386 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303387 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003388 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3389 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003390 }
3391 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003392 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003393 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003394
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003395 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003396
Ingo Molnarc7078de2008-05-12 21:20:52 +02003397 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303398 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003399
Ingo Molnarc7078de2008-05-12 21:20:52 +02003400 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003401
3402err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003403 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003404
3405 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003406}
3407
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003408static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003409 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003410 .read = tracing_cpumask_read,
3411 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003412 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003413 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003414};
3415
Li Zefanfdb372e2009-12-08 11:15:59 +08003416static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003417{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003418 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003419 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003420 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003421 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003422
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003423 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003424 tracer_flags = tr->current_trace->flags->val;
3425 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003426
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003427 for (i = 0; trace_options[i]; i++) {
3428 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003429 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003430 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003431 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003432 }
3433
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003434 for (i = 0; trace_opts[i].name; i++) {
3435 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003436 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003437 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003438 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003439 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003440 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003441
Li Zefanfdb372e2009-12-08 11:15:59 +08003442 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003443}
3444
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003445static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003446 struct tracer_flags *tracer_flags,
3447 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003448{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003449 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003450 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003451
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003452 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003453 if (ret)
3454 return ret;
3455
3456 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003457 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003458 else
Zhaolei77708412009-08-07 18:53:21 +08003459 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003460 return 0;
3461}
3462
Li Zefan8d18eaa2009-12-08 11:17:06 +08003463/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003464static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003465{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003466 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003467 struct tracer_flags *tracer_flags = trace->flags;
3468 struct tracer_opt *opts = NULL;
3469 int i;
3470
3471 for (i = 0; tracer_flags->opts[i].name; i++) {
3472 opts = &tracer_flags->opts[i];
3473
3474 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003475 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003476 }
3477
3478 return -EINVAL;
3479}
3480
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003481/* Some tracers require overwrite to stay enabled */
3482int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3483{
3484 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3485 return -1;
3486
3487 return 0;
3488}
3489
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003490int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003491{
3492 /* do nothing if flag is already set */
3493 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003494 return 0;
3495
3496 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003497 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003498 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003499 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003500
3501 if (enabled)
3502 trace_flags |= mask;
3503 else
3504 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003505
3506 if (mask == TRACE_ITER_RECORD_CMD)
3507 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003508
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003509 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003510 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003511#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003512 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003513#endif
3514 }
Steven Rostedt81698832012-10-11 10:15:05 -04003515
3516 if (mask == TRACE_ITER_PRINTK)
3517 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003518
3519 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003520}
3521
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003522static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003523{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003524 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003525 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003526 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003527 int i;
3528
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003529 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003530
Li Zefan8d18eaa2009-12-08 11:17:06 +08003531 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003532 neg = 1;
3533 cmp += 2;
3534 }
3535
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003536 mutex_lock(&trace_types_lock);
3537
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003538 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003539 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003540 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003541 break;
3542 }
3543 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003544
3545 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003546 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003547 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003548
3549 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003550
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003551 return ret;
3552}
3553
3554static ssize_t
3555tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3556 size_t cnt, loff_t *ppos)
3557{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003558 struct seq_file *m = filp->private_data;
3559 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003560 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003561 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003562
3563 if (cnt >= sizeof(buf))
3564 return -EINVAL;
3565
3566 if (copy_from_user(&buf, ubuf, cnt))
3567 return -EFAULT;
3568
Steven Rostedta8dd2172013-01-09 20:54:17 -05003569 buf[cnt] = 0;
3570
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003571 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003572 if (ret < 0)
3573 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003574
Jiri Olsacf8517c2009-10-23 19:36:16 -04003575 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003576
3577 return cnt;
3578}
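/*
 * Illustrative usage (tracefs path is an assumption): the accepted
 * names are exactly those listed by tracing_trace_options_show(), and
 * trace_set_options() strips whitespace and honors a "no" prefix:
 *
 *	# echo overwrite      > /sys/kernel/debug/tracing/trace_options
 *	# echo noprint-parent > /sys/kernel/debug/tracing/trace_options
 */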
3579
Li Zefanfdb372e2009-12-08 11:15:59 +08003580static int tracing_trace_options_open(struct inode *inode, struct file *file)
3581{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003582 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003583 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003584
Li Zefanfdb372e2009-12-08 11:15:59 +08003585 if (tracing_disabled)
3586 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003587
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003588 if (trace_array_get(tr) < 0)
3589 return -ENODEV;
3590
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003591 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3592 if (ret < 0)
3593 trace_array_put(tr);
3594
3595 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003596}
3597
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003598static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003599 .open = tracing_trace_options_open,
3600 .read = seq_read,
3601 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003602 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003603 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003604};
3605
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003606static const char readme_msg[] =
3607 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003608 "# echo 0 > tracing_on : quick way to disable tracing\n"
3609 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3610 " Important files:\n"
3611 " trace\t\t\t- The static contents of the buffer\n"
3612 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3613 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3614 " current_tracer\t- function and latency tracers\n"
3615 " available_tracers\t- list of configured tracers for current_tracer\n"
3616 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3617 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3618 " trace_clock\t\t-change the clock used to order events\n"
3619 " local: Per cpu clock but may not be synced across CPUs\n"
3620 " global: Synced across CPUs but slows tracing down.\n"
3621 " counter: Not a clock, but just an increment\n"
3622 " uptime: Jiffy counter from time of boot\n"
3623 " perf: Same clock that perf events use\n"
3624#ifdef CONFIG_X86_64
3625 " x86-tsc: TSC cycle counter\n"
3626#endif
3627 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3628 " tracing_cpumask\t- Limit which CPUs to trace\n"
3629 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3630 "\t\t\t Remove sub-buffer with rmdir\n"
3631 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003632 "\t\t\t Disable an option by adding the prefix 'no' to the\n"
3633 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003634 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003635#ifdef CONFIG_DYNAMIC_FTRACE
3636 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003637 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3638 "\t\t\t functions\n"
3639 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3640 "\t modules: Can select a group via module\n"
3641 "\t Format: :mod:<module-name>\n"
3642 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3643 "\t triggers: a command to perform when function is hit\n"
3644 "\t Format: <function>:<trigger>[:count]\n"
3645 "\t trigger: traceon, traceoff\n"
3646 "\t\t enable_event:<system>:<event>\n"
3647 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003648#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003649 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003650#endif
3651#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003652 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003653#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003654 "\t\t dump\n"
3655 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003656 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3657 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3658 "\t The first one will disable tracing every time do_fault is hit\n"
3659 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
 3660	 "\t The first time do_trap is hit and it disables tracing, the\n"
3661 "\t counter will decrement to 2. If tracing is already disabled,\n"
3662 "\t the counter will not decrement. It only decrements when the\n"
3663 "\t trigger did work\n"
3664 "\t To remove trigger without count:\n"
 3665	 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
3666 "\t To remove trigger with a count:\n"
 3667	 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003668 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003669 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3670 "\t modules: Can select a group via module command :mod:\n"
3671 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003672#endif /* CONFIG_DYNAMIC_FTRACE */
3673#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003674 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3675 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003676#endif
3677#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3678 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003679 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003680 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3681#endif
3682#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003683 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3684 "\t\t\t snapshot buffer. Read the contents for more\n"
3685 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003686#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003687#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003688 " stack_trace\t\t- Shows the max stack trace when active\n"
3689 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003690 "\t\t\t Write into this file to reset the max size (trigger a\n"
3691 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003692#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003693 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3694 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003695#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003696#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003697 " events/\t\t- Directory containing all trace event subsystems:\n"
3698 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3699 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003700 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3701 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003702 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003703 " events/<system>/<event>/\t- Directory containing control files for\n"
3704 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003705 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3706 " filter\t\t- If set, only events passing filter are traced\n"
3707 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003708 "\t Format: <trigger>[:count][if <filter>]\n"
3709 "\t trigger: traceon, traceoff\n"
3710 "\t enable_event:<system>:<event>\n"
3711 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003712#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003713 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003714#endif
3715#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003716 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003717#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003718 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3719 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3720 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3721 "\t events/block/block_unplug/trigger\n"
3722 "\t The first disables tracing every time block_unplug is hit.\n"
3723 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3724 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
 3725	 "\t is hit and has a value greater than 1 for the 'nr_rq' event field.\n"
3726 "\t Like function triggers, the counter is only decremented if it\n"
3727 "\t enabled or disabled tracing.\n"
3728 "\t To remove a trigger without a count:\n"
 3729	 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
3730 "\t To remove a trigger with a count:\n"
 3731	 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
3732 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003733;
3734
3735static ssize_t
3736tracing_readme_read(struct file *filp, char __user *ubuf,
3737 size_t cnt, loff_t *ppos)
3738{
3739 return simple_read_from_buffer(ubuf, cnt, ppos,
3740 readme_msg, strlen(readme_msg));
3741}
3742
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003743static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003744 .open = tracing_open_generic,
3745 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003746 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003747};
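/*
 * The readme_msg text above is served verbatim by tracing_readme_read();
 * it is exposed (by init code elsewhere in this file) as the README
 * file. Assuming the usual debugfs mount point:
 *
 *   # cat /sys/kernel/debug/tracing/README
 */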
3748
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003749static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003750{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003751 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003752
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003753 if (*pos || m->count)
3754 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003755
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003756 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003757
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003758 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3759 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003760 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003761 continue;
3762
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003763 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003764 }
3765
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003766 return NULL;
3767}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003768
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003769static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3770{
3771 void *v;
3772 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003773
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003774 preempt_disable();
3775 arch_spin_lock(&trace_cmdline_lock);
3776
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003777 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003778 while (l <= *pos) {
3779 v = saved_cmdlines_next(m, v, &l);
3780 if (!v)
3781 return NULL;
3782 }
3783
3784 return v;
3785}
3786
3787static void saved_cmdlines_stop(struct seq_file *m, void *v)
3788{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003789 arch_spin_unlock(&trace_cmdline_lock);
3790 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003791}
3792
3793static int saved_cmdlines_show(struct seq_file *m, void *v)
3794{
3795 char buf[TASK_COMM_LEN];
3796 unsigned int *pid = v;
3797
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003798 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003799 seq_printf(m, "%d %s\n", *pid, buf);
3800 return 0;
3801}
3802
3803static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3804 .start = saved_cmdlines_start,
3805 .next = saved_cmdlines_next,
3806 .stop = saved_cmdlines_stop,
3807 .show = saved_cmdlines_show,
3808};
3809
3810static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3811{
3812 if (tracing_disabled)
3813 return -ENODEV;
3814
3815 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003816}
3817
3818static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003819 .open = tracing_saved_cmdlines_open,
3820 .read = seq_read,
3821 .llseek = seq_lseek,
3822 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003823};
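/*
 * A short example of what the seq_file above produces; each line is
 * "<pid> <comm>" as printed by saved_cmdlines_show() (the pids and
 * comms shown here are made up):
 *
 *   # cat /sys/kernel/debug/tracing/saved_cmdlines
 *   1234 bash
 *   5678 kworker/0:1
 */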
3824
3825static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003826tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3827 size_t cnt, loff_t *ppos)
3828{
3829 char buf[64];
3830 int r;
3831
3832 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003833 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003834 arch_spin_unlock(&trace_cmdline_lock);
3835
3836 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3837}
3838
3839static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3840{
3841 kfree(s->saved_cmdlines);
3842 kfree(s->map_cmdline_to_pid);
3843 kfree(s);
3844}
3845
3846static int tracing_resize_saved_cmdlines(unsigned int val)
3847{
3848 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3849
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003850 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003851 if (!s)
3852 return -ENOMEM;
3853
3854 if (allocate_cmdlines_buffer(val, s) < 0) {
3855 kfree(s);
3856 return -ENOMEM;
3857 }
3858
3859 arch_spin_lock(&trace_cmdline_lock);
3860 savedcmd_temp = savedcmd;
3861 savedcmd = s;
3862 arch_spin_unlock(&trace_cmdline_lock);
3863 free_saved_cmdlines_buffer(savedcmd_temp);
3864
3865 return 0;
3866}
3867
3868static ssize_t
3869tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3870 size_t cnt, loff_t *ppos)
3871{
3872 unsigned long val;
3873 int ret;
3874
3875 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3876 if (ret)
3877 return ret;
3878
 3879	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3880 if (!val || val > PID_MAX_DEFAULT)
3881 return -EINVAL;
3882
3883 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3884 if (ret < 0)
3885 return ret;
3886
3887 *ppos += cnt;
3888
3889 return cnt;
3890}
3891
3892static const struct file_operations tracing_saved_cmdlines_size_fops = {
3893 .open = tracing_open_generic,
3894 .read = tracing_saved_cmdlines_size_read,
3895 .write = tracing_saved_cmdlines_size_write,
3896};
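/*
 * A hedged example of resizing the saved cmdlines map through the file
 * backed by the fops above; the write path rejects 0 and anything
 * above PID_MAX_DEFAULT:
 *
 *   # echo 4096 > /sys/kernel/debug/tracing/saved_cmdlines_size
 *   # cat /sys/kernel/debug/tracing/saved_cmdlines_size
 *   4096
 */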
3897
3898static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003899tracing_set_trace_read(struct file *filp, char __user *ubuf,
3900 size_t cnt, loff_t *ppos)
3901{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003902 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003903 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003904 int r;
3905
3906 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003907 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003908 mutex_unlock(&trace_types_lock);
3909
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003910 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003911}
3912
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003913int tracer_init(struct tracer *t, struct trace_array *tr)
3914{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003915 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003916 return t->init(tr);
3917}
3918
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003919static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003920{
3921 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003922
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003923 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003924 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003925}
3926
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003927#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003928/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003929static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3930 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003931{
3932 int cpu, ret = 0;
3933
3934 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3935 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003936 ret = ring_buffer_resize(trace_buf->buffer,
3937 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003938 if (ret < 0)
3939 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003940 per_cpu_ptr(trace_buf->data, cpu)->entries =
3941 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003942 }
3943 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003944 ret = ring_buffer_resize(trace_buf->buffer,
3945 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003946 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003947 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3948 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003949 }
3950
3951 return ret;
3952}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003953#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003954
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003955static int __tracing_resize_ring_buffer(struct trace_array *tr,
3956 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003957{
3958 int ret;
3959
3960 /*
 3961	 * If the kernel or the user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04003962 * we use the size that was given, and we can forget about
3963 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003964 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05003965 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04003966
Steven Rostedtb382ede62012-10-10 21:44:34 -04003967 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003968 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003969 return 0;
3970
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003971 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003972 if (ret < 0)
3973 return ret;
3974
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003975#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003976 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3977 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003978 goto out;
3979
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003980 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003981 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003982 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3983 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003984 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003985 /*
3986 * AARGH! We are left with different
3987 * size max buffer!!!!
3988 * The max buffer is our "snapshot" buffer.
3989 * When a tracer needs a snapshot (one of the
3990 * latency tracers), it swaps the max buffer
 3991			 * with the saved snapshot. We succeeded in
 3992			 * updating the size of the main buffer, but failed to
3993 * update the size of the max buffer. But when we tried
3994 * to reset the main buffer to the original size, we
3995 * failed there too. This is very unlikely to
3996 * happen, but if it does, warn and kill all
3997 * tracing.
3998 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003999 WARN_ON(1);
4000 tracing_disabled = 1;
4001 }
4002 return ret;
4003 }
4004
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004005 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004006 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004007 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004008 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004009
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004010 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004011#endif /* CONFIG_TRACER_MAX_TRACE */
4012
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004013 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004014 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004015 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004016 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004017
4018 return ret;
4019}
4020
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004021static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4022 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004023{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004024 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004025
4026 mutex_lock(&trace_types_lock);
4027
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004028 if (cpu_id != RING_BUFFER_ALL_CPUS) {
 4029		/* make sure this cpu is enabled in the mask */
4030 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4031 ret = -EINVAL;
4032 goto out;
4033 }
4034 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004035
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004036 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004037 if (ret < 0)
4038 ret = -ENOMEM;
4039
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004040out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004041 mutex_unlock(&trace_types_lock);
4042
4043 return ret;
4044}
4045
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004046
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004047/**
4048 * tracing_update_buffers - used by tracing facility to expand ring buffers
4049 *
 4050 * To save memory when tracing is never used on a system that has it
 4051 * configured in, the ring buffers are set to a minimum size. Once
 4052 * a user starts to use the tracing facility, they need to grow
4053 * to their default size.
4054 *
4055 * This function is to be called when a tracer is about to be used.
4056 */
4057int tracing_update_buffers(void)
4058{
4059 int ret = 0;
4060
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004061 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004062 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004063 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004064 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004065 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004066
4067 return ret;
4068}
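/*
 * A minimal caller sketch (the call site itself is hypothetical): code
 * that is about to enable tracing expands the boot-time minimal
 * buffers first and gives up if the resize fails:
 *
 *   ret = tracing_update_buffers();
 *   if (ret < 0)
 *           return ret;
 */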
4069
Steven Rostedt577b7852009-02-26 23:43:05 -05004070struct trace_option_dentry;
4071
4072static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004073create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004074
4075static void
4076destroy_trace_option_files(struct trace_option_dentry *topts);
4077
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004078/*
4079 * Used to clear out the tracer before deletion of an instance.
4080 * Must have trace_types_lock held.
4081 */
4082static void tracing_set_nop(struct trace_array *tr)
4083{
4084 if (tr->current_trace == &nop_trace)
4085 return;
4086
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004087 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004088
4089 if (tr->current_trace->reset)
4090 tr->current_trace->reset(tr);
4091
4092 tr->current_trace = &nop_trace;
4093}
4094
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004095static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004096{
Steven Rostedt577b7852009-02-26 23:43:05 -05004097 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004098 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004099#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004100 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004101#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004102 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004103
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004104 mutex_lock(&trace_types_lock);
4105
Steven Rostedt73c51622009-03-11 13:42:01 -04004106 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004107 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004108 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004109 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004110 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004111 ret = 0;
4112 }
4113
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004114 for (t = trace_types; t; t = t->next) {
4115 if (strcmp(t->name, buf) == 0)
4116 break;
4117 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004118 if (!t) {
4119 ret = -EINVAL;
4120 goto out;
4121 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004122 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004123 goto out;
4124
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004125 /* Some tracers are only allowed for the top level buffer */
4126 if (!trace_ok_for_array(t, tr)) {
4127 ret = -EINVAL;
4128 goto out;
4129 }
4130
Steven Rostedt9f029e82008-11-12 15:24:24 -05004131 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004132
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004133 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004134
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004135 if (tr->current_trace->reset)
4136 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004137
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004138 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004139 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004140
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004141#ifdef CONFIG_TRACER_MAX_TRACE
4142 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004143
4144 if (had_max_tr && !t->use_max_tr) {
4145 /*
4146 * We need to make sure that the update_max_tr sees that
4147 * current_trace changed to nop_trace to keep it from
4148 * swapping the buffers after we resize it.
 4149			 * The update_max_tr is called with interrupts disabled,
 4150			 * so a synchronize_sched() is sufficient.
4151 */
4152 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004153 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004154 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004155#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004156 /* Currently, only the top instance has options */
4157 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4158 destroy_trace_option_files(topts);
4159 topts = create_trace_option_files(tr, t);
4160 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004161
4162#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004163 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004164 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004165 if (ret < 0)
4166 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004167 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004168#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004169
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004170 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004171 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004172 if (ret)
4173 goto out;
4174 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004175
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004176 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004177 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004178 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004179 out:
4180 mutex_unlock(&trace_types_lock);
4181
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004182 return ret;
4183}
4184
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004185static ssize_t
4186tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4187 size_t cnt, loff_t *ppos)
4188{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004189 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004190 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004191 int i;
4192 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004193 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004194
Steven Rostedt60063a62008-10-28 10:44:24 -04004195 ret = cnt;
4196
Li Zefanee6c2c12009-09-18 14:06:47 +08004197 if (cnt > MAX_TRACER_SIZE)
4198 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004199
4200 if (copy_from_user(&buf, ubuf, cnt))
4201 return -EFAULT;
4202
4203 buf[cnt] = 0;
4204
 4205	/* strip trailing whitespace. */
4206 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4207 buf[i] = 0;
4208
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004209 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004210 if (err)
4211 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004212
Jiri Olsacf8517c2009-10-23 19:36:16 -04004213 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004214
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004215 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004216}
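/*
 * Example of the write handler above in action (the path assumes
 * debugfs is mounted at /sys/kernel/debug); trailing whitespace from
 * echo is stripped before tracing_set_tracer() runs:
 *
 *   # echo function > /sys/kernel/debug/tracing/current_tracer
 *   # cat /sys/kernel/debug/tracing/current_tracer
 *   function
 */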
4217
4218static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004219tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4220 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004221{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004222 char buf[64];
4223 int r;
4224
Steven Rostedtcffae432008-05-12 21:21:00 +02004225 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004226 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004227 if (r > sizeof(buf))
4228 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004229 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004230}
4231
4232static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004233tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4234 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004235{
Hannes Eder5e398412009-02-10 19:44:34 +01004236 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004237 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004238
Peter Huewe22fe9b52011-06-07 21:58:27 +02004239 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4240 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004241 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004242
4243 *ptr = val * 1000;
4244
4245 return cnt;
4246}
4247
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004248static ssize_t
4249tracing_thresh_read(struct file *filp, char __user *ubuf,
4250 size_t cnt, loff_t *ppos)
4251{
4252 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4253}
4254
4255static ssize_t
4256tracing_thresh_write(struct file *filp, const char __user *ubuf,
4257 size_t cnt, loff_t *ppos)
4258{
4259 struct trace_array *tr = filp->private_data;
4260 int ret;
4261
4262 mutex_lock(&trace_types_lock);
4263 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4264 if (ret < 0)
4265 goto out;
4266
4267 if (tr->current_trace->update_thresh) {
4268 ret = tr->current_trace->update_thresh(tr);
4269 if (ret < 0)
4270 goto out;
4271 }
4272
4273 ret = cnt;
4274out:
4275 mutex_unlock(&trace_types_lock);
4276
4277 return ret;
4278}
4279
4280static ssize_t
4281tracing_max_lat_read(struct file *filp, char __user *ubuf,
4282 size_t cnt, loff_t *ppos)
4283{
4284 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4285}
4286
4287static ssize_t
4288tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4289 size_t cnt, loff_t *ppos)
4290{
4291 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4292}
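/*
 * A small worked example for the nsecs helpers above: values are
 * written in microseconds but stored in nanoseconds, so
 *
 *   # echo 100 > /sys/kernel/debug/tracing/tracing_thresh
 *
 * stores 100 * 1000 = 100000 ns in tracing_thresh.
 */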
4293
Steven Rostedtb3806b42008-05-12 21:20:46 +02004294static int tracing_open_pipe(struct inode *inode, struct file *filp)
4295{
Oleg Nesterov15544202013-07-23 17:25:57 +02004296 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004297 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004298 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004299
4300 if (tracing_disabled)
4301 return -ENODEV;
4302
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004303 if (trace_array_get(tr) < 0)
4304 return -ENODEV;
4305
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004306 mutex_lock(&trace_types_lock);
4307
Steven Rostedtb3806b42008-05-12 21:20:46 +02004308 /* create a buffer to store the information to pass to userspace */
4309 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004310 if (!iter) {
4311 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004312 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004313 goto out;
4314 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004315
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004316 /*
4317 * We make a copy of the current tracer to avoid concurrent
 4318	 * changes to it while we are reading.
4319 */
4320 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
4321 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004322 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004323 goto fail;
4324 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004325 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004326
4327 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4328 ret = -ENOMEM;
4329 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304330 }
4331
Steven Rostedta3097202008-11-07 22:36:02 -05004332 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304333 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004334
Steven Rostedt112f38a72009-06-01 15:16:05 -04004335 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4336 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4337
David Sharp8be07092012-11-13 12:18:22 -08004338 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004339 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004340 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4341
Oleg Nesterov15544202013-07-23 17:25:57 +02004342 iter->tr = tr;
4343 iter->trace_buffer = &tr->trace_buffer;
4344 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004345 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004346 filp->private_data = iter;
4347
Steven Rostedt107bad82008-05-12 21:21:01 +02004348 if (iter->trace->pipe_open)
4349 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004350
Arnd Bergmannb4447862010-07-07 23:40:11 +02004351 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004352out:
4353 mutex_unlock(&trace_types_lock);
4354 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004355
4356fail:
4357 kfree(iter->trace);
4358 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004359 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004360 mutex_unlock(&trace_types_lock);
4361 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004362}
4363
4364static int tracing_release_pipe(struct inode *inode, struct file *file)
4365{
4366 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004367 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004368
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004369 mutex_lock(&trace_types_lock);
4370
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004371 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004372 iter->trace->pipe_close(iter);
4373
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004374 mutex_unlock(&trace_types_lock);
4375
Rusty Russell44623442009-01-01 10:12:23 +10304376 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004377 mutex_destroy(&iter->mutex);
4378 kfree(iter->trace);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004379 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004380
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004381 trace_array_put(tr);
4382
Steven Rostedtb3806b42008-05-12 21:20:46 +02004383 return 0;
4384}
4385
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004386static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004387trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004388{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004389 /* Iterators are static, they should be filled or empty */
4390 if (trace_buffer_iter(iter, iter->cpu_file))
4391 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004392
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004393 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004394 /*
4395 * Always select as readable when in blocking mode
4396 */
4397 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004398 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004399 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004400 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004401}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004402
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004403static unsigned int
4404tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4405{
4406 struct trace_iterator *iter = filp->private_data;
4407
4408 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004409}
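/*
 * A hedged userspace sketch of waiting on trace_pipe with poll(2);
 * the path is an assumption (debugfs mount point):
 *
 *   struct pollfd pfd = {
 *           .fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY),
 *           .events = POLLIN,
 *   };
 *   poll(&pfd, 1, -1);
 *
 * poll() returns once the handler above reports POLLIN | POLLRDNORM.
 */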
4410
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004411/* Must be called with trace_types_lock mutex held. */
4412static int tracing_wait_pipe(struct file *filp)
4413{
4414 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004415 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004416
4417 while (trace_empty(iter)) {
4418
4419 if ((filp->f_flags & O_NONBLOCK)) {
4420 return -EAGAIN;
4421 }
4422
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004423 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004424		 * We stop blocking once we have read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004425 * We still block if tracing is disabled, but we have never
4426 * read anything. This allows a user to cat this file, and
4427 * then enable tracing. But after we have read something,
4428 * we give an EOF when tracing is again disabled.
4429 *
4430 * iter->pos will be 0 if we haven't read anything.
4431 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004432 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004433 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004434
4435 mutex_unlock(&iter->mutex);
4436
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004437 ret = wait_on_pipe(iter);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004438
4439 mutex_lock(&iter->mutex);
4440
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004441 if (ret)
4442 return ret;
4443
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004444 if (signal_pending(current))
4445 return -EINTR;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004446 }
4447
4448 return 1;
4449}
4450
Steven Rostedtb3806b42008-05-12 21:20:46 +02004451/*
4452 * Consumer reader.
4453 */
4454static ssize_t
4455tracing_read_pipe(struct file *filp, char __user *ubuf,
4456 size_t cnt, loff_t *ppos)
4457{
4458 struct trace_iterator *iter = filp->private_data;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004459 struct trace_array *tr = iter->tr;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004460 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004461
4462 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004463 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4464 if (sret != -EBUSY)
4465 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004466
Steven Rostedtf9520752009-03-02 14:04:40 -05004467 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004468
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004469 /* copy the tracer to avoid using a global lock all around */
Steven Rostedt107bad82008-05-12 21:21:01 +02004470 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004471 if (unlikely(iter->trace->name != tr->current_trace->name))
4472 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004473 mutex_unlock(&trace_types_lock);
4474
4475 /*
4476 * Avoid more than one consumer on a single file descriptor
4477 * This is just a matter of traces coherency, the ring buffer itself
4478 * is protected.
4479 */
4480 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004481 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004482 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4483 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004484 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004485 }
4486
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004487waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004488 sret = tracing_wait_pipe(filp);
4489 if (sret <= 0)
4490 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004491
4492 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004493 if (trace_empty(iter)) {
4494 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004495 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004496 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004497
4498 if (cnt >= PAGE_SIZE)
4499 cnt = PAGE_SIZE - 1;
4500
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004501 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004502 memset(&iter->seq, 0,
4503 sizeof(struct trace_iterator) -
4504 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004505 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004506 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004507
Lai Jiangshan4f535962009-05-18 19:35:34 +08004508 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004509 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004510 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004511 enum print_line_t ret;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004512 int len = iter->seq.len;
4513
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004514 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004515 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004516 /* don't print partial lines */
4517 iter->seq.len = len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004518 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004519 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004520 if (ret != TRACE_TYPE_NO_CONSUME)
4521 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004522
4523 if (iter->seq.len >= cnt)
4524 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004525
4526 /*
 4527		 * size and we should have left via the partial output condition above.
4528 * size and we should leave by partial output condition above.
4529 * One of the trace_seq_* functions is not used properly.
4530 */
4531 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4532 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004533 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004534 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004535 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004536
Steven Rostedtb3806b42008-05-12 21:20:46 +02004537 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004538 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4539 if (iter->seq.readpos >= iter->seq.len)
Steven Rostedtf9520752009-03-02 14:04:40 -05004540 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004541
4542 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004543 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004544 * entries, go back to wait for more entries.
4545 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004546 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004547 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004548
Steven Rostedt107bad82008-05-12 21:21:01 +02004549out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004550 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004551
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004552 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004553}
4554
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004555static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4556 unsigned int idx)
4557{
4558 __free_page(spd->pages[idx]);
4559}
4560
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004561static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004562 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004563 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004564 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004565 .steal = generic_pipe_buf_steal,
4566 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004567};
4568
Steven Rostedt34cd4992009-02-09 12:06:29 -05004569static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004570tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004571{
4572 size_t count;
4573 int ret;
4574
4575 /* Seq buffer is page-sized, exactly what we need. */
4576 for (;;) {
4577 count = iter->seq.len;
4578 ret = print_trace_line(iter);
4579 count = iter->seq.len - count;
4580 if (rem < count) {
4581 rem = 0;
4582 iter->seq.len -= count;
4583 break;
4584 }
4585 if (ret == TRACE_TYPE_PARTIAL_LINE) {
4586 iter->seq.len -= count;
4587 break;
4588 }
4589
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004590 if (ret != TRACE_TYPE_NO_CONSUME)
4591 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004592 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004593 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004594 rem = 0;
4595 iter->ent = NULL;
4596 break;
4597 }
4598 }
4599
4600 return rem;
4601}
4602
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004603static ssize_t tracing_splice_read_pipe(struct file *filp,
4604 loff_t *ppos,
4605 struct pipe_inode_info *pipe,
4606 size_t len,
4607 unsigned int flags)
4608{
Jens Axboe35f3d142010-05-20 10:43:18 +02004609 struct page *pages_def[PIPE_DEF_BUFFERS];
4610 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004611 struct trace_iterator *iter = filp->private_data;
4612 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004613 .pages = pages_def,
4614 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004615 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004616 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004617 .flags = flags,
4618 .ops = &tracing_pipe_buf_ops,
4619 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004620 };
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004621 struct trace_array *tr = iter->tr;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004622 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004623 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004624 unsigned int i;
4625
Jens Axboe35f3d142010-05-20 10:43:18 +02004626 if (splice_grow_spd(pipe, &spd))
4627 return -ENOMEM;
4628
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004629 /* copy the tracer to avoid using a global lock all around */
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004630 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004631 if (unlikely(iter->trace->name != tr->current_trace->name))
4632 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004633 mutex_unlock(&trace_types_lock);
4634
4635 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004636
4637 if (iter->trace->splice_read) {
4638 ret = iter->trace->splice_read(iter, filp,
4639 ppos, pipe, len, flags);
4640 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004641 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004642 }
4643
4644 ret = tracing_wait_pipe(filp);
4645 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004646 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004647
Jason Wessel955b61e2010-08-05 09:22:23 -05004648 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004649 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004650 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004651 }
4652
Lai Jiangshan4f535962009-05-18 19:35:34 +08004653 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004654 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004655
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004656 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004657 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004658 spd.pages[i] = alloc_page(GFP_KERNEL);
4659 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004660 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004661
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004662 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004663
4664 /* Copy the data into the page, so we can start over. */
4665 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004666 page_address(spd.pages[i]),
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004667 iter->seq.len);
4668 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004669 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004670 break;
4671 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004672 spd.partial[i].offset = 0;
4673 spd.partial[i].len = iter->seq.len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004674
Steven Rostedtf9520752009-03-02 14:04:40 -05004675 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004676 }
4677
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004678 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004679 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004680 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004681
4682 spd.nr_pages = i;
4683
Jens Axboe35f3d142010-05-20 10:43:18 +02004684 ret = splice_to_pipe(pipe, &spd);
4685out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004686 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004687 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004688
Steven Rostedt34cd4992009-02-09 12:06:29 -05004689out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004690 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004691 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004692}
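/*
 * A hedged userspace sketch of the splice path above; it moves trace
 * data into a pipe without an intermediate userspace copy (fd paths
 * and sizes are assumptions):
 *
 *   int p[2];
 *   int tfd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
 *
 *   pipe(p);
 *   splice(tfd, NULL, p[1], NULL, 64 * 1024, 0);
 */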
4693
Steven Rostedta98a3c32008-05-12 21:20:59 +02004694static ssize_t
4695tracing_entries_read(struct file *filp, char __user *ubuf,
4696 size_t cnt, loff_t *ppos)
4697{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004698 struct inode *inode = file_inode(filp);
4699 struct trace_array *tr = inode->i_private;
4700 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004701 char buf[64];
4702 int r = 0;
4703 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004704
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004705 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004706
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004707 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004708 int cpu, buf_size_same;
4709 unsigned long size;
4710
4711 size = 0;
4712 buf_size_same = 1;
4713 /* check if all cpu sizes are same */
4714 for_each_tracing_cpu(cpu) {
4715 /* fill in the size from first enabled cpu */
4716 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004717 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4718 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004719 buf_size_same = 0;
4720 break;
4721 }
4722 }
4723
4724 if (buf_size_same) {
4725 if (!ring_buffer_expanded)
4726 r = sprintf(buf, "%lu (expanded: %lu)\n",
4727 size >> 10,
4728 trace_buf_size >> 10);
4729 else
4730 r = sprintf(buf, "%lu\n", size >> 10);
4731 } else
4732 r = sprintf(buf, "X\n");
4733 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004734 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004735
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004736 mutex_unlock(&trace_types_lock);
4737
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004738 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4739 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004740}
4741
4742static ssize_t
4743tracing_entries_write(struct file *filp, const char __user *ubuf,
4744 size_t cnt, loff_t *ppos)
4745{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004746 struct inode *inode = file_inode(filp);
4747 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004748 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004749 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004750
Peter Huewe22fe9b52011-06-07 21:58:27 +02004751 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4752 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004753 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004754
4755 /* must have at least 1 entry */
4756 if (!val)
4757 return -EINVAL;
4758
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004759 /* value is in KB */
4760 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004761 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004762 if (ret < 0)
4763 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004764
Jiri Olsacf8517c2009-10-23 19:36:16 -04004765 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004766
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004767 return cnt;
4768}
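/*
 * Example for the entries handlers above: buffer_size_kb takes a size
 * in KB (the write path shifts it left by 10 into bytes), applied per
 * cpu or to all cpus depending on which file is written:
 *
 *   # echo 2048 > /sys/kernel/debug/tracing/buffer_size_kb
 *   # cat /sys/kernel/debug/tracing/buffer_size_kb
 *   2048
 */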
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004769
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004770static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004771tracing_total_entries_read(struct file *filp, char __user *ubuf,
4772 size_t cnt, loff_t *ppos)
4773{
4774 struct trace_array *tr = filp->private_data;
4775 char buf[64];
4776 int r, cpu;
4777 unsigned long size = 0, expanded_size = 0;
4778
4779 mutex_lock(&trace_types_lock);
4780 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004781 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004782 if (!ring_buffer_expanded)
4783 expanded_size += trace_buf_size >> 10;
4784 }
4785 if (ring_buffer_expanded)
4786 r = sprintf(buf, "%lu\n", size);
4787 else
4788 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
4789 mutex_unlock(&trace_types_lock);
4790
4791 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4792}
4793
4794static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004795tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
4796 size_t cnt, loff_t *ppos)
4797{
4798 /*
 4799	 * There is no need to read what the user has written; this function
 4800	 * just makes sure that there is no error when "echo" is used
4801 */
4802
	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

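/*
 * Write handler for the trace_marker file (created elsewhere in this
 * file): inject a string from userspace into the ring buffer as a
 * TRACE_PRINT entry.  E.g., assuming debugfs is mounted at
 * /sys/kernel/debug:
 *
 *	echo "hello from userspace" > /sys/kernel/debug/tracing/trace_marker
 */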
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory.  It
	 * most likely already is, since userspace just referenced it,
	 * but there is no guarantee.  By using get_user_pages_fast()
	 * and kmap_atomic()/kunmap_atomic() we can access the pages
	 * directly and write the data straight into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

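/*
 * Select the clock used to timestamp events, looked up by name in the
 * trace_clocks[] table (the available names can be read back from the
 * trace_clock file, e.g. "echo global > trace_clock").  Switching
 * clocks resets the buffers, since timestamps taken with two
 * different clocks cannot be compared.
 */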
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
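/*
 * Open handler for the snapshot file.  Reads go through the normal
 * trace open path, but against the max (snapshot) buffer; write-only
 * opens just get a stub seq_file to carry the iterator.
 */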
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

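/*
 * Values accepted by the snapshot file:
 *   0:	free the snapshot buffer (only valid for the all-cpus file)
 *   1:	allocate the snapshot buffer if needed and swap it with the
 *	live buffer (per-cpu swap only if the ring buffer supports it)
 *   else: clear the snapshot buffer without freeing it
 */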
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

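/*
 * snapshot_raw is the snapshot counterpart of trace_pipe_raw: reuse
 * the trace_pipe_raw open path, then point the iterator at the max
 * buffer instead of the live one.
 */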
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */

static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

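/*
 * Open handler for trace_pipe_raw, which hands whole ring buffer
 * pages to userspace with no formatting.  The iterator is embedded in
 * ftrace_buffer_info rather than coming from the normal trace open
 * path.
 */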
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

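/*
 * Read full pages from the ring buffer into a spare page, then copy
 * that to userspace.  info->read tracks how much of the spare page
 * has been consumed, so a short read continues where the previous one
 * left off.
 */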
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			ret = wait_on_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (ret) {
				size = ret;
				goto out_unlock;
			}
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

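/*
 * A reference-counted ring buffer page handed to splice_to_pipe().
 * The page goes back to the ring buffer only when the last pipe
 * buffer referencing it is released.
 */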
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), used to release some pages at the
 * end of the spd in case we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

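/*
 * Splice support for trace_pipe_raw: rather than copying, ring buffer
 * pages are linked into the pipe via buffer_ref, so trace data moves
 * to the reader without an intermediate copy.  *ppos must be page
 * aligned, and len is rounded down to a page multiple (at least one
 * page).
 */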
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this page is headed
		 * for user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		ret = wait_on_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (ret)
			goto out;
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

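/*
 * Read handler for the per-cpu stats file: report entry, overrun,
 * byte, dropped and read counts for one cpu's buffer, plus the oldest
 * and current timestamps when the active clock counts in nanoseconds.
 */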
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);

	kfree(s);

	return count;
}

static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf + r, (size - 1) - r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

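/*
 * The "snapshot" command for set_ftrace_filter: take a snapshot every
 * time (or, with a count, the first N times) a traced function is
 * hit.  E.g.:
 *
 *	echo 'do_sys_open:snapshot' > set_ftrace_filter
 *	echo 'do_sys_open:snapshot:3' >> set_ftrace_filter
 *	echo '!do_sys_open:snapshot' >> set_ftrace_filter
 */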
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_printf(m, "snapshot");

	if (count == -1)
		seq_printf(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob + 1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
{
	if (tr->dir)
		return tr->dir;

	if (!debugfs_initialized())
		return NULL;

	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir)
		pr_warn_once("Could not create debugfs directory 'tracing'\n");

	return tr->dir;
}

struct dentry *tracing_init_dentry(void)
{
	return tracing_init_dentry_tr(&global_trace);
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create debugfs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

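/*
 * Per-cpu files stash the cpu number, biased by one so that cpu 0 is
 * distinguishable from "no cpu bound", in the inode's i_cdev field;
 * tracing_get_cpu() undoes the encoding.
 */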
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		ret->d_inode->i_cdev = (void *)(cpu + 1);
	return ret;
}

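/*
 * Create the per_cpu/cpuN/ directory for one cpu, populated with
 * trace_pipe, trace, trace_pipe_raw, stats, buffer_size_kb and, when
 * snapshots are configured in, snapshot and snapshot_raw.
 */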
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

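/*
 * Files under options/ that correspond to flags of the current tracer
 * (as opposed to the core trace_flags handled further down): each
 * reads back "0" or "1" and toggles the matching tracer_opt bit on
 * write.
 */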
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_read,
	.write		= trace_options_write,
	.llseek		= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open		= tracing_open_generic,
	.read		= trace_options_core_read,
	.write		= trace_options_core_write,
	.llseek		= generic_file_llseek,
};

Frederic Weisbecker5452af62009-03-27 00:25:38 +01006042struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006043 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006044 struct dentry *parent,
6045 void *data,
6046 const struct file_operations *fops)
6047{
6048 struct dentry *ret;
6049
6050 ret = debugfs_create_file(name, mode, parent, data, fops);
6051 if (!ret)
6052 pr_warning("Could not create debugfs '%s' entry\n", name);
6053
6054 return ret;
6055}
6056
6057
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_init_dentry_tr(tr);
	if (!d_tracer)
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

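/*
 * Create one debugfs file per tracer specific option. The array is
 * allocated with one extra zeroed element (kcalloc(cnt + 1, ...)),
 * which acts as the terminator that destroy_trace_option_files()
 * walks to.
 */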
static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

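/*
 * "tracing_on" control file. Assuming debugfs is mounted at
 * /sys/kernel/debug, writing to it switches recording on and off:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on
 *
 * A write also invokes the current tracer's start/stop callbacks,
 * if it provides them.
 */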
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

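/*
 * Allocate the ring buffer and the per CPU bookkeeping for one
 * trace_buffer. Returns 0 on success or -ENOMEM, in which case
 * anything allocated here has already been freed again.
 */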
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

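/*
 * Create a new trace instance: a private trace_array with its own
 * ring buffers, event directory, and debugfs tree. Assuming debugfs
 * is mounted at /sys/kernel/debug, this is what runs behind:
 *
 *	mkdir /sys/kernel/debug/tracing/instances/foo
 */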
static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref)
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory can not be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory can not be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup = simple_lookup,
	.mkdir = instance_mkdir,
	.rmdir = instance_rmdir,
};

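/*
 * Set up the "instances" directory. Its inode operations are
 * overridden so that mkdir and rmdir inside it create and destroy
 * full trace instances rather than plain directories.
 */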
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

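/*
 * Populate the debugfs directory of a trace array with the standard
 * control files. This runs once for the top level tracing directory
 * and again for every instance created under "instances".
 */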
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (!d_tracer)
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

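/*
 * Panic and die notifiers: when ftrace_dump_on_oops is set (for
 * example via the "ftrace_dump_on_oops" kernel command line option),
 * dump the trace buffers to the console as the kernel goes down, so
 * the events leading up to the crash survive in the log.
 */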
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is limited to a maximum of 1024 bytes; we really don't
 * need it that big. Nothing should be printing 1000 characters
 * anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->len >= TRACE_MAX_PRINT)
		s->len = TRACE_MAX_PRINT;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

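/*
 * Initialize an iterator over the global trace buffer, for callers
 * such as ftrace_dump() (and, presumably, kdb's buffer dump) that
 * need to walk the buffer without going through a file open.
 */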
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

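/*
 * Dump every remaining event in the selected per CPU buffers to the
 * console. Called from the panic/die notifiers above and from the
 * sysrq-z handler. Tracing is turned off for the dump; after a
 * sysrq-z it can be re-enabled through the tracing_on file.
 */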
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

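/*
 * Early boot initialization of the tracing core: allocate cpumasks
 * and the global ring buffer (kept at its minimum size until a
 * tracer actually needs it), register the nop tracer, and hook up
 * the panic/die notifiers.
 */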
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name is stored in a buffer that
	 * lives in an init section. This function runs at
	 * late_initcall time: if the boot tracer never registered,
	 * clear the pointer so that later tracer registrations do
	 * not compare against memory that is about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);