/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although some concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set it to 1 to dump the buffers of all CPUs
 * Set it to 2 to dump only the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
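
/*
 * Example (illustrative): enabling the dump at boot or at run time.
 * These forms follow directly from the parser and the comment above.
 *
 *	ftrace_dump_on_oops			(command line, dump all CPUs)
 *	ftrace_dump_on_oops=orig_cpu		(dump only the oops'ing CPU)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(at run time)
 */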

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
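
/*
 * Example (illustrative): the +500 above makes the division round to
 * nearest, e.g. ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */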

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

LIST_HEAD(ftrace_trace_arrays);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.trace_buffer.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);

	return ts;
}

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If a dump on oops happens, it is much appreciated not to
 * have to wait for all that output. Anyway, this can be
 * configured at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek(),
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

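/*
 * Example (illustrative sketch, not used in this file): a reader that
 * consumes events from one cpu buffer under the primitives above. The
 * function name is made up; the actual consuming step is elided.
 */
#if 0
static void example_consume_cpu_events(int cpu)
{
	trace_access_lock(cpu);
	/* ... peek at or consume events of @cpu's ring buffer here ... */
	trace_access_unlock(cpu);
}
#endif
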
/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_on(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing,
 * basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (!tr->allocated_snapshot) {
		trace_printk("*** SNAPSHOT NOT ALLOCATED ***\n");
		trace_printk("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		trace_printk("*** LATENCY TRACER ACTIVE ***\n");
		trace_printk("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (WARN_ON(ret < 0))
			return;

		tr->allocated_snapshot = true;
	}

	tracing_snapshot();
}
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
#endif /* CONFIG_TRACER_SNAPSHOT */

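/*
 * Example (illustrative sketch, not used in this file): a hypothetical
 * caller that keeps a one-shot copy of the live trace when it detects a
 * problem. The function name and condition are made up for illustration.
 */
#if 0
static void example_check_and_snapshot(bool something_went_wrong)
{
	if (something_went_wrong)
		tracing_snapshot();	/* swap live buffer into the snapshot */
}
#endif
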
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.trace_buffer.buffer)
		ring_buffer_record_off(global_trace.trace_buffer.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	if (global_trace.trace_buffer.buffer)
		return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

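/*
 * Example (illustrative sketch, not used in this file): bracketing a
 * suspect code path so that only its events remain in the ring buffer.
 */
#if 0
static void example_trace_window(void)
{
	tracing_on();
	/* ... code path of interest, events are recorded ... */
	tracing_off();
}
#endif
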
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
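
/*
 * Example (illustrative): since the size is parsed with memparse(),
 * the usual K/M/G suffixes are accepted on the command line, e.g.:
 *
 *	trace_buf_size=1441792
 *	trace_buf_size=16M
 */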

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,	"local",	1 },
	{ trace_clock_global,	"global",	1 },
	{ trace_clock_counter,	"counter",	0 },
	ARCH_TRACE_CLOCKS
};

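/*
 * Example (illustrative): the timestamp clock can be switched at run
 * time by writing one of the names above, e.g.:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */
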
int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

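/*
 * Example (illustrative sketch, not used in this file): how a debugfs
 * write handler might drive the parser above. The handler name and the
 * buffer size are made up; error handling is abbreviated.
 */
#if 0
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	if (trace_parser_get_init(&parser, 128))
		return -ENOMEM;

	read = trace_get_user(&parser, ubuf, cnt, ppos);
	if (read > 0 && trace_parser_loaded(&parser)) {
		/* parser.buffer now holds one NUL-terminated word */
	}

	trace_parser_put(&parser);
	return read;
}
#endif
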
ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr,
 * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (WARN_ON_ONCE(!tr->allocated_snapshot))
		return;

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return;

	ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

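/*
 * Example (illustrative sketch, not part of this file): the minimal
 * shape of a tracer plugin registered with the function above. The
 * names are made up; a real tracer also fills in init/reset and the
 * other callbacks it needs.
 */
#if 0
static struct tracer example_tracer __read_mostly = {
	.name	= "example",
};

static __init int init_example_tracer(void)
{
	return register_tracer(&example_tracer);
}
core_initcall(init_example_tracer);
#endif
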
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = ftrace_now(buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace.trace_buffer, cpu);
}

void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
	mutex_unlock(&trace_types_lock);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected. This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

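/*
 * Example (illustrative sketch, not used in this file): the
 * reserve/fill/commit pairing implemented by the helpers above. The
 * function name is made up and the payload step is elided.
 */
#if 0
static void example_write_entry(struct ring_buffer *buffer,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct trace_entry *ent;

	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT,
					  sizeof(*ent), flags, pc);
	if (!event)
		return;
	ent = ring_buffer_event_data(event);
	/* ... fill in the rest of the entry payload here ... */
	__buffer_unlock_commit(buffer, event);
}
#endif
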
static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001392trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001393 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1394 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001395{
Tom Zanussie1112b42009-03-31 00:48:49 -05001396 struct ftrace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001397 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001398 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001399 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001400
Steven Rostedtd7690412008-10-01 00:29:53 -04001401 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001402 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001403 return;
1404
Steven Rostedte77405a2009-09-02 14:17:06 -04001405 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001406 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001407 if (!event)
1408 return;
1409 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001410 entry->ip = ip;
1411 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001412
Steven Rostedte77405a2009-09-02 14:17:06 -04001413 if (!filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001414 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001415}
1416
Ingo Molnare309b412008-05-12 21:20:51 +02001417void
Ingo Molnar2e0f5762008-05-12 21:20:49 +02001418ftrace(struct trace_array *tr, struct trace_array_cpu *data,
Steven Rostedt38697052008-10-01 13:14:09 -04001419 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1420 int pc)
Ingo Molnar2e0f5762008-05-12 21:20:49 +02001421{
1422 if (likely(!atomic_read(&data->disabled)))
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001423 trace_function(tr, ip, parent_ip, flags, pc);
Ingo Molnar2e0f5762008-05-12 21:20:49 +02001424}
1425
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001426#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001427
1428#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1429struct ftrace_stack {
1430 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1431};
1432
1433static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1434static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1435
Steven Rostedte77405a2009-09-02 14:17:06 -04001436static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001437 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001438 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001439{
Tom Zanussie1112b42009-03-31 00:48:49 -05001440 struct ftrace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001441 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001442 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001443 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001444 int use_stack;
1445 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001446
1447 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001448 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001449
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001450 /*
1451 * Since events can happen in NMIs there's no safe way to use the
1452 * per-cpu ftrace_stacks without reserving one. We reserve it, and
1453 * if an interrupt or NMI comes in meanwhile, that context falls
1454 * back to saving at most FTRACE_STACK_ENTRIES entries directly.
1455 */
1456 preempt_disable_notrace();
1457
Shan Wei82146522012-11-19 13:21:01 +08001458 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001459 /*
1460 * We don't need any atomic variables, just a barrier.
1461 * If an interrupt comes in, we don't care, because it would
1462 * have exited and put the counter back to what we want.
1463 * We just need a barrier to keep gcc from moving things
1464 * around.
1465 */
1466 barrier();
1467 if (use_stack == 1) {
1468 trace.entries = &__get_cpu_var(ftrace_stack).calls[0];
1469 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1470
1471 if (regs)
1472 save_stack_trace_regs(regs, &trace);
1473 else
1474 save_stack_trace(&trace);
1475
1476 if (trace.nr_entries > size)
1477 size = trace.nr_entries;
1478 } else
1479 /* From now on, use_stack is a boolean */
1480 use_stack = 0;
1481
1482 size *= sizeof(unsigned long);
1483
1484 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1485 sizeof(*entry) + size, flags, pc);
1486 if (!event)
1487 goto out;
1488 entry = ring_buffer_event_data(event);
1489
1490 memset(&entry->caller, 0, size);
1491
1492 if (use_stack)
1493 memcpy(&entry->caller, trace.entries,
1494 trace.nr_entries * sizeof(unsigned long));
1495 else {
1496 trace.max_entries = FTRACE_STACK_ENTRIES;
1497 trace.entries = entry->caller;
1498 if (regs)
1499 save_stack_trace_regs(regs, &trace);
1500 else
1501 save_stack_trace(&trace);
1502 }
1503
1504 entry->size = trace.nr_entries;
1505
Steven Rostedte77405a2009-09-02 14:17:06 -04001506 if (!filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001507 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001508
1509 out:
1510 /* Again, don't let gcc optimize things here */
1511 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001512 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001513 preempt_enable_notrace();
1514
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001515}
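/*
 * Worked example of the reservation above: if a stack trace is being
 * saved into the per-cpu ftrace_stack and an NMI fires that also
 * records a stack trace, the nested call sees use_stack == 2, skips
 * the shared stack, and saves at most FTRACE_STACK_ENTRIES entries
 * directly into its own ring buffer event, leaving the outer
 * caller's data intact.
 */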
1516
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001517void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
1518 int skip, int pc, struct pt_regs *regs)
1519{
1520 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1521 return;
1522
1523 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
1524}
1525
Steven Rostedte77405a2009-09-02 14:17:06 -04001526void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1527 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001528{
1529 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1530 return;
1531
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001532 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001533}
1534
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001535void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1536 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001537{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001538 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001539}
1540
Steven Rostedt03889382009-12-11 09:48:22 -05001541/**
1542 * trace_dump_stack - record a stack back trace in the trace buffer
1543 */
1544void trace_dump_stack(void)
1545{
1546 unsigned long flags;
1547
1548 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001549 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001550
1551 local_save_flags(flags);
1552
1553 /* Skipping 3 frames seems to get us to the caller of this function */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001554 __ftrace_trace_stack(global_trace.trace_buffer.buffer, flags, 3,
1555 preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001556}
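/*
 * Example use (a sketch; the condition is hypothetical): drop
 * trace_dump_stack() into a suspect code path to record who got
 * there, then read the debugfs "trace" file:
 *
 *	if (unlikely(looks_wrong))
 *		trace_dump_stack();
 */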
1557
Steven Rostedt91e86e52010-11-10 12:56:12 +01001558static DEFINE_PER_CPU(int, user_stack_count);
1559
Steven Rostedte77405a2009-09-02 14:17:06 -04001560void
1561ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001562{
Tom Zanussie1112b42009-03-31 00:48:49 -05001563 struct ftrace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001564 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001565 struct userstack_entry *entry;
1566 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001567
1568 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1569 return;
1570
Steven Rostedtb6345872010-03-12 20:03:30 -05001571 /*
1572 * NMIs cannot handle page faults, even with fixups.
1573 * Saving the user stack can (and often does) fault.
1574 */
1575 if (unlikely(in_nmi()))
1576 return;
1577
Steven Rostedt91e86e52010-11-10 12:56:12 +01001578 /*
1579 * prevent recursion, since the user stack tracing may
1580 * trigger other kernel events.
1581 */
1582 preempt_disable();
1583 if (__this_cpu_read(user_stack_count))
1584 goto out;
1585
1586 __this_cpu_inc(user_stack_count);
1587
Steven Rostedte77405a2009-09-02 14:17:06 -04001588 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001589 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001590 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001591 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001592 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001593
Steven Rostedt48659d32009-09-11 11:36:23 -04001594 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001595 memset(&entry->caller, 0, sizeof(entry->caller));
1596
1597 trace.nr_entries = 0;
1598 trace.max_entries = FTRACE_STACK_ENTRIES;
1599 trace.skip = 0;
1600 trace.entries = entry->caller;
1601
1602 save_stack_trace_user(&trace);
Steven Rostedte77405a2009-09-02 14:17:06 -04001603 if (!filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001604 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001605
Li Zefan1dbd1952010-12-09 15:47:56 +08001606 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001607 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001608 out:
1609 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001610}
1611
Hannes Eder4fd27352009-02-10 19:44:12 +01001612#ifdef UNUSED
1613static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001614{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001615 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001616}
Hannes Eder4fd27352009-02-10 19:44:12 +01001617#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001618
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001619#endif /* CONFIG_STACKTRACE */
1620
Steven Rostedt07d777f2011-09-22 14:01:55 -04001621/* created for use with alloc_percpu */
1622struct trace_buffer_struct {
1623 char buffer[TRACE_BUF_SIZE];
1624};
1625
1626static struct trace_buffer_struct *trace_percpu_buffer;
1627static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1628static struct trace_buffer_struct *trace_percpu_irq_buffer;
1629static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1630
1631/*
1632 * The buffer used is dependent on the context. There is a per cpu
1633 * buffer for normal context, softirq context, hard irq context and
1634 * for NMI context. This allows for lockless recording.
1635 *
1636 * Note: if the buffers failed to be allocated, then this returns NULL.
1637 */
1638static char *get_trace_buf(void)
1639{
1640 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001641
1642 /*
1643 * If we have allocated per cpu buffers, then we do not
1644 * need to do any locking.
1645 */
1646 if (in_nmi())
1647 percpu_buffer = trace_percpu_nmi_buffer;
1648 else if (in_irq())
1649 percpu_buffer = trace_percpu_irq_buffer;
1650 else if (in_softirq())
1651 percpu_buffer = trace_percpu_sirq_buffer;
1652 else
1653 percpu_buffer = trace_percpu_buffer;
1654
1655 if (!percpu_buffer)
1656 return NULL;
1657
Shan Weid8a03492012-11-13 09:53:04 +08001658 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001659}
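/*
 * Context-to-buffer mapping implemented above; checking in_nmi()
 * first means an NMI that interrupts a hardirq still gets the NMI
 * buffer:
 *
 *	in_nmi()	-> trace_percpu_nmi_buffer
 *	in_irq()	-> trace_percpu_irq_buffer
 *	in_softirq()	-> trace_percpu_sirq_buffer
 *	otherwise	-> trace_percpu_buffer
 */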
1660
1661static int alloc_percpu_trace_buffer(void)
1662{
1663 struct trace_buffer_struct *buffers;
1664 struct trace_buffer_struct *sirq_buffers;
1665 struct trace_buffer_struct *irq_buffers;
1666 struct trace_buffer_struct *nmi_buffers;
1667
1668 buffers = alloc_percpu(struct trace_buffer_struct);
1669 if (!buffers)
1670 goto err_warn;
1671
1672 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
1673 if (!sirq_buffers)
1674 goto err_sirq;
1675
1676 irq_buffers = alloc_percpu(struct trace_buffer_struct);
1677 if (!irq_buffers)
1678 goto err_irq;
1679
1680 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
1681 if (!nmi_buffers)
1682 goto err_nmi;
1683
1684 trace_percpu_buffer = buffers;
1685 trace_percpu_sirq_buffer = sirq_buffers;
1686 trace_percpu_irq_buffer = irq_buffers;
1687 trace_percpu_nmi_buffer = nmi_buffers;
1688
1689 return 0;
1690
1691 err_nmi:
1692 free_percpu(irq_buffers);
1693 err_irq:
1694 free_percpu(sirq_buffers);
1695 err_sirq:
1696 free_percpu(buffers);
1697 err_warn:
1698 WARN(1, "Could not allocate percpu trace_printk buffer");
1699 return -ENOMEM;
1700}
1701
Steven Rostedt81698832012-10-11 10:15:05 -04001702static int buffers_allocated;
1703
Steven Rostedt07d777f2011-09-22 14:01:55 -04001704void trace_printk_init_buffers(void)
1705{
Steven Rostedt07d777f2011-09-22 14:01:55 -04001706 if (buffers_allocated)
1707 return;
1708
1709 if (alloc_percpu_trace_buffer())
1710 return;
1711
1712 pr_info("ftrace: Allocated trace_printk buffers\n");
1713
Steven Rostedtb382ede62012-10-10 21:44:34 -04001714 /* Expand the buffers to set size */
1715 tracing_update_buffers();
1716
Steven Rostedt07d777f2011-09-22 14:01:55 -04001717 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04001718
1719 /*
1720 * trace_printk_init_buffers() can be called by modules.
1721 * If that happens, then we need to start cmdline recording
1722 * directly here. If global_trace.trace_buffer.buffer is already
1723 * allocated here, then this was called by module code.
1724 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001725 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04001726 tracing_start_cmdline_record();
1727}
1728
1729void trace_printk_start_comm(void)
1730{
1731 /* Start tracing comms if trace printk is set */
1732 if (!buffers_allocated)
1733 return;
1734 tracing_start_cmdline_record();
1735}
1736
1737static void trace_printk_start_stop_comm(int enabled)
1738{
1739 if (!buffers_allocated)
1740 return;
1741
1742 if (enabled)
1743 tracing_start_cmdline_record();
1744 else
1745 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04001746}
1747
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001748/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001749 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001750 * @ip: the address of the caller
 * @fmt: the format string to write
 * @args: the va_list of arguments for @fmt
1751 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04001752int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001753{
Tom Zanussie1112b42009-03-31 00:48:49 -05001754 struct ftrace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001755 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04001756 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001757 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001758 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001759 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001760 char *tbuffer;
1761 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001762
1763 if (unlikely(tracing_selftest_running || tracing_disabled))
1764 return 0;
1765
1766 /* Don't pollute graph traces with trace_vprintk internals */
1767 pause_graph_tracing();
1768
1769 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04001770 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001771
Steven Rostedt07d777f2011-09-22 14:01:55 -04001772 tbuffer = get_trace_buf();
1773 if (!tbuffer) {
1774 len = 0;
1775 goto out;
1776 }
1777
1778 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1779
1780 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001781 goto out;
1782
Steven Rostedt07d777f2011-09-22 14:01:55 -04001783 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001784 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001785 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001786 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1787 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001788 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04001789 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001790 entry = ring_buffer_event_data(event);
1791 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001792 entry->fmt = fmt;
1793
Steven Rostedt07d777f2011-09-22 14:01:55 -04001794 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Steven Rostedtd9313692010-01-06 17:27:11 -05001795 if (!filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001796 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05001797 ftrace_trace_stack(buffer, flags, 6, pc);
1798 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001799
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001800out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04001801 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001802 unpause_graph_tracing();
1803
1804 return len;
1805}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001806EXPORT_SYMBOL_GPL(trace_vbprintk);
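/*
 * Simplified sketch of how the function above is normally reached:
 * a trace_printk() with a compile-time constant format is converted
 * into a binary print that funnels down to trace_vbprintk():
 *
 *	trace_printk("read %d bytes\n", len);
 *		-> __trace_bprintk(_THIS_IP_, fmt, len)
 *		-> trace_vbprintk(ip, fmt, args)
 */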
1807
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001808static int
1809__trace_array_vprintk(struct ring_buffer *buffer,
1810 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001811{
Tom Zanussie1112b42009-03-31 00:48:49 -05001812 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001813 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001814 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001815 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001816 unsigned long flags;
1817 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001818
1819 if (tracing_disabled || tracing_selftest_running)
1820 return 0;
1821
Steven Rostedt07d777f2011-09-22 14:01:55 -04001822 /* Don't pollute graph traces with trace_vprintk internals */
1823 pause_graph_tracing();
1824
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001825 pc = preempt_count();
1826 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001827
1829 tbuffer = get_trace_buf();
1830 if (!tbuffer) {
1831 len = 0;
1832 goto out;
1833 }
1834
1835 len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1836 if (len > TRACE_BUF_SIZE)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001837 goto out;
1838
Steven Rostedt07d777f2011-09-22 14:01:55 -04001839 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001840 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04001841 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04001842 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001843 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04001844 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001845 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01001846 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001847
Steven Rostedt07d777f2011-09-22 14:01:55 -04001848 memcpy(&entry->buf, tbuffer, len);
Carsten Emdec13d2f72009-11-16 20:56:13 +01001849 entry->buf[len] = '\0';
Steven Rostedtd9313692010-01-06 17:27:11 -05001850 if (!filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001851 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04001852 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05001853 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001854 out:
1855 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04001856 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01001857
1858 return len;
1859}
Steven Rostedt659372d2009-09-03 19:11:07 -04001860
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001861int trace_array_vprintk(struct trace_array *tr,
1862 unsigned long ip, const char *fmt, va_list args)
1863{
1864 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
1865}
1866
1867int trace_array_printk(struct trace_array *tr,
1868 unsigned long ip, const char *fmt, ...)
1869{
1870 int ret;
1871 va_list ap;
1872
1873 if (!(trace_flags & TRACE_ITER_PRINTK))
1874 return 0;
1875
1876 va_start(ap, fmt);
1877 ret = trace_array_vprintk(tr, ip, fmt, ap);
1878 va_end(ap);
1879 return ret;
1880}
1881
1882int trace_array_printk_buf(struct ring_buffer *buffer,
1883 unsigned long ip, const char *fmt, ...)
1884{
1885 int ret;
1886 va_list ap;
1887
1888 if (!(trace_flags & TRACE_ITER_PRINTK))
1889 return 0;
1890
1891 va_start(ap, fmt);
1892 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
1893 va_end(ap);
1894 return ret;
1895}
1896
Steven Rostedt659372d2009-09-03 19:11:07 -04001897int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1898{
Steven Rostedta813a152009-10-09 01:41:35 -04001899 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04001900}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01001901EXPORT_SYMBOL_GPL(trace_vprintk);
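/*
 * trace_vprintk() is the plain-text counterpart of trace_vbprintk():
 * when trace_printk() cannot take the binary fast path (the format
 * is not a compile-time constant), the text is formatted with
 * vsnprintf() and recorded through this route instead (a simplified
 * description).
 */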
1902
Robert Richtere2ac8ef2008-11-12 12:59:32 +01001903static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04001904{
Steven Rostedt6d158a82012-06-27 20:46:14 -04001905 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
1906
Steven Rostedt5a90f572008-09-03 17:42:51 -04001907 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04001908 if (buf_iter)
1909 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04001910}
1911
Ingo Molnare309b412008-05-12 21:20:51 +02001912static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04001913peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1914 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04001915{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001916 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04001917 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04001918
Steven Rostedtd7690412008-10-01 00:29:53 -04001919 if (buf_iter)
1920 event = ring_buffer_iter_peek(buf_iter, ts);
1921 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001922 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04001923 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04001924
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001925 if (event) {
1926 iter->ent_size = ring_buffer_event_length(event);
1927 return ring_buffer_event_data(event);
1928 }
1929 iter->ent_size = 0;
1930 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04001931}
Steven Rostedtd7690412008-10-01 00:29:53 -04001932
Steven Rostedtdd0e5452008-08-01 12:26:41 -04001933static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04001934__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1935 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001936{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001937 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001938 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08001939 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01001940 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001941 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001942 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04001943 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001944 int cpu;
1945
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01001946 /*
1947 * If we are in a per_cpu trace file, don't bother iterating over
1948 * all cpus; just peek at that cpu directly.
1949 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05001950 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01001951 if (ring_buffer_empty_cpu(buffer, cpu_file))
1952 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04001953 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01001954 if (ent_cpu)
1955 *ent_cpu = cpu_file;
1956
1957 return ent;
1958 }
1959
Steven Rostedtab464282008-05-12 21:21:00 +02001960 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001961
1962 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001963 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04001964
Steven Rostedtbc21b472010-03-31 19:49:26 -04001965 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04001966
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02001967 /*
1968 * Pick the entry with the smallest timestamp:
1969 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001970 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001971 next = ent;
1972 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001973 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04001974 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04001975 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001976 }
1977 }
1978
Steven Rostedt12b5da32012-03-27 10:43:28 -04001979 iter->ent_size = next_size;
1980
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001981 if (ent_cpu)
1982 *ent_cpu = next_cpu;
1983
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001984 if (ent_ts)
1985 *ent_ts = next_ts;
1986
Steven Rostedtbc21b472010-03-31 19:49:26 -04001987 if (missing_events)
1988 *missing_events = next_lost;
1989
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001990 return next;
1991}
1992
Steven Rostedtdd0e5452008-08-01 12:26:41 -04001993/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02001994struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1995 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02001996{
Steven Rostedtbc21b472010-03-31 19:49:26 -04001997 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04001998}
Ingo Molnar8c523a92008-05-12 21:20:46 +02001999
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002000/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002001void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002002{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002003 iter->ent = __find_next_entry(iter, &iter->cpu,
2004 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002005
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002006 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002007 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002008
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002009 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002010}
2011
Ingo Molnare309b412008-05-12 21:20:51 +02002012static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002013{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002014 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002015 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002016}
2017
Ingo Molnare309b412008-05-12 21:20:51 +02002018static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002019{
2020 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002021 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002022 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002023
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002024 WARN_ON_ONCE(iter->leftover);
2025
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002026 (*pos)++;
2027
2028 /* can't go backwards */
2029 if (iter->idx > i)
2030 return NULL;
2031
2032 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002033 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002034 else
2035 ent = iter;
2036
2037 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002038 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002039
2040 iter->pos = *pos;
2041
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002042 return ent;
2043}
2044
Jason Wessel955b61e2010-08-05 09:22:23 -05002045void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002046{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002047 struct ring_buffer_event *event;
2048 struct ring_buffer_iter *buf_iter;
2049 unsigned long entries = 0;
2050 u64 ts;
2051
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002052 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002053
Steven Rostedt6d158a82012-06-27 20:46:14 -04002054 buf_iter = trace_buffer_iter(iter, cpu);
2055 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002056 return;
2057
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002058 ring_buffer_iter_reset(buf_iter);
2059
2060 /*
2061 * We could have the case with the max latency tracers
2062 * that a reset never took place on a cpu. This is evident
2063 * by the timestamp being before the start of the buffer.
2064 */
2065 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002066 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002067 break;
2068 entries++;
2069 ring_buffer_read(buf_iter, NULL);
2070 }
2071
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002072 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002073}
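/*
 * Numeric example of the skip logic above (values invented): if
 * time_start is 2000 but the cpu's buffer still holds events stamped
 * 1500..1900 from before the last reset, those events are counted
 * into skipped_entries and later subtracted from the displayed
 * entry totals by get_total_entries().
 */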
2074
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002075/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002076 * The current tracer is copied to avoid taking a global lock
2077 * all around.
2078 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002079static void *s_start(struct seq_file *m, loff_t *pos)
2080{
2081 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002082 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002083 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002084 void *p = NULL;
2085 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002086 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002087
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002088 /*
2089 * copy the tracer to avoid using a global lock all around.
2090 * iter->trace is a copy of current_trace, the pointer to the
2091 * name may be used instead of a strcmp(), as iter->trace->name
2092 * will point to the same string as current_trace->name.
2093 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002094 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002095 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2096 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002097 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002098
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002099#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002100 if (iter->snapshot && iter->trace->use_max_tr)
2101 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002102#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002103
2104 if (!iter->snapshot)
2105 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002106
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002107 if (*pos != iter->pos) {
2108 iter->ent = NULL;
2109 iter->cpu = 0;
2110 iter->idx = -1;
2111
Steven Rostedtae3b5092013-01-23 15:22:59 -05002112 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002113 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002114 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002115 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002116 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002117
Lai Jiangshanac91d852010-03-02 17:54:50 +08002118 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002119 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2120 ;
2121
2122 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002123 /*
2124 * If we overflowed the seq_file before, then we want
2125 * to just reuse the trace_seq buffer again.
2126 */
2127 if (iter->leftover)
2128 p = iter;
2129 else {
2130 l = *pos - 1;
2131 p = s_next(m, p, &l);
2132 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002133 }
2134
Lai Jiangshan4f535962009-05-18 19:35:34 +08002135 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002136 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002137 return p;
2138}
2139
2140static void s_stop(struct seq_file *m, void *p)
2141{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002142 struct trace_iterator *iter = m->private;
2143
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002144#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002145 if (iter->snapshot && iter->trace->use_max_tr)
2146 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002147#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002148
2149 if (!iter->snapshot)
2150 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002151
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002152 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002153 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002154}
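/*
 * s_start(), s_next() and s_stop() are the iterator half of the
 * seq_file interface for the "trace" file; they are wired together
 * with the show routine later in this file, roughly:
 *
 *	static const struct seq_operations tracer_seq_ops = {
 *		.start	= s_start,
 *		.next	= s_next,
 *		.stop	= s_stop,
 *		.show	= s_show,
 *	};
 */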
2155
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002156static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002157get_total_entries(struct trace_buffer *buf,
2158 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002159{
2160 unsigned long count;
2161 int cpu;
2162
2163 *total = 0;
2164 *entries = 0;
2165
2166 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002167 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002168 /*
2169 * If this buffer has skipped entries, then we hold all
2170 * entries for the trace and we need to ignore the
2171 * ones before the time stamp.
2172 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002173 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2174 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002175 /* total is the same as the entries */
2176 *total += count;
2177 } else
2178 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002179 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002180 *entries += count;
2181 }
2182}
2183
Ingo Molnare309b412008-05-12 21:20:51 +02002184static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002185{
Michael Ellermana6168352008-08-20 16:36:11 -07002186 seq_puts(m, "# _------=> CPU# \n");
2187 seq_puts(m, "# / _-----=> irqs-off \n");
2188 seq_puts(m, "# | / _----=> need-resched \n");
2189 seq_puts(m, "# || / _---=> hardirq/softirq \n");
2190 seq_puts(m, "# ||| / _--=> preempt-depth \n");
Steven Rostedte6e1e252011-03-09 10:41:56 -05002191 seq_puts(m, "# |||| / delay \n");
2192 seq_puts(m, "# cmd pid ||||| time | caller \n");
2193 seq_puts(m, "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002194}
2195
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002196static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002197{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002198 unsigned long total;
2199 unsigned long entries;
2200
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002201 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002202 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2203 entries, total, num_online_cpus());
2204 seq_puts(m, "#\n");
2205}
2206
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002207static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002208{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002209 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002210 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
Michael Ellermana6168352008-08-20 16:36:11 -07002211 seq_puts(m, "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002212}
2213
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002214static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002215{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002216 print_event_info(buf, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002217 seq_puts(m, "# _-----=> irqs-off\n");
2218 seq_puts(m, "# / _----=> need-resched\n");
2219 seq_puts(m, "# | / _---=> hardirq/softirq\n");
2220 seq_puts(m, "# || / _--=> preempt-depth\n");
2221 seq_puts(m, "# ||| / delay\n");
2222 seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
2223 seq_puts(m, "# | | | |||| | |\n");
2224}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002225
Jiri Olsa62b915f2010-04-02 19:01:22 +02002226void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002227print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2228{
2229 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002230 struct trace_buffer *buf = iter->trace_buffer;
2231 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002232 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002233 unsigned long entries;
2234 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002235 const char *name = "preemption";
2236
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002237 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002238
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002239 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002240
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002241 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002242 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002243 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002244 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002245 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002246 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002247 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002248 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002249 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002250 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002251#if defined(CONFIG_PREEMPT_NONE)
2252 "server",
2253#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2254 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002255#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002256 "preempt",
2257#else
2258 "unknown",
2259#endif
2260 /* These are reserved for later use */
2261 0, 0, 0, 0);
2262#ifdef CONFIG_SMP
2263 seq_printf(m, " #P:%d)\n", num_online_cpus());
2264#else
2265 seq_puts(m, ")\n");
2266#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002267 seq_puts(m, "# -----------------\n");
2268 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002269 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002270 data->comm, data->pid,
2271 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002272 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002273 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002274
2275 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002276 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002277 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2278 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002279 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002280 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2281 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002282 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002283 }
2284
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002285 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002286}
2287
Steven Rostedta3097202008-11-07 22:36:02 -05002288static void test_cpu_buff_start(struct trace_iterator *iter)
2289{
2290 struct trace_seq *s = &iter->seq;
2291
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002292 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2293 return;
2294
2295 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2296 return;
2297
Rusty Russell44623442009-01-01 10:12:23 +10302298 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002299 return;
2300
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002301 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002302 return;
2303
Rusty Russell44623442009-01-01 10:12:23 +10302304 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002305
2306 /* Don't print the "buffer started" banner for the first entry of the trace */
2307 if (iter->idx > 1)
2308 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2309 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002310}
2311
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002312static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002313{
Steven Rostedt214023c2008-05-12 21:20:46 +02002314 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002315 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002316 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002317 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002318
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002319 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002320
Steven Rostedta3097202008-11-07 22:36:02 -05002321 test_cpu_buff_start(iter);
2322
Steven Rostedtf633cef2008-12-23 23:24:13 -05002323 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002324
2325 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt27d48be2009-03-04 21:57:29 -05002326 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2327 if (!trace_print_lat_context(iter))
2328 goto partial;
2329 } else {
2330 if (!trace_print_context(iter))
2331 goto partial;
2332 }
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002333 }
2334
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002335 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002336 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002337
2338 if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2339 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002340
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002341 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002342partial:
2343 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002344}
2345
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002346static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002347{
2348 struct trace_seq *s = &iter->seq;
2349 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002350 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002351
2352 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002353
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002354 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002355 if (!trace_seq_printf(s, "%d %d %llu ",
2356 entry->pid, iter->cpu, iter->ts))
2357 goto partial;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002358 }
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002359
Steven Rostedtf633cef2008-12-23 23:24:13 -05002360 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002361 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002362 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002363
2364 if (!trace_seq_printf(s, "%d ?\n", entry->type))
2365 goto partial;
Steven Rostedt7104f302008-10-01 10:52:51 -04002366
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002367 return TRACE_TYPE_HANDLED;
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002368partial:
2369 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002370}
2371
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002372static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002373{
2374 struct trace_seq *s = &iter->seq;
2375 unsigned char newline = '\n';
2376 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002377 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002378
2379 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002380
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002381 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2382 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2383 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2384 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2385 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002386
Steven Rostedtf633cef2008-12-23 23:24:13 -05002387 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002388 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002389 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002390 if (ret != TRACE_TYPE_HANDLED)
2391 return ret;
2392 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002393
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002394 SEQ_PUT_FIELD_RET(s, newline);
2395
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002396 return TRACE_TYPE_HANDLED;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002397}
2398
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002399static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002400{
2401 struct trace_seq *s = &iter->seq;
2402 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002403 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002404
2405 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002406
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002407 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2408 SEQ_PUT_FIELD_RET(s, entry->pid);
Steven Rostedt1830b52d2009-02-07 19:38:43 -05002409 SEQ_PUT_FIELD_RET(s, iter->cpu);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002410 SEQ_PUT_FIELD_RET(s, iter->ts);
2411 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002412
Steven Rostedtf633cef2008-12-23 23:24:13 -05002413 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002414 return event ? event->funcs->binary(iter, 0, event) :
2415 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002416}
2417
Jiri Olsa62b915f2010-04-02 19:01:22 +02002418int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002419{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002420 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002421 int cpu;
2422
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002423 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002424 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002425 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002426 buf_iter = trace_buffer_iter(iter, cpu);
2427 if (buf_iter) {
2428 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002429 return 0;
2430 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002431 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002432 return 0;
2433 }
2434 return 1;
2435 }
2436
Steven Rostedtab464282008-05-12 21:21:00 +02002437 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002438 buf_iter = trace_buffer_iter(iter, cpu);
2439 if (buf_iter) {
2440 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002441 return 0;
2442 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002443 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002444 return 0;
2445 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002446 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002447
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002448 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002449}
2450
Lai Jiangshan4f535962009-05-18 19:35:34 +08002451/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002452enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002453{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002454 enum print_line_t ret;
2455
Jiri Olsaee5e51f2011-03-25 12:05:18 +01002456 if (iter->lost_events &&
2457 !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2458 iter->cpu, iter->lost_events))
2459 return TRACE_TYPE_PARTIAL_LINE;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002460
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002461 if (iter->trace && iter->trace->print_line) {
2462 ret = iter->trace->print_line(iter);
2463 if (ret != TRACE_TYPE_UNHANDLED)
2464 return ret;
2465 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002466
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002467 if (iter->ent->type == TRACE_BPRINT &&
2468 trace_flags & TRACE_ITER_PRINTK &&
2469 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002470 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002471
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002472 if (iter->ent->type == TRACE_PRINT &&
2473 trace_flags & TRACE_ITER_PRINTK &&
2474 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002475 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002476
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002477 if (trace_flags & TRACE_ITER_BIN)
2478 return print_bin_fmt(iter);
2479
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002480 if (trace_flags & TRACE_ITER_HEX)
2481 return print_hex_fmt(iter);
2482
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002483 if (trace_flags & TRACE_ITER_RAW)
2484 return print_raw_fmt(iter);
2485
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002486 return print_trace_fmt(iter);
2487}
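/*
 * Dispatch order used above: the lost-event annotation is emitted first,
 * then the tracer's own print_line() hook gets a chance, then the
 * printk-msg-only shortcuts, and finally the generic bin/hex/raw/default
 * output formats selected through the trace_options flags.
 */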
2488
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002489void trace_latency_header(struct seq_file *m)
2490{
2491 struct trace_iterator *iter = m->private;
2492
2493 /* print nothing if the buffers are empty */
2494 if (trace_empty(iter))
2495 return;
2496
2497 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2498 print_trace_header(m, iter);
2499
2500 if (!(trace_flags & TRACE_ITER_VERBOSE))
2501 print_lat_help_header(m);
2502}
2503
Jiri Olsa62b915f2010-04-02 19:01:22 +02002504void trace_default_header(struct seq_file *m)
2505{
2506 struct trace_iterator *iter = m->private;
2507
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002508 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2509 return;
2510
Jiri Olsa62b915f2010-04-02 19:01:22 +02002511 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2512 /* print nothing if the buffers are empty */
2513 if (trace_empty(iter))
2514 return;
2515 print_trace_header(m, iter);
2516 if (!(trace_flags & TRACE_ITER_VERBOSE))
2517 print_lat_help_header(m);
2518 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002519 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2520 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002521 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002522 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002523 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002524 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002525 }
2526}
2527
Steven Rostedte0a413f2011-09-29 21:26:16 -04002528static void test_ftrace_alive(struct seq_file *m)
2529{
2530 if (!ftrace_is_dead())
2531 return;
2532 seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2533 seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n");
2534}
2535
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002536#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002537static void show_snapshot_main_help(struct seq_file *m)
2538{
2539 seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n");
2540 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2541 seq_printf(m, "# Takes a snapshot of the main buffer.\n");
2542 seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n");
2543	seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2544 seq_printf(m, "# is not a '0' or '1')\n");
2545}
2546
2547static void show_snapshot_percpu_help(struct seq_file *m)
2548{
2549 seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
2550#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2551 seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n");
2552 seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n");
2553#else
2554 seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n");
2555 seq_printf(m, "# Must use main snapshot file to allocate.\n");
2556#endif
2557 seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n");
2558	seq_printf(m, "# (Doesn't have to be '2'; works with any number that\n");
2559 seq_printf(m, "# is not a '0' or '1')\n");
2560}
2561
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002562static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2563{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002564 if (iter->tr->allocated_snapshot)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002565 seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
2566 else
2567 seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
2568
2569 seq_printf(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002570 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2571 show_snapshot_main_help(m);
2572 else
2573 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002574}
2575#else
2576/* Should never be called */
2577static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2578#endif
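/*
 * Example session (a sketch of the semantics documented above; assumes
 * debugfs is mounted at /sys/kernel/debug):
 *
 *   # echo 1 > /sys/kernel/debug/tracing/snapshot    <- allocate and take one
 *   # cat /sys/kernel/debug/tracing/snapshot         <- read the snapshot
 *   # echo 0 > /sys/kernel/debug/tracing/snapshot    <- clear and free it
 */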
2579
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002580static int s_show(struct seq_file *m, void *v)
2581{
2582 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002583 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002584
2585 if (iter->ent == NULL) {
2586 if (iter->tr) {
2587 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2588 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002589 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002590 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002591 if (iter->snapshot && trace_empty(iter))
2592 print_snapshot_help(m, iter);
2593 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002594 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002595 else
2596 trace_default_header(m);
2597
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002598 } else if (iter->leftover) {
2599 /*
2600 * If we filled the seq_file buffer earlier, we
2601 * want to just show it now.
2602 */
2603 ret = trace_print_seq(m, &iter->seq);
2604
2605 /* ret should this time be zero, but you never know */
2606 iter->leftover = ret;
2607
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002608 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002609 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002610 ret = trace_print_seq(m, &iter->seq);
2611 /*
2612 * If we overflow the seq_file buffer, then it will
2613 * ask us for this data again at start up.
2614 * Use that instead.
2615 * ret is 0 if seq_file write succeeded.
2616 * -1 otherwise.
2617 */
2618 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002619 }
2620
2621 return 0;
2622}
2623
James Morris88e9d342009-09-22 16:43:43 -07002624static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002625 .start = s_start,
2626 .next = s_next,
2627 .stop = s_stop,
2628 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002629};
2630
Ingo Molnare309b412008-05-12 21:20:51 +02002631static struct trace_iterator *
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002632__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002633{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002634 struct trace_cpu *tc = inode->i_private;
2635 struct trace_array *tr = tc->tr;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002636 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02002637 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002638
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002639 if (tracing_disabled)
2640 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02002641
Jiri Olsa50e18b92012-04-25 10:23:39 +02002642 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002643 if (!iter)
2644 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002645
Steven Rostedt6d158a82012-06-27 20:46:14 -04002646 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2647 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002648 if (!iter->buffer_iter)
2649 goto release;
2650
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002651 /*
2652 * We make a copy of the current tracer to avoid concurrent
2653 * changes on it while we are reading.
2654 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002655 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002656 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002657 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002658 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002659
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002660 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002661
Li Zefan79f55992009-06-15 14:58:26 +08002662 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002663 goto fail;
2664
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002665 iter->tr = tr;
2666
2667#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002668 /* Currently only the top directory has a snapshot */
2669 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002670 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002671 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002672#endif
2673 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002674 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002675 iter->pos = -1;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002676 mutex_init(&iter->mutex);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002677 iter->cpu_file = tc->cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002678
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002679 /* Notify the tracer early; before we stop tracing. */
2680 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01002681 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002682
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002683 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002684 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002685 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2686
David Sharp8be07092012-11-13 12:18:22 -08002687 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
2688 if (trace_clocks[trace_clock_id].in_ns)
2689 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
2690
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002691 /* stop the trace while dumping if we are not opening "snapshot" */
2692 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002693 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002694
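	/*
	 * The iterators are brought up in two phases: every per-cpu
	 * iterator is prepared first, then one shared
	 * ring_buffer_read_prepare_sync() runs before any of them is
	 * started, so a single synchronization pass covers all CPUs
	 * instead of one per CPU.
	 */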
Steven Rostedtae3b5092013-01-23 15:22:59 -05002695 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002696 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002697 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002698 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002699 }
2700 ring_buffer_read_prepare_sync();
2701 for_each_tracing_cpu(cpu) {
2702 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002703 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002704 }
2705 } else {
2706 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002707 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002708 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07002709 ring_buffer_read_prepare_sync();
2710 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002711 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002712 }
2713
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05002714 tr->ref++;
2715
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002716 mutex_unlock(&trace_types_lock);
2717
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002718 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002719
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002720 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002721 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002722 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04002723 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03002724release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02002725 seq_release_private(inode, file);
2726 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002727}
2728
2729int tracing_open_generic(struct inode *inode, struct file *filp)
2730{
Steven Rostedt60a11772008-05-12 21:20:44 +02002731 if (tracing_disabled)
2732 return -ENODEV;
2733
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002734 filp->private_data = inode->i_private;
2735 return 0;
2736}
2737
Hannes Eder4fd27352009-02-10 19:44:12 +01002738static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002739{
matt mooney907f2782010-09-27 19:04:53 -07002740 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002741 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002742 struct trace_array *tr;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002743 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002744
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002745 if (!(file->f_mode & FMODE_READ))
2746 return 0;
2747
2748 iter = m->private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002749 tr = iter->tr;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002750
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002751 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05002752
2753 WARN_ON(!tr->ref);
2754 tr->ref--;
2755
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002756 for_each_tracing_cpu(cpu) {
2757 if (iter->buffer_iter[cpu])
2758 ring_buffer_read_finish(iter->buffer_iter[cpu]);
2759 }
2760
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002761 if (iter->trace && iter->trace->close)
2762 iter->trace->close(iter);
2763
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002764 if (!iter->snapshot)
2765 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002766 tracing_start_tr(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002767 mutex_unlock(&trace_types_lock);
2768
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002769 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002770 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002771 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04002772 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02002773 seq_release_private(inode, file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002774 return 0;
2775}
2776
2777static int tracing_open(struct inode *inode, struct file *file)
2778{
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05002779 struct trace_iterator *iter;
2780 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002781
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002782 /* If this file was open for write, then erase contents */
2783 if ((file->f_mode & FMODE_WRITE) &&
Steven Rostedt8650ae32009-07-22 23:29:30 -04002784 (file->f_flags & O_TRUNC)) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002785 struct trace_cpu *tc = inode->i_private;
2786 struct trace_array *tr = tc->tr;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002787
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002788 if (tc->cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002789 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002790 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002791 tracing_reset(&tr->trace_buffer, tc->cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002792 }
2793
2794 if (file->f_mode & FMODE_READ) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002795 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002796 if (IS_ERR(iter))
2797 ret = PTR_ERR(iter);
2798 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2799 iter->iter_flags |= TRACE_FILE_LAT_FMT;
2800 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002801 return ret;
2802}
2803
Ingo Molnare309b412008-05-12 21:20:51 +02002804static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002805t_next(struct seq_file *m, void *v, loff_t *pos)
2806{
Li Zefanf129e962009-06-24 09:53:44 +08002807 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002808
2809 (*pos)++;
2810
2811 if (t)
2812 t = t->next;
2813
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002814 return t;
2815}
2816
2817static void *t_start(struct seq_file *m, loff_t *pos)
2818{
Li Zefanf129e962009-06-24 09:53:44 +08002819 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002820 loff_t l = 0;
2821
2822 mutex_lock(&trace_types_lock);
Li Zefanf129e962009-06-24 09:53:44 +08002823 for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002824 ;
2825
2826 return t;
2827}
2828
2829static void t_stop(struct seq_file *m, void *p)
2830{
2831 mutex_unlock(&trace_types_lock);
2832}
2833
2834static int t_show(struct seq_file *m, void *v)
2835{
2836 struct tracer *t = v;
2837
2838 if (!t)
2839 return 0;
2840
2841 seq_printf(m, "%s", t->name);
2842 if (t->next)
2843 seq_putc(m, ' ');
2844 else
2845 seq_putc(m, '\n');
2846
2847 return 0;
2848}
2849
James Morris88e9d342009-09-22 16:43:43 -07002850static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002851 .start = t_start,
2852 .next = t_next,
2853 .stop = t_stop,
2854 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002855};
2856
2857static int show_traces_open(struct inode *inode, struct file *file)
2858{
Steven Rostedt60a11772008-05-12 21:20:44 +02002859 if (tracing_disabled)
2860 return -ENODEV;
2861
Li Zefanf129e962009-06-24 09:53:44 +08002862 return seq_open(file, &show_traces_seq_ops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002863}
2864
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002865static ssize_t
2866tracing_write_stub(struct file *filp, const char __user *ubuf,
2867 size_t count, loff_t *ppos)
2868{
2869 return count;
2870}
2871
Slava Pestov364829b2010-11-24 15:13:16 -08002872static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2873{
2874 if (file->f_mode & FMODE_READ)
2875 return seq_lseek(file, offset, origin);
2876 else
2877 return 0;
2878}
2879
Steven Rostedt5e2336a2009-03-05 21:44:55 -05002880static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002881 .open = tracing_open,
2882 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04002883 .write = tracing_write_stub,
Slava Pestov364829b2010-11-24 15:13:16 -08002884 .llseek = tracing_seek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02002885 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002886};
2887
Steven Rostedt5e2336a2009-03-05 21:44:55 -05002888static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02002889 .open = show_traces_open,
2890 .read = seq_read,
2891 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02002892 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02002893};
2894
Ingo Molnar36dfe922008-05-12 21:20:52 +02002895/*
2896 * Only trace on a CPU if the bitmask is set:
2897 */
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302898static cpumask_var_t tracing_cpumask;
Ingo Molnar36dfe922008-05-12 21:20:52 +02002899
2900/*
2901 * The tracer itself will not take this lock, but we still want
2902 * to provide a consistent cpumask to user-space:
2903 */
2904static DEFINE_MUTEX(tracing_cpumask_update_lock);
2905
2906/*
2907 * Temporary storage for the character representation of the
2908 * CPU bitmask (and one more byte for the newline):
2909 */
2910static char mask_str[NR_CPUS + 1];
2911
Ingo Molnarc7078de2008-05-12 21:20:52 +02002912static ssize_t
2913tracing_cpumask_read(struct file *filp, char __user *ubuf,
2914 size_t count, loff_t *ppos)
2915{
Ingo Molnar36dfe922008-05-12 21:20:52 +02002916 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02002917
2918 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02002919
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302920 len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
Ingo Molnar36dfe922008-05-12 21:20:52 +02002921 if (count - len < 2) {
2922 count = -EINVAL;
2923 goto out_err;
2924 }
2925 len += sprintf(mask_str + len, "\n");
2926 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2927
2928out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02002929 mutex_unlock(&tracing_cpumask_update_lock);
2930
2931 return count;
2932}
2933
2934static ssize_t
2935tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2936 size_t count, loff_t *ppos)
2937{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002938 struct trace_array *tr = filp->private_data;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302939 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002940 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302941
2942 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2943 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02002944
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302945 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02002946 if (err)
2947 goto err_unlock;
2948
Li Zefan215368e2009-06-15 10:56:42 +08002949 mutex_lock(&tracing_cpumask_update_lock);
2950
Steven Rostedta5e25882008-12-02 15:34:05 -05002951 local_irq_disable();
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002952 arch_spin_lock(&ftrace_max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02002953 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02002954 /*
2955 * Increase/decrease the disabled counter if we are
2956 * about to flip a bit in the cpumask:
2957 */
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302958 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2959 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002960 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
2961 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02002962 }
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302963 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2964 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002965 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
2966 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02002967 }
2968 }
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01002969 arch_spin_unlock(&ftrace_max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05002970 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02002971
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302972 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02002973
Ingo Molnarc7078de2008-05-12 21:20:52 +02002974 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10302975 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02002976
Ingo Molnarc7078de2008-05-12 21:20:52 +02002977 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02002978
2979err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08002980 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02002981
2982 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02002983}
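/*
 * Example (sketch): cpumask_parse_user() above takes a hex mask, so
 * restricting tracing to CPUs 0 and 1 looks like:
 *
 *   # echo 3 > /sys/kernel/debug/tracing/tracing_cpumask
 */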
2984
Steven Rostedt5e2336a2009-03-05 21:44:55 -05002985static const struct file_operations tracing_cpumask_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02002986 .open = tracing_open_generic,
2987 .read = tracing_cpumask_read,
2988 .write = tracing_cpumask_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02002989 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002990};
2991
Li Zefanfdb372e2009-12-08 11:15:59 +08002992static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002993{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05002994 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002995 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05002996 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05002997 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01002998
Steven Rostedtd8e83d22009-02-26 23:55:58 -05002999 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003000 tracer_flags = tr->current_trace->flags->val;
3001 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003002
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003003 for (i = 0; trace_options[i]; i++) {
3004 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003005 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003006 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003007 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003008 }
3009
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003010 for (i = 0; trace_opts[i].name; i++) {
3011 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003012 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003013 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003014 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003015 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003016 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003017
Li Zefanfdb372e2009-12-08 11:15:59 +08003018 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003019}
3020
Li Zefan8d18eaa2009-12-08 11:17:06 +08003021static int __set_tracer_option(struct tracer *trace,
3022 struct tracer_flags *tracer_flags,
3023 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003024{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003025 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003026
Li Zefan8d18eaa2009-12-08 11:17:06 +08003027 ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003028 if (ret)
3029 return ret;
3030
3031 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003032 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003033 else
Zhaolei77708412009-08-07 18:53:21 +08003034 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003035 return 0;
3036}
3037
Li Zefan8d18eaa2009-12-08 11:17:06 +08003038/* Try to assign a tracer specific option */
3039static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
3040{
3041 struct tracer_flags *tracer_flags = trace->flags;
3042 struct tracer_opt *opts = NULL;
3043 int i;
3044
3045 for (i = 0; tracer_flags->opts[i].name; i++) {
3046 opts = &tracer_flags->opts[i];
3047
3048 if (strcmp(cmp, opts->name) == 0)
3049 return __set_tracer_option(trace, trace->flags,
3050 opts, neg);
3051 }
3052
3053 return -EINVAL;
3054}
3055
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003056/* Some tracers require overwrite to stay enabled */
3057int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3058{
3059 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3060 return -1;
3061
3062 return 0;
3063}
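/*
 * Sketch (hypothetical, not a tracer defined in this file): a
 * latency-style tracer that depends on ring-buffer overwrite mode can
 * wire trace_keep_overwrite up as its flag_changed callback so that
 * userspace cannot clear the overwrite flag while the tracer is
 * enabled. The tracer name below is made up for illustration.
 */
static struct tracer example_overwrite_tracer __maybe_unused = {
	.name		= "example_overwrite",
	.flag_changed	= trace_keep_overwrite,
};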
3064
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003065int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003066{
3067 /* do nothing if flag is already set */
3068 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003069 return 0;
3070
3071 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003072 if (tr->current_trace->flag_changed)
3073 if (tr->current_trace->flag_changed(tr->current_trace, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003074 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003075
3076 if (enabled)
3077 trace_flags |= mask;
3078 else
3079 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003080
3081 if (mask == TRACE_ITER_RECORD_CMD)
3082 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003083
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003084 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003085 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003086#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003087 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003088#endif
3089 }
Steven Rostedt81698832012-10-11 10:15:05 -04003090
3091 if (mask == TRACE_ITER_PRINTK)
3092 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003093
3094 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003095}
3096
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003097static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003098{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003099 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003100 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003101 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003102 int i;
3103
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003104 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003105
Li Zefan8d18eaa2009-12-08 11:17:06 +08003106 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003107 neg = 1;
3108 cmp += 2;
3109 }
3110
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003111 mutex_lock(&trace_types_lock);
3112
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003113 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003114 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003115 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003116 break;
3117 }
3118 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003119
3120 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003121 if (!trace_options[i])
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003122 ret = set_tracer_option(tr->current_trace, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003123
3124 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003125
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003126 return ret;
3127}
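/*
 * Example (sketch): the option strings parsed above arrive from writes
 * to the trace_options file; a "no" prefix clears the flag:
 *
 *   # echo overwrite > /sys/kernel/debug/tracing/trace_options
 *   # echo noblock > /sys/kernel/debug/tracing/trace_options
 */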
3128
3129static ssize_t
3130tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3131 size_t cnt, loff_t *ppos)
3132{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003133 struct seq_file *m = filp->private_data;
3134 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003135 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003136 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003137
3138 if (cnt >= sizeof(buf))
3139 return -EINVAL;
3140
3141 if (copy_from_user(&buf, ubuf, cnt))
3142 return -EFAULT;
3143
Steven Rostedta8dd2172013-01-09 20:54:17 -05003144 buf[cnt] = 0;
3145
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003146 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003147 if (ret < 0)
3148 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003149
Jiri Olsacf8517c2009-10-23 19:36:16 -04003150 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003151
3152 return cnt;
3153}
3154
Li Zefanfdb372e2009-12-08 11:15:59 +08003155static int tracing_trace_options_open(struct inode *inode, struct file *file)
3156{
3157 if (tracing_disabled)
3158 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003159
3160 return single_open(file, tracing_trace_options_show, inode->i_private);
Li Zefanfdb372e2009-12-08 11:15:59 +08003161}
3162
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003163static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003164 .open = tracing_trace_options_open,
3165 .read = seq_read,
3166 .llseek = seq_lseek,
3167 .release = single_release,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003168 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003169};
3170
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003171static const char readme_msg[] =
3172 "tracing mini-HOWTO:\n\n"
GeunSik Lim156f5a72009-06-02 15:01:37 +09003173 "# mount -t debugfs nodev /sys/kernel/debug\n\n"
3174 "# cat /sys/kernel/debug/tracing/available_tracers\n"
Geunsik Lim1e42e832012-02-08 19:05:36 +09003175 "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
GeunSik Lim156f5a72009-06-02 15:01:37 +09003176 "# cat /sys/kernel/debug/tracing/current_tracer\n"
Nikanth Karthikesanbc2b6872009-03-23 11:58:31 +05303177 "nop\n"
Geunsik Lim1e42e832012-02-08 19:05:36 +09003178 "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
GeunSik Lim156f5a72009-06-02 15:01:37 +09003179 "# cat /sys/kernel/debug/tracing/current_tracer\n"
Geunsik Lim1e42e832012-02-08 19:05:36 +09003180 "wakeup\n"
GeunSik Lim156f5a72009-06-02 15:01:37 +09003181 "# cat /sys/kernel/debug/tracing/trace_options\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003182 "noprint-parent nosym-offset nosym-addr noverbose\n"
GeunSik Lim156f5a72009-06-02 15:01:37 +09003183 "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
Geunsik Lim9b5f8b32011-08-12 14:30:22 +09003184 "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
GeunSik Lim156f5a72009-06-02 15:01:37 +09003185 "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
Geunsik Lim9b5f8b32011-08-12 14:30:22 +09003186 "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003187;
3188
3189static ssize_t
3190tracing_readme_read(struct file *filp, char __user *ubuf,
3191 size_t cnt, loff_t *ppos)
3192{
3193 return simple_read_from_buffer(ubuf, cnt, ppos,
3194 readme_msg, strlen(readme_msg));
3195}
3196
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003197static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003198 .open = tracing_open_generic,
3199 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003200 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003201};
3202
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003203static ssize_t
Avadh Patel69abe6a2009-04-10 16:04:48 -04003204tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
3205 size_t cnt, loff_t *ppos)
3206{
3207 char *buf_comm;
3208 char *file_buf;
3209 char *buf;
3210 int len = 0;
3211 int pid;
3212 int i;
3213
3214 file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
3215 if (!file_buf)
3216 return -ENOMEM;
3217
3218 buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
3219 if (!buf_comm) {
3220 kfree(file_buf);
3221 return -ENOMEM;
3222 }
3223
3224 buf = file_buf;
3225
3226 for (i = 0; i < SAVED_CMDLINES; i++) {
3227 int r;
3228
3229 pid = map_cmdline_to_pid[i];
3230 if (pid == -1 || pid == NO_CMDLINE_MAP)
3231 continue;
3232
3233 trace_find_cmdline(pid, buf_comm);
3234 r = sprintf(buf, "%d %s\n", pid, buf_comm);
3235 buf += r;
3236 len += r;
3237 }
3238
3239 len = simple_read_from_buffer(ubuf, cnt, ppos,
3240 file_buf, len);
3241
3242 kfree(file_buf);
3243 kfree(buf_comm);
3244
3245 return len;
3246}
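/*
 * The buffer assembled above is a list of "<pid> <comm>" lines, e.g.
 * (hypothetical contents):
 *
 *   # cat /sys/kernel/debug/tracing/saved_cmdlines
 *   29 ksoftirqd/2
 *   1203 bash
 */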
3247
3248static const struct file_operations tracing_saved_cmdlines_fops = {
3249 .open = tracing_open_generic,
3250 .read = tracing_saved_cmdlines_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003251 .llseek = generic_file_llseek,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003252};
3253
3254static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003255tracing_set_trace_read(struct file *filp, char __user *ubuf,
3256 size_t cnt, loff_t *ppos)
3257{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003258 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08003259 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003260 int r;
3261
3262 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003263 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003264 mutex_unlock(&trace_types_lock);
3265
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003266 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003267}
3268
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003269int tracer_init(struct tracer *t, struct trace_array *tr)
3270{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003271 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003272 return t->init(tr);
3273}
3274
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003275static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003276{
3277 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05003278
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003279 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003280 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003281}
3282
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003283#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09003284/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003285static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
3286 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09003287{
3288 int cpu, ret = 0;
3289
3290 if (cpu_id == RING_BUFFER_ALL_CPUS) {
3291 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003292 ret = ring_buffer_resize(trace_buf->buffer,
3293 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003294 if (ret < 0)
3295 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003296 per_cpu_ptr(trace_buf->data, cpu)->entries =
3297 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003298 }
3299 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003300 ret = ring_buffer_resize(trace_buf->buffer,
3301 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09003302 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003303 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
3304 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09003305 }
3306
3307 return ret;
3308}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003309#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09003310
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003311static int __tracing_resize_ring_buffer(struct trace_array *tr,
3312 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04003313{
3314 int ret;
3315
3316 /*
3317	 * If the kernel or user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04003318 * we use the size that was given, and we can forget about
3319 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04003320 */
3321 ring_buffer_expanded = 1;
3322
Steven Rostedtb382ede62012-10-10 21:44:34 -04003323 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003324 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04003325 return 0;
3326
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003327 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003328 if (ret < 0)
3329 return ret;
3330
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003331#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003332 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
3333 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003334 goto out;
3335
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003336 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003337 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003338 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
3339 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04003340 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04003341 /*
3342 * AARGH! We are left with different
3343 * size max buffer!!!!
3344 * The max buffer is our "snapshot" buffer.
3345 * When a tracer needs a snapshot (one of the
3346 * latency tracers), it swaps the max buffer
3347	 * with the saved snapshot. We succeeded in
3348	 * updating the size of the main buffer, but failed to
3349 * update the size of the max buffer. But when we tried
3350 * to reset the main buffer to the original size, we
3351 * failed there too. This is very unlikely to
3352 * happen, but if it does, warn and kill all
3353 * tracing.
3354 */
Steven Rostedt73c51622009-03-11 13:42:01 -04003355 WARN_ON(1);
3356 tracing_disabled = 1;
3357 }
3358 return ret;
3359 }
3360
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003361 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003362 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003363 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003364 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003365
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003366 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003367#endif /* CONFIG_TRACER_MAX_TRACE */
3368
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003369 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003370 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003371 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003372 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04003373
3374 return ret;
3375}
3376
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003377static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
3378 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003379{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07003380 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003381
3382 mutex_lock(&trace_types_lock);
3383
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003384 if (cpu_id != RING_BUFFER_ALL_CPUS) {
3385 /* make sure, this cpu is enabled in the mask */
3386 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3387 ret = -EINVAL;
3388 goto out;
3389 }
3390 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003391
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003392 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003393 if (ret < 0)
3394 ret = -ENOMEM;
3395
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003396out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07003397 mutex_unlock(&trace_types_lock);
3398
3399 return ret;
3400}
3401
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003402
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003403/**
3404 * tracing_update_buffers - used by the tracing facility to expand ring buffers
3405 *
3406 * To save memory when tracing is never used on a system that has it
3407 * configured in, the ring buffers are set to a minimum size. Once a
3408 * user starts to use the tracing facility, they need to grow to
3409 * their default size.
3410 *
3411 * This function is to be called when a tracer is about to be used.
3412 */
3413int tracing_update_buffers(void)
3414{
3415 int ret = 0;
3416
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003417 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003418 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003419 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003420 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003421 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04003422
3423 return ret;
3424}
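/*
 * Sketch (hypothetical caller, not part of this file): a facility that
 * is about to start recording events should expand the ring buffers
 * first and bail out if that fails.
 */
static int __maybe_unused example_start_tracing_facility(void)
{
	int ret = tracing_update_buffers();

	if (ret < 0)
		return ret;
	/* safe to enable events/tracers from here on */
	return 0;
}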
3425
Steven Rostedt577b7852009-02-26 23:43:05 -05003426struct trace_option_dentry;
3427
3428static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003429create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05003430
3431static void
3432destroy_trace_option_files(struct trace_option_dentry *topts);
3433
Steven Rostedtb2821ae2009-02-02 21:38:32 -05003434static int tracing_set_tracer(const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003435{
Steven Rostedt577b7852009-02-26 23:43:05 -05003436 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003437 struct trace_array *tr = &global_trace;
3438 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003439#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003440 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003441#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003442 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003443
Steven Rostedt1027fcb2009-03-12 11:33:20 -04003444 mutex_lock(&trace_types_lock);
3445
Steven Rostedt73c51622009-03-11 13:42:01 -04003446 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003447 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003448 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04003449 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01003450 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04003451 ret = 0;
3452 }
3453
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003454 for (t = trace_types; t; t = t->next) {
3455 if (strcmp(t->name, buf) == 0)
3456 break;
3457 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003458 if (!t) {
3459 ret = -EINVAL;
3460 goto out;
3461 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003462 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003463 goto out;
3464
Steven Rostedt9f029e82008-11-12 15:24:24 -05003465 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003466
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003467 tr->current_trace->enabled = false;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003468
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003469 if (tr->current_trace->reset)
3470 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05003471
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003472 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003473 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05003474
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003475#ifdef CONFIG_TRACER_MAX_TRACE
3476 had_max_tr = tr->allocated_snapshot;
3477
Steven Rostedt34600f02013-01-22 13:35:11 -05003478 if (had_max_tr && !t->use_max_tr) {
3479 /*
3480 * We need to make sure that the update_max_tr sees that
3481 * current_trace changed to nop_trace to keep it from
3482 * swapping the buffers after we resize it.
3483 * The update_max_tr is called from interrupts disabled
3484 * so a synchronized_sched() is sufficient.
3485 */
3486 synchronize_sched();
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003487 /*
3488 * We don't free the ring buffer. instead, resize it because
3489 * The max_tr ring buffer has some state (e.g. ring->clock) and
3490 * we want preserve it.
3491 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003492 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
3493 set_buffer_entries(&tr->max_buffer, 1);
3494 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003495 tr->allocated_snapshot = false;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003496 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003497#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05003498 destroy_trace_option_files(topts);
3499
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003500 topts = create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003501
3502#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05003503 if (t->use_max_tr && !had_max_tr) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08003504 /* we need to make per cpu buffer sizes equivalent */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003505 ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer,
Hiraku Toyookad60da502012-10-17 11:56:16 +09003506 RING_BUFFER_ALL_CPUS);
3507 if (ret < 0)
3508 goto out;
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05003509 tr->allocated_snapshot = true;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09003510 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003511#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05003512
Frederic Weisbecker1c800252008-11-16 05:57:26 +01003513 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02003514 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01003515 if (ret)
3516 goto out;
3517 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003518
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003519 tr->current_trace = t;
3520 tr->current_trace->enabled = true;
Steven Rostedt9f029e82008-11-12 15:24:24 -05003521 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003522 out:
3523 mutex_unlock(&trace_types_lock);
3524
Peter Zijlstrad9e54072008-11-01 19:57:37 +01003525 return ret;
3526}
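/*
 * Example (sketch): this is the path taken when userspace selects a
 * tracer by name:
 *
 *   # echo function > /sys/kernel/debug/tracing/current_tracer
 *   # echo nop > /sys/kernel/debug/tracing/current_tracer
 */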
3527
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003528static ssize_t
3529tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3530 size_t cnt, loff_t *ppos)
3531{
Li Zefanee6c2c12009-09-18 14:06:47 +08003532 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003533 int i;
3534 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01003535 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003536
Steven Rostedt60063a62008-10-28 10:44:24 -04003537 ret = cnt;
3538
Li Zefanee6c2c12009-09-18 14:06:47 +08003539 if (cnt > MAX_TRACER_SIZE)
3540 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003541
3542 if (copy_from_user(&buf, ubuf, cnt))
3543 return -EFAULT;
3544
3545 buf[cnt] = 0;
3546
3547	/* strip trailing whitespace. */
3548 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3549 buf[i] = 0;
3550
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01003551 err = tracing_set_tracer(buf);
3552 if (err)
3553 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003554
Jiri Olsacf8517c2009-10-23 19:36:16 -04003555 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003556
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02003557 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003558}
3559
3560static ssize_t
3561tracing_max_lat_read(struct file *filp, char __user *ubuf,
3562 size_t cnt, loff_t *ppos)
3563{
3564 unsigned long *ptr = filp->private_data;
3565 char buf[64];
3566 int r;
3567
Steven Rostedtcffae432008-05-12 21:21:00 +02003568 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003569 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02003570 if (r > sizeof(buf))
3571 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003572 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003573}
3574
3575static ssize_t
3576tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3577 size_t cnt, loff_t *ppos)
3578{
Hannes Eder5e398412009-02-10 19:44:34 +01003579 unsigned long *ptr = filp->private_data;
Hannes Eder5e398412009-02-10 19:44:34 +01003580 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02003581 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003582
Peter Huewe22fe9b52011-06-07 21:58:27 +02003583 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3584 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02003585 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003586
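	/* The file accepts microseconds; the value is stored in nanoseconds. */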
3587 *ptr = val * 1000;
3588
3589 return cnt;
3590}
3591
Steven Rostedtb3806b42008-05-12 21:20:46 +02003592static int tracing_open_pipe(struct inode *inode, struct file *filp)
3593{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003594 struct trace_cpu *tc = inode->i_private;
3595 struct trace_array *tr = tc->tr;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003596 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003597 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003598
3599 if (tracing_disabled)
3600 return -ENODEV;
3601
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003602 mutex_lock(&trace_types_lock);
3603
Steven Rostedtb3806b42008-05-12 21:20:46 +02003604 /* create a buffer to store the information to pass to userspace */
3605 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003606 if (!iter) {
3607 ret = -ENOMEM;
3608 goto out;
3609 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02003610
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003611 /*
3612 * We make a copy of the current tracer to avoid concurrent
3613 * changes on it while we are reading.
3614 */
3615 iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3616 if (!iter->trace) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003617 ret = -ENOMEM;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003618 goto fail;
3619 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003620 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003621
3622 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3623 ret = -ENOMEM;
3624 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10303625 }
3626
Steven Rostedta3097202008-11-07 22:36:02 -05003627 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10303628 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05003629
Steven Rostedt112f38a72009-06-01 15:16:05 -04003630 if (trace_flags & TRACE_ITER_LATENCY_FMT)
3631 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3632
David Sharp8be07092012-11-13 12:18:22 -08003633 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3634 if (trace_clocks[trace_clock_id].in_ns)
3635 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3636
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003637 iter->cpu_file = tc->cpu;
3638 iter->tr = tc->tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003639 iter->trace_buffer = &tc->tr->trace_buffer;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003640 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02003641 filp->private_data = iter;
3642
Steven Rostedt107bad82008-05-12 21:21:01 +02003643 if (iter->trace->pipe_open)
3644 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02003645
Arnd Bergmannb4447862010-07-07 23:40:11 +02003646 nonseekable_open(inode, filp);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003647out:
3648 mutex_unlock(&trace_types_lock);
3649 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003650
3651fail:
3652 kfree(iter->trace);
3653 kfree(iter);
3654 mutex_unlock(&trace_types_lock);
3655 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02003656}
3657
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;

	mutex_lock(&trace_types_lock);

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter->trace);
	kfree(iter);

	return 0;
}

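/*
 * Poll helper shared by trace_pipe and the per-cpu raw buffer files.
 * Static (buffered) iterators are always readable; otherwise we either
 * claim readable unconditionally in blocking mode or wait on the ring
 * buffer.
 */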
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/*
 * This is a makeshift waitqueue.
 * A tracer might use this callback in some rare cases:
 *
 *  1) the current tracer might hold the runqueue lock when it wakes up
 *     a reader, hence a deadlock (sched, function, and function graph tracers)
 *  2) the function tracers trace all functions, and we don't want
 *     the overhead of calling wake_up and friends
 *     (and tracing them too)
 *
 * Anyway, this is a really primitive wakeup.
 */
void poll_wait_pipe(struct trace_iterator *iter)
{
	set_current_state(TASK_INTERRUPTIBLE);
	/* sleep for 100 msecs, and try again. */
	schedule_timeout(HZ / 10);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		mutex_unlock(&iter->mutex);

		iter->trace->wait_pipe(iter);

		mutex_lock(&iter->mutex);

		if (signal_pending(current))
			return -EINTR;

		/*
		 * We block until we have read something and tracing is
		 * disabled. We still block if tracing is disabled, but we
		 * have never read anything. This allows a user to cat this
		 * file, and then enable tracing. But after we have read
		 * something, we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_enabled() && iter->pos)
			break;
	}

	return 1;
}

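/*
 * Typical userspace usage, a sketch that assumes debugfs is mounted at
 * /sys/kernel/debug (the mount point is site configuration, not fixed
 * by this file):
 *
 *	cat /sys/kernel/debug/tracing/trace_pipe	# consuming read
 */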
/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	struct trace_array *tr = iter->tr;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency: the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int len = iter->seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.len = len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (iter->seq.len >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.readpos >= iter->seq.len)
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
				     struct pipe_buffer *buf)
{
	__free_page(buf->page);
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= tracing_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

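/*
 * Render up to one page worth of trace entries into iter->seq for the
 * splice path, consuming each entry as it is printed. Returns how much
 * of the requested length is still left to fill.
 */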
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		count = iter->seq.len;
		ret = print_trace_line(iter);
		count = iter->seq.len - count;
		if (rem < count) {
			rem = 0;
			iter->seq.len -= count;
			break;
		}
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.len -= count;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

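/*
 * splice() support for trace_pipe. A userspace sketch (hypothetical
 * descriptors, error handling omitted):
 *
 *	int fd = open("trace_pipe", O_RDONLY);
 *	int p[2];
 *
 *	pipe(p);
 *	splice(fd, NULL, p[1], NULL, 65536, 0);
 */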
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	struct trace_array *tr = iter->tr;
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* copy the tracer to avoid using a global lock all around */
	mutex_lock(&trace_types_lock);
	if (unlikely(iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  iter->seq.len);
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = iter->seq.len;

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

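/*
 * buffer_size_kb read handler: shows the per-cpu buffer size in KB,
 * "X" when the cpus are not all sized the same, and appends the size
 * the buffer would grow to while it is still at its boot-time minimum.
 */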
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	struct trace_array *tr = tc->tr;
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (tc->cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

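/*
 * buffer_size_kb write handler. The value is taken in KB, e.g. (a
 * usage sketch, assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 1408 > /sys/kernel/debug/tracing/buffer_size_kb
 */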
static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct trace_cpu *tc = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;

	ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracing_off();
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	return 0;
}

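/*
 * trace_marker write handler. Userspace can inject its own annotations
 * into the trace, e.g. (a usage sketch, assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo "hello world" > /sys/kernel/debug/tracing/trace_marker
 */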
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which most likely it already is, because the process just
	 * referenced it. But there's no guarantee that it is. By using
	 * get_user_pages_fast() and kmap_atomic()/kunmap_atomic() we
	 * can get access to the pages directly. We then write the data
	 * directly into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = 0; i < nr_pages; i++) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

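/*
 * trace_clock file: reading lists the available clocks with the active
 * one in brackets; writing a clock name switches to it and resets the
 * buffers, since timestamps from different clocks are not comparable.
 * A usage sketch:
 *
 *	echo global > /sys/kernel/debug/tracing/trace_clock
 */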
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int i;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&global_trace.trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&global_trace.max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	if (tracing_disabled)
		return -ENODEV;

	return single_open(file, tracing_clock_show, inode->i_private);
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			return -ENOMEM;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			return -ENOMEM;
		}
		iter->tr = tc->tr;
		iter->trace_buffer = &tc->tr->max_buffer;
		iter->cpu_file = tc->cpu;
		m->private = iter;
		file->private_data = m;
	}

	return ret;
}

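/*
 * Writes to the snapshot file control the spare buffer:
 *	0	frees the spare buffer (all-cpus files only),
 *	1	allocates it if needed and swaps it with the live buffer,
 *	else	clears the snapshot contents.
 */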
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot) {
			/* free spare buffer */
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
			set_buffer_entries(&tr->max_buffer, 1);
			tracing_reset_online_cpus(&tr->max_buffer);
			tr->allocated_snapshot = false;
		}
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			/* allocate spare buffer */
			ret = resize_buffer_duplicate_size(&tr->max_buffer,
					&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
			if (ret < 0)
				break;
			tr->allocated_snapshot = true;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;

	if (file->f_mode & FMODE_READ)
		return tracing_release(inode, file);

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_free_buffer_fops = {
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_seek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

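/*
 * Open a per-cpu trace_pipe_raw file, which hands whole ring buffer
 * pages to userspace unformatted. The trace_array reference count is
 * bumped so the instance cannot go away while the file is open.
 */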
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_cpu *tc = inode->i_private;
	struct trace_array *tr = tc->tr;
	struct ftrace_buffer_info *info;

	if (tracing_disabled)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	mutex_lock(&trace_types_lock);

	tr->ref++;

	info->iter.tr		= tr;
	info->iter.cpu_file	= tc->cpu;
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	mutex_unlock(&trace_types_lock);

	return nonseekable_open(inode, filp);
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		size = -EBUSY;
		goto out_unlock;
	}
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	size = -ENOMEM;
	if (!info->spare)
		goto out_unlock;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK)) {
				size = -EAGAIN;
				goto out_unlock;
			}
			mutex_unlock(&trace_types_lock);
			iter->trace->wait_pipe(iter);
			mutex_lock(&trace_types_lock);
			if (signal_pending(current)) {
				size = -EINTR;
				goto out_unlock;
			}
			goto again;
		}
		size = 0;
		goto out_unlock;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size) {
		size = -EFAULT;
		goto out_unlock;
	}
	size -= ret;

	*ppos += size;
	info->read += size;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	WARN_ON(!iter->tr->ref);
	iter->tr->ref--;

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

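/*
 * A buffer_ref counts how many pipe buffers still reference a ring
 * buffer page handed out by splice; the page goes back to the ring
 * buffer only when the last reference is dropped.
 */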
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.map			= generic_pipe_buf_map,
	.unmap			= generic_pipe_buf_unmap,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we errored out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

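/*
 * Zero-copy splice of whole ring buffer pages into a pipe. Both the
 * file offset and the length must be page aligned. A userspace sketch
 * (hypothetical descriptors, error handling omitted):
 *
 *	int fd = open("per_cpu/cpu0/trace_pipe_raw", O_RDONLY);
 *
 *	splice(fd, NULL, pipe_wfd, NULL, 4096, SPLICE_F_NONBLOCK);
 */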
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}
#endif

	if (splice_grow_spd(pipe, &spd)) {
		ret = -ENOMEM;
		goto out;
	}

	if (*ppos & (PAGE_SIZE - 1)) {
		ret = -EINVAL;
		goto out;
	}

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE) {
			ret = -EINVAL;
			goto out;
		}
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref)
			break;

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
			ret = -EAGAIN;
			goto out;
		}
		mutex_unlock(&trace_types_lock);
		iter->trace->wait_pipe(iter);
		mutex_lock(&trace_types_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

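/*
 * per_cpu/cpuN/stats read handler: reports entry, overrun and byte
 * counts plus buffer timestamps for one cpu, e.g. (a usage sketch):
 *
 *	cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats
 */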
Steven Rostedtc8d77182009-04-29 18:03:45 -04004882static ssize_t
4883tracing_stats_read(struct file *filp, char __user *ubuf,
4884 size_t count, loff_t *ppos)
4885{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004886 struct trace_cpu *tc = filp->private_data;
4887 struct trace_array *tr = tc->tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004888 struct trace_buffer *trace_buf = &tr->trace_buffer;
Steven Rostedtc8d77182009-04-29 18:03:45 -04004889 struct trace_seq *s;
4890 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07004891 unsigned long long t;
4892 unsigned long usec_rem;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004893 int cpu = tc->cpu;
Steven Rostedtc8d77182009-04-29 18:03:45 -04004894
Li Zefane4f2d102009-06-15 10:57:28 +08004895 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04004896 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01004897 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04004898
4899 trace_seq_init(s);
4900
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004901 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04004902 trace_seq_printf(s, "entries: %ld\n", cnt);
4903
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004904 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04004905 trace_seq_printf(s, "overrun: %ld\n", cnt);
4906
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004907 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04004908 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4909
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004910 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07004911 trace_seq_printf(s, "bytes: %ld\n", cnt);
4912
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08004913 if (trace_clocks[trace_clock_id].in_ns) {
4914 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004915 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08004916 usec_rem = do_div(t, USEC_PER_SEC);
4917 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
4918 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07004919
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004920 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08004921 usec_rem = do_div(t, USEC_PER_SEC);
4922 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4923 } else {
4924 /* counter or tsc mode for trace_clock */
4925 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004926 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08004927
4928 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004929 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08004930 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07004931
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004932 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07004933 trace_seq_printf(s, "dropped events: %ld\n", cnt);
4934
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004935 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05004936 trace_seq_printf(s, "read events: %ld\n", cnt);
4937
Steven Rostedtc8d77182009-04-29 18:03:45 -04004938 count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4939
4940 kfree(s);
4941
4942 return count;
4943}
4944
4945static const struct file_operations tracing_stats_fops = {
4946 .open = tracing_open_generic,
4947 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004948 .llseek = generic_file_llseek,
Steven Rostedtc8d77182009-04-29 18:03:45 -04004949};
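/*
 * Illustrative output (values invented): reading a per-cpu "stats"
 * file, e.g. "cat /sys/kernel/debug/tracing/per_cpu/cpu0/stats"
 * with debugfs at its usual mount point, yields one line per
 * counter printed by tracing_stats_read() above:
 *
 *	entries: 128
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 4096
 *	oldest event ts:  5242.194340
 *	now ts:  5242.987654
 *	dropped events: 0
 *	read events: 42
 */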
4950
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004951#ifdef CONFIG_DYNAMIC_FTRACE
4952
Steven Rostedtb807c3d2008-10-30 16:08:33 -04004953int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004954{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04004955 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004956}
4957
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004958static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04004959tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004960 size_t cnt, loff_t *ppos)
4961{
Steven Rostedta26a2a22008-10-31 00:03:22 -04004962 static char ftrace_dyn_info_buffer[1024];
4963 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004964 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04004965 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04004966 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004967 int r;
4968
Steven Rostedtb807c3d2008-10-30 16:08:33 -04004969 mutex_lock(&dyn_info_mutex);
4970 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004971
Steven Rostedta26a2a22008-10-31 00:03:22 -04004972 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04004973 buf[r++] = '\n';
4974
4975 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4976
4977 mutex_unlock(&dyn_info_mutex);
4978
4979 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004980}
4981
Steven Rostedt5e2336a2009-03-05 21:44:55 -05004982static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004983 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04004984 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02004985 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004986};
4987#endif
4988
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004989struct dentry *tracing_init_dentry_tr(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004990{
4991 static int once;
4992
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004993 if (tr->dir)
4994 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004995
Frederic Weisbecker3e1f60b2009-03-22 23:10:45 +01004996 if (!debugfs_initialized())
4997 return NULL;
4998
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004999 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
5000 tr->dir = debugfs_create_dir("tracing", NULL);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005001
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005002 if (!tr->dir && !once) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005003 once = 1;
5004 pr_warning("Could not create debugfs directory 'tracing'\n");
5005 return NULL;
5006 }
5007
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005008 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005009}
5010
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005011struct dentry *tracing_init_dentry(void)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005012{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005013 return tracing_init_dentry_tr(&global_trace);
5014}
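/*
 * Note (not in the original source): none of the files below appear
 * until debugfs itself is mounted, typically with
 *
 *	# mount -t debugfs nodev /sys/kernel/debug
 *
 * after which this directory shows up as /sys/kernel/debug/tracing.
 */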
5015
5016static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
5017{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005018 struct dentry *d_tracer;
5019
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005020 if (tr->percpu_dir)
5021 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005022
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005023 d_tracer = tracing_init_dentry_tr(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005024 if (!d_tracer)
5025 return NULL;
5026
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005027 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005028
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005029 WARN_ONCE(!tr->percpu_dir,
5030		  "Could not create debugfs directory 'per_cpu' (cpu %d)\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005031
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005032 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005033}
5034
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005035static void
5036tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005037{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005038 struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005039 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005040 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04005041 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005042
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09005043 if (!d_percpu)
5044 return;
5045
Steven Rostedtdd49a382010-10-20 21:51:26 -04005046 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005047 d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
5048 if (!d_cpu) {
5049 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
5050 return;
5051 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005052
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01005053 /* per cpu trace_pipe */
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005054 trace_create_file("trace_pipe", 0444, d_cpu,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005055 (void *)&data->trace_cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005056
5057 /* per cpu trace */
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005058 trace_create_file("trace", 0644, d_cpu,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005059 (void *)&data->trace_cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04005060
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005061 trace_create_file("trace_pipe_raw", 0444, d_cpu,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005062 (void *)&data->trace_cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005063
5064 trace_create_file("stats", 0444, d_cpu,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005065 (void *)&data->trace_cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005066
5067 trace_create_file("buffer_size_kb", 0444, d_cpu,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005068 (void *)&data->trace_cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005069
5070#ifdef CONFIG_TRACER_SNAPSHOT
5071 trace_create_file("snapshot", 0644, d_cpu,
5072 (void *)&data->trace_cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005073
5074 trace_create_file("snapshot_raw", 0444, d_cpu,
5075 (void *)&data->trace_cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005076#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005077}
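/*
 * Resulting layout (summary, not in the original source): for every
 * tracing CPU this creates per_cpu/cpu<N>/ holding trace, trace_pipe,
 * trace_pipe_raw, stats and buffer_size_kb, plus snapshot and
 * snapshot_raw when CONFIG_TRACER_SNAPSHOT is enabled.
 */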
5078
Steven Rostedt60a11772008-05-12 21:20:44 +02005079#ifdef CONFIG_FTRACE_SELFTEST
5080/* Let selftest have access to static functions in this file */
5081#include "trace_selftest.c"
5082#endif
5083
Steven Rostedt577b7852009-02-26 23:43:05 -05005084struct trace_option_dentry {
5085 struct tracer_opt *opt;
5086 struct tracer_flags *flags;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005087 struct trace_array *tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005088 struct dentry *entry;
5089};
5090
5091static ssize_t
5092trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
5093 loff_t *ppos)
5094{
5095 struct trace_option_dentry *topt = filp->private_data;
5096 char *buf;
5097
5098 if (topt->flags->val & topt->opt->bit)
5099 buf = "1\n";
5100 else
5101 buf = "0\n";
5102
5103 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5104}
5105
5106static ssize_t
5107trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
5108 loff_t *ppos)
5109{
5110 struct trace_option_dentry *topt = filp->private_data;
5111 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05005112 int ret;
5113
Peter Huewe22fe9b52011-06-07 21:58:27 +02005114 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5115 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05005116 return ret;
5117
Li Zefan8d18eaa2009-12-08 11:17:06 +08005118 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05005119 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08005120
5121 if (!!(topt->flags->val & topt->opt->bit) != val) {
5122 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005123 ret = __set_tracer_option(topt->tr->current_trace, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05005124 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08005125 mutex_unlock(&trace_types_lock);
5126 if (ret)
5127 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05005128 }
5129
5130 *ppos += cnt;
5131
5132 return cnt;
5133}
5134
5135
5136static const struct file_operations trace_options_fops = {
5137 .open = tracing_open_generic,
5138 .read = trace_options_read,
5139 .write = trace_options_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005140 .llseek = generic_file_llseek,
Steven Rostedt577b7852009-02-26 23:43:05 -05005141};
5142
Steven Rostedta8259072009-02-26 22:19:12 -05005143static ssize_t
5144trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
5145 loff_t *ppos)
5146{
5147 long index = (long)filp->private_data;
5148 char *buf;
5149
5150 if (trace_flags & (1 << index))
5151 buf = "1\n";
5152 else
5153 buf = "0\n";
5154
5155 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
5156}
5157
5158static ssize_t
5159trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
5160 loff_t *ppos)
5161{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005162 struct trace_array *tr = &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05005163 long index = (long)filp->private_data;
Steven Rostedta8259072009-02-26 22:19:12 -05005164 unsigned long val;
5165 int ret;
5166
Peter Huewe22fe9b52011-06-07 21:58:27 +02005167 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5168 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05005169 return ret;
5170
Zhaoleif2d84b62009-08-07 18:55:48 +08005171 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05005172 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005173
5174 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005175 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04005176 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05005177
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04005178 if (ret < 0)
5179 return ret;
5180
Steven Rostedta8259072009-02-26 22:19:12 -05005181 *ppos += cnt;
5182
5183 return cnt;
5184}
5185
Steven Rostedta8259072009-02-26 22:19:12 -05005186static const struct file_operations trace_options_core_fops = {
5187 .open = tracing_open_generic,
5188 .read = trace_options_core_read,
5189 .write = trace_options_core_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005190 .llseek = generic_file_llseek,
Steven Rostedta8259072009-02-26 22:19:12 -05005191};
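/*
 * Usage sketch (paths assume the usual debugfs mount; option names
 * come from the trace_options[] table defined elsewhere): each core
 * trace_flags bit becomes a 0/1 file under options/, e.g.
 *
 *	# echo 0 > /sys/kernel/debug/tracing/options/sym-userobj
 *	# cat /sys/kernel/debug/tracing/options/sym-userobj
 *	0
 *
 * Tracer-specific flags (trace_options_fops above) are toggled the
 * same way.
 */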
5192
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005193struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04005194 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005195 struct dentry *parent,
5196 void *data,
5197 const struct file_operations *fops)
5198{
5199 struct dentry *ret;
5200
5201 ret = debugfs_create_file(name, mode, parent, data, fops);
5202 if (!ret)
5203 pr_warning("Could not create debugfs '%s' entry\n", name);
5204
5205 return ret;
5206}
5207
5208
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005209static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005210{
5211 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05005212
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005213 if (tr->options)
5214 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005215
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005216 d_tracer = tracing_init_dentry_tr(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005217 if (!d_tracer)
5218 return NULL;
5219
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005220 tr->options = debugfs_create_dir("options", d_tracer);
5221 if (!tr->options) {
Steven Rostedta8259072009-02-26 22:19:12 -05005222 pr_warning("Could not create debugfs directory 'options'\n");
5223 return NULL;
5224 }
5225
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005226 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05005227}
5228
Steven Rostedt577b7852009-02-26 23:43:05 -05005229static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005230create_trace_option_file(struct trace_array *tr,
5231 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005232 struct tracer_flags *flags,
5233 struct tracer_opt *opt)
5234{
5235 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05005236
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005237 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05005238 if (!t_options)
5239 return;
5240
5241 topt->flags = flags;
5242 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005243 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05005244
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005245 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05005246 &trace_options_fops);
5247
Steven Rostedt577b7852009-02-26 23:43:05 -05005248}
5249
5250static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005251create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
Steven Rostedt577b7852009-02-26 23:43:05 -05005252{
5253 struct trace_option_dentry *topts;
5254 struct tracer_flags *flags;
5255 struct tracer_opt *opts;
5256 int cnt;
5257
5258 if (!tracer)
5259 return NULL;
5260
5261 flags = tracer->flags;
5262
5263 if (!flags || !flags->opts)
5264 return NULL;
5265
5266 opts = flags->opts;
5267
5268 for (cnt = 0; opts[cnt].name; cnt++)
5269 ;
5270
Steven Rostedt0cfe8242009-02-27 10:51:10 -05005271 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
Steven Rostedt577b7852009-02-26 23:43:05 -05005272 if (!topts)
5273 return NULL;
5274
5275 for (cnt = 0; opts[cnt].name; cnt++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005276 create_trace_option_file(tr, &topts[cnt], flags,
Steven Rostedt577b7852009-02-26 23:43:05 -05005277 &opts[cnt]);
5278
5279 return topts;
5280}
5281
5282static void
5283destroy_trace_option_files(struct trace_option_dentry *topts)
5284{
5285 int cnt;
5286
5287 if (!topts)
5288 return;
5289
5290 for (cnt = 0; topts[cnt].opt; cnt++) {
5291 if (topts[cnt].entry)
5292 debugfs_remove(topts[cnt].entry);
5293 }
5294
5295 kfree(topts);
5296}
5297
Steven Rostedta8259072009-02-26 22:19:12 -05005298static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005299create_trace_option_core_file(struct trace_array *tr,
5300 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05005301{
5302 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005303
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005304 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005305 if (!t_options)
5306 return NULL;
5307
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005308 return trace_create_file(option, 0644, t_options, (void *)index,
Steven Rostedta8259072009-02-26 22:19:12 -05005309 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05005310}
5311
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005312static __init void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05005313{
5314 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05005315 int i;
5316
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005317 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05005318 if (!t_options)
5319 return;
5320
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005321 for (i = 0; trace_options[i]; i++)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005322 create_trace_option_core_file(tr, trace_options[i], i);
Steven Rostedta8259072009-02-26 22:19:12 -05005323}
5324
Steven Rostedt499e5472012-02-22 15:50:28 -05005325static ssize_t
5326rb_simple_read(struct file *filp, char __user *ubuf,
5327 size_t cnt, loff_t *ppos)
5328{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005329 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005330 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05005331 char buf[64];
5332 int r;
5333
5334 if (buffer)
5335 r = ring_buffer_record_is_on(buffer);
5336 else
5337 r = 0;
5338
5339 r = sprintf(buf, "%d\n", r);
5340
5341 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5342}
5343
5344static ssize_t
5345rb_simple_write(struct file *filp, const char __user *ubuf,
5346 size_t cnt, loff_t *ppos)
5347{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04005348 struct trace_array *tr = filp->private_data;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005349 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt499e5472012-02-22 15:50:28 -05005350 unsigned long val;
5351 int ret;
5352
5353 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5354 if (ret)
5355 return ret;
5356
5357 if (buffer) {
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005358 mutex_lock(&trace_types_lock);
5359 if (val) {
Steven Rostedt499e5472012-02-22 15:50:28 -05005360 ring_buffer_record_on(buffer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005361 if (tr->current_trace->start)
5362 tr->current_trace->start(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005363 } else {
Steven Rostedt499e5472012-02-22 15:50:28 -05005364 ring_buffer_record_off(buffer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005365 if (tr->current_trace->stop)
5366 tr->current_trace->stop(tr);
Steven Rostedt2df8f8a2013-01-11 16:14:10 -05005367 }
5368 mutex_unlock(&trace_types_lock);
Steven Rostedt499e5472012-02-22 15:50:28 -05005369 }
5370
5371 (*ppos)++;
5372
5373 return cnt;
5374}
5375
5376static const struct file_operations rb_simple_fops = {
5377 .open = tracing_open_generic,
5378 .read = rb_simple_read,
5379 .write = rb_simple_write,
5380 .llseek = default_llseek,
5381};
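/*
 * Usage sketch (assumes the standard debugfs mount point): these
 * fops back the "tracing_on" file, so a 0/1 write flips
 * ring_buffer_record_off()/ring_buffer_record_on() along with the
 * current tracer's stop()/start() callbacks:
 *
 *	# echo 0 > /sys/kernel/debug/tracing/tracing_on
 *	# cat /sys/kernel/debug/tracing/tracing_on
 *	0
 */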
5382
Steven Rostedt277ba042012-08-03 16:10:49 -04005383struct dentry *trace_instance_dir;
5384
5385static void
5386init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);
5387
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005388static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
5389{
5390 int cpu;
5391
5392 for_each_tracing_cpu(cpu) {
5393 memset(per_cpu_ptr(buf->data, cpu), 0, sizeof(struct trace_array_cpu));
5394 per_cpu_ptr(buf->data, cpu)->trace_cpu.cpu = cpu;
5395 per_cpu_ptr(buf->data, cpu)->trace_cpu.tr = tr;
5396 }
5397}
5398
5399static int allocate_trace_buffers(struct trace_array *tr, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04005400{
5401 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005402
5403 rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
5404
5405 tr->trace_buffer.buffer = ring_buffer_alloc(size, rb_flags);
5406 if (!tr->trace_buffer.buffer)
5407 goto out_free;
5408
5409 tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
5410 if (!tr->trace_buffer.data)
5411 goto out_free;
5412
5413 init_trace_buffers(tr, &tr->trace_buffer);
5414
5415 /* Allocate the first page for all buffers */
5416 set_buffer_entries(&tr->trace_buffer,
5417 ring_buffer_size(tr->trace_buffer.buffer, 0));
5418
5419#ifdef CONFIG_TRACER_MAX_TRACE
5420
5421 tr->max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
5422 if (!tr->max_buffer.buffer)
5423 goto out_free;
5424
5425 tr->max_buffer.data = alloc_percpu(struct trace_array_cpu);
5426 if (!tr->max_buffer.data)
5427 goto out_free;
5428
5429 init_trace_buffers(tr, &tr->max_buffer);
5430
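	/*
	 * Keep the max (snapshot) buffer minimal for now; it only
	 * needs to grow to the trace buffer's size once a snapshot
	 * is actually used.
	 */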
5431 set_buffer_entries(&tr->max_buffer, 1);
5432#endif
5433 return 0;
5434
5435 out_free:
5436 if (tr->trace_buffer.buffer)
5437 ring_buffer_free(tr->trace_buffer.buffer);
5438 free_percpu(tr->trace_buffer.data);
5439
5440#ifdef CONFIG_TRACER_MAX_TRACE
5441 if (tr->max_buffer.buffer)
5442 ring_buffer_free(tr->max_buffer.buffer);
5443 free_percpu(tr->max_buffer.data);
5444#endif
5445 return -ENOMEM;
5446}
5447
5448static int new_instance_create(const char *name)
5449{
Steven Rostedt277ba042012-08-03 16:10:49 -04005450 struct trace_array *tr;
5451 int ret;
Steven Rostedt277ba042012-08-03 16:10:49 -04005452
5453 mutex_lock(&trace_types_lock);
5454
5455 ret = -EEXIST;
5456 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5457 if (tr->name && strcmp(tr->name, name) == 0)
5458 goto out_unlock;
5459 }
5460
5461 ret = -ENOMEM;
5462 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
5463 if (!tr)
5464 goto out_unlock;
5465
5466 tr->name = kstrdup(name, GFP_KERNEL);
5467 if (!tr->name)
5468 goto out_free_tr;
5469
5470 raw_spin_lock_init(&tr->start_lock);
5471
5472 tr->current_trace = &nop_trace;
5473
5474 INIT_LIST_HEAD(&tr->systems);
5475 INIT_LIST_HEAD(&tr->events);
5476
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005477 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
Steven Rostedt277ba042012-08-03 16:10:49 -04005478 goto out_free_tr;
5479
Steven Rostedt277ba042012-08-03 16:10:49 -04005480 /* Holder for file callbacks */
5481 tr->trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
5482 tr->trace_cpu.tr = tr;
5483
5484 tr->dir = debugfs_create_dir(name, trace_instance_dir);
5485 if (!tr->dir)
5486 goto out_free_tr;
5487
5488 ret = event_trace_add_tracer(tr->dir, tr);
5489 if (ret)
5490 goto out_free_tr;
5491
5492 init_tracer_debugfs(tr, tr->dir);
5493
5494 list_add(&tr->list, &ftrace_trace_arrays);
5495
5496 mutex_unlock(&trace_types_lock);
5497
5498 return 0;
5499
5500 out_free_tr:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005501 if (tr->trace_buffer.buffer)
5502 ring_buffer_free(tr->trace_buffer.buffer);
Steven Rostedt277ba042012-08-03 16:10:49 -04005503 kfree(tr->name);
5504 kfree(tr);
5505
5506 out_unlock:
5507 mutex_unlock(&trace_types_lock);
5508
5509 return ret;
5510
5511}
5512
Steven Rostedt0c8916c2012-08-07 16:14:16 -04005513static int instance_delete(const char *name)
5514{
5515 struct trace_array *tr;
5516 int found = 0;
5517 int ret;
5518
5519 mutex_lock(&trace_types_lock);
5520
5521 ret = -ENODEV;
5522 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
5523 if (tr->name && strcmp(tr->name, name) == 0) {
5524 found = 1;
5525 break;
5526 }
5527 }
5528 if (!found)
5529 goto out_unlock;
5530
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005531 ret = -EBUSY;
5532 if (tr->ref)
5533 goto out_unlock;
5534
Steven Rostedt0c8916c2012-08-07 16:14:16 -04005535 list_del(&tr->list);
5536
5537 event_trace_del_tracer(tr);
5538 debugfs_remove_recursive(tr->dir);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005539 free_percpu(tr->trace_buffer.data);
5540 ring_buffer_free(tr->trace_buffer.buffer);
Steven Rostedt0c8916c2012-08-07 16:14:16 -04005541
5542 kfree(tr->name);
5543 kfree(tr);
5544
5545 ret = 0;
5546
5547 out_unlock:
5548 mutex_unlock(&trace_types_lock);
5549
5550 return ret;
5551}
5552
Steven Rostedt277ba042012-08-03 16:10:49 -04005553static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
5554{
5555 struct dentry *parent;
5556 int ret;
5557
5558 /* Paranoid: Make sure the parent is the "instances" directory */
5559 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
5560 if (WARN_ON_ONCE(parent != trace_instance_dir))
5561 return -ENOENT;
5562
5563 /*
5564 * The inode mutex is locked, but debugfs_create_dir() will also
5565	 * take the mutex. As the instances directory cannot be destroyed
5566	 * or changed in any other way, it is safe to unlock it and
5567	 * let the dentry try. If two users try to make the same dir at
5568	 * the same time, new_instance_create() will determine the
5569 * winner.
5570 */
5571 mutex_unlock(&inode->i_mutex);
5572
5573 ret = new_instance_create(dentry->d_iname);
5574
5575 mutex_lock(&inode->i_mutex);
5576
5577 return ret;
5578}
5579
Steven Rostedt0c8916c2012-08-07 16:14:16 -04005580static int instance_rmdir(struct inode *inode, struct dentry *dentry)
5581{
5582 struct dentry *parent;
5583 int ret;
5584
5585 /* Paranoid: Make sure the parent is the "instances" directory */
5586 parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
5587 if (WARN_ON_ONCE(parent != trace_instance_dir))
5588 return -ENOENT;
5589
5590 /* The caller did a dget() on dentry */
5591 mutex_unlock(&dentry->d_inode->i_mutex);
5592
5593 /*
5594	 * The inode mutex is locked, but debugfs_remove_recursive() will
5595	 * also take the mutex. As the instances directory cannot be
5596	 * destroyed or changed in any other way, it is safe to unlock it
5597	 * and let the removal proceed. If two users try to remove the
5598	 * same dir at the same time, instance_delete() will determine
5599	 * the winner.
5600 */
5601 mutex_unlock(&inode->i_mutex);
5602
5603 ret = instance_delete(dentry->d_iname);
5604
5605 mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
5606 mutex_lock(&dentry->d_inode->i_mutex);
5607
5608 return ret;
5609}
5610
Steven Rostedt277ba042012-08-03 16:10:49 -04005611static const struct inode_operations instance_dir_inode_operations = {
5612 .lookup = simple_lookup,
5613 .mkdir = instance_mkdir,
Steven Rostedt0c8916c2012-08-07 16:14:16 -04005614 .rmdir = instance_rmdir,
Steven Rostedt277ba042012-08-03 16:10:49 -04005615};
5616
5617static __init void create_trace_instances(struct dentry *d_tracer)
5618{
5619 trace_instance_dir = debugfs_create_dir("instances", d_tracer);
5620 if (WARN_ON(!trace_instance_dir))
5621 return;
5622
5623 /* Hijack the dir inode operations, to allow mkdir */
5624 trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
5625}
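/*
 * Usage sketch (assumes the usual debugfs mount): thanks to the
 * hijacked mkdir/rmdir above, trace instances are managed with plain
 * directory operations from userspace:
 *
 *	# mkdir /sys/kernel/debug/tracing/instances/foo
 *	# rmdir /sys/kernel/debug/tracing/instances/foo
 *
 * mkdir lands in new_instance_create(), rmdir in instance_delete(),
 * and the latter returns -EBUSY while the instance still has readers.
 */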
5626
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005627static void
5628init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
5629{
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05005630 int cpu;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005631
5632 trace_create_file("trace_options", 0644, d_tracer,
5633 tr, &tracing_iter_fops);
5634
5635 trace_create_file("trace", 0644, d_tracer,
5636 (void *)&tr->trace_cpu, &tracing_fops);
5637
5638 trace_create_file("trace_pipe", 0444, d_tracer,
5639 (void *)&tr->trace_cpu, &tracing_pipe_fops);
5640
5641 trace_create_file("buffer_size_kb", 0644, d_tracer,
5642 (void *)&tr->trace_cpu, &tracing_entries_fops);
5643
5644 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
5645 tr, &tracing_total_entries_fops);
5646
5647 trace_create_file("free_buffer", 0644, d_tracer,
5648 tr, &tracing_free_buffer_fops);
5649
5650 trace_create_file("trace_marker", 0220, d_tracer,
5651 tr, &tracing_mark_fops);
5652
5653 trace_create_file("trace_clock", 0644, d_tracer, tr,
5654 &trace_clock_fops);
5655
5656 trace_create_file("tracing_on", 0644, d_tracer,
5657 tr, &rb_simple_fops);
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005658
5659#ifdef CONFIG_TRACER_SNAPSHOT
5660 trace_create_file("snapshot", 0644, d_tracer,
5661 (void *)&tr->trace_cpu, &snapshot_fops);
5662#endif
Steven Rostedt (Red Hat)121aaee2013-03-05 21:52:25 -05005663
5664 for_each_tracing_cpu(cpu)
5665 tracing_init_debugfs_percpu(tr, cpu);
5666
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005667}
5668
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01005669static __init int tracer_init_debugfs(void)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005670{
5671 struct dentry *d_tracer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005672
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005673 trace_access_lock_init();
5674
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005675 d_tracer = tracing_init_dentry();
5676
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005677 init_tracer_debugfs(&global_trace, d_tracer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005678
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005679 trace_create_file("tracing_cpumask", 0644, d_tracer,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005680 &global_trace, &tracing_cpumask_fops);
Ingo Molnarc7078de2008-05-12 21:20:52 +02005681
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005682 trace_create_file("available_tracers", 0444, d_tracer,
5683 &global_trace, &show_traces_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005684
Li Zefan339ae5d2009-04-17 10:34:30 +08005685 trace_create_file("current_tracer", 0644, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005686 &global_trace, &set_tracer_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005687
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04005688#ifdef CONFIG_TRACER_MAX_TRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005689 trace_create_file("tracing_max_latency", 0644, d_tracer,
5690 &tracing_max_latency, &tracing_max_lat_fops);
Tim Bird0e950172010-02-25 15:36:43 -08005691#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005692
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005693 trace_create_file("tracing_thresh", 0644, d_tracer,
5694 &tracing_thresh, &tracing_max_lat_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005695
Li Zefan339ae5d2009-04-17 10:34:30 +08005696 trace_create_file("README", 0444, d_tracer,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005697 NULL, &tracing_readme_fops);
Ingo Molnar7bd2f242008-05-12 21:20:45 +02005698
Avadh Patel69abe6a2009-04-10 16:04:48 -04005699 trace_create_file("saved_cmdlines", 0444, d_tracer,
5700 NULL, &tracing_saved_cmdlines_fops);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005701
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005702#ifdef CONFIG_DYNAMIC_FTRACE
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005703 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
5704 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005705#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01005706
Steven Rostedt277ba042012-08-03 16:10:49 -04005707 create_trace_instances(d_tracer);
5708
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005709 create_trace_options_dir(&global_trace);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01005710
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01005711 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005712}
5713
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005714static int trace_panic_handler(struct notifier_block *this,
5715 unsigned long event, void *unused)
5716{
Steven Rostedt944ac422008-10-23 19:26:08 -04005717 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005718 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005719 return NOTIFY_OK;
5720}
5721
5722static struct notifier_block trace_panic_notifier = {
5723 .notifier_call = trace_panic_handler,
5724 .next = NULL,
5725 .priority = 150 /* priority: INT_MAX >= x >= 0 */
5726};
5727
5728static int trace_die_handler(struct notifier_block *self,
5729 unsigned long val,
5730 void *data)
5731{
5732 switch (val) {
5733 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04005734 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005735 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005736 break;
5737 default:
5738 break;
5739 }
5740 return NOTIFY_OK;
5741}
5742
5743static struct notifier_block trace_die_notifier = {
5744 .notifier_call = trace_die_handler,
5745 .priority = 200
5746};
5747
5748/*
5749 * printk output is capped at 1024 characters; we really don't need
5750 * it that big. Nothing should be printing 1000 characters anyway.
5751 */
5752#define TRACE_MAX_PRINT 1000
5753
5754/*
5755 * Define here KERN_TRACE so that we have one place to modify
5756 * it if we decide to change what log level the ftrace dump
5757 * should be at.
5758 */
Steven Rostedt428aee12009-01-14 12:24:42 -05005759#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005760
Jason Wessel955b61e2010-08-05 09:22:23 -05005761void
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005762trace_printk_seq(struct trace_seq *s)
5763{
5764 /* Probably should print a warning here. */
5765	if (s->len >= TRACE_MAX_PRINT)
5766		s->len = TRACE_MAX_PRINT;
5767
5768	/* Should be NUL-terminated, but we are paranoid. */
5769 s->buffer[s->len] = 0;
5770
5771 printk(KERN_TRACE "%s", s->buffer);
5772
Steven Rostedtf9520752009-03-02 14:04:40 -05005773 trace_seq_init(s);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005774}
5775
Jason Wessel955b61e2010-08-05 09:22:23 -05005776void trace_init_global_iter(struct trace_iterator *iter)
5777{
5778 iter->tr = &global_trace;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005779 iter->trace = iter->tr->current_trace;
Steven Rostedtae3b5092013-01-23 15:22:59 -05005780 iter->cpu_file = RING_BUFFER_ALL_CPUS;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005781 iter->trace_buffer = &global_trace.trace_buffer;
Jason Wessel955b61e2010-08-05 09:22:23 -05005782}
5783
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005784static void
5785__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005786{
Thomas Gleixner445c8952009-12-02 19:49:50 +01005787 static arch_spinlock_t ftrace_dump_lock =
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01005788 (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005789 /* use static because iter can be a bit big for the stack */
5790 static struct trace_iterator iter;
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005791 unsigned int old_userobj;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005792 static int dump_ran;
Steven Rostedtd7690412008-10-01 00:29:53 -04005793 unsigned long flags;
5794 int cnt = 0, cpu;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005795
5796 /* only one dump */
Steven Rostedtcd891ae2009-04-28 11:39:34 -04005797 local_irq_save(flags);
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01005798 arch_spin_lock(&ftrace_dump_lock);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005799 if (dump_ran)
5800 goto out;
5801
5802 dump_ran = 1;
5803
Steven Rostedt0ee6b6c2009-01-14 14:50:19 -05005804 tracing_off();
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005805
Steven Rostedte0a413f2011-09-29 21:26:16 -04005806 /* Did function tracer already get disabled? */
5807 if (ftrace_is_dead()) {
5808 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
5809 printk("# MAY BE MISSING FUNCTION EVENTS\n");
5810 }
5811
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005812 if (disable_tracing)
5813 ftrace_kill();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005814
Jovi Zhang38dbe0b2013-01-25 18:03:07 +08005815 /* Simulate the iterator */
Jason Wessel955b61e2010-08-05 09:22:23 -05005816 trace_init_global_iter(&iter);
5817
Steven Rostedtd7690412008-10-01 00:29:53 -04005818 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005819 atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
Steven Rostedtd7690412008-10-01 00:29:53 -04005820 }
5821
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005822 old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
5823
Török Edwinb54d3de2008-11-22 13:28:48 +02005824 /* don't look at user memory in panic mode */
5825 trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
5826
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005827 switch (oops_dump_mode) {
5828 case DUMP_ALL:
Steven Rostedtae3b5092013-01-23 15:22:59 -05005829 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005830 break;
5831 case DUMP_ORIG:
5832 iter.cpu_file = raw_smp_processor_id();
5833 break;
5834 case DUMP_NONE:
5835 goto out_enable;
5836 default:
5837 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
Steven Rostedtae3b5092013-01-23 15:22:59 -05005838 iter.cpu_file = RING_BUFFER_ALL_CPUS;
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005839 }
5840
5841 printk(KERN_TRACE "Dumping ftrace buffer:\n");
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005842
5843 /*
5844	 * We need to stop all tracing on all CPUs to read
5845	 * the next buffer. This is a bit expensive, but is
5846	 * not done often. We read everything we can, and then
5847	 * release the locks again.
5848 */
5849
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005850 while (!trace_empty(&iter)) {
5851
5852 if (!cnt)
5853 printk(KERN_TRACE "---------------------------------\n");
5854
5855 cnt++;
5856
5857 /* reset all but tr, trace, and overruns */
5858 memset(&iter.seq, 0,
5859 sizeof(struct trace_iterator) -
5860 offsetof(struct trace_iterator, seq));
5861 iter.iter_flags |= TRACE_FILE_LAT_FMT;
5862 iter.pos = -1;
5863
Jason Wessel955b61e2010-08-05 09:22:23 -05005864 if (trace_find_next_entry_inc(&iter) != NULL) {
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005865 int ret;
5866
5867 ret = print_trace_line(&iter);
5868 if (ret != TRACE_TYPE_NO_CONSUME)
5869 trace_consume(&iter);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005870 }
Steven Rostedtb892e5c2012-03-01 22:06:48 -05005871 touch_nmi_watchdog();
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005872
5873 trace_printk_seq(&iter.seq);
5874 }
5875
5876 if (!cnt)
5877 printk(KERN_TRACE " (ftrace buffer empty)\n");
5878 else
5879 printk(KERN_TRACE "---------------------------------\n");
5880
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005881 out_enable:
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005882 /* Re-enable tracing if requested */
5883 if (!disable_tracing) {
5884 trace_flags |= old_userobj;
5885
5886 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005887 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005888 }
5889 tracing_on();
5890 }
5891
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005892 out:
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01005893 arch_spin_unlock(&ftrace_dump_lock);
Steven Rostedtcd891ae2009-04-28 11:39:34 -04005894 local_irq_restore(flags);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005895}
5896
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005897/* By default: disable tracing after the dump */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005898void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005899{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02005900 __ftrace_dump(true, oops_dump_mode);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005901}
Paul E. McKenneya8eecf22011-10-02 11:01:15 -07005902EXPORT_SYMBOL_GPL(ftrace_dump);
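/*
 * Note (hedged): ftrace_dump() is what the panic and die notifiers
 * above call when ftrace_dump_on_oops is set; that flag is normally
 * enabled via the "ftrace_dump_on_oops" kernel command-line option
 * or the kernel.ftrace_dump_on_oops sysctl.
 */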
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01005903
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005904__init static int tracer_alloc_buffers(void)
5905{
Steven Rostedt73c51622009-03-11 13:42:01 -04005906 int ring_buf_size;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305907 int ret = -ENOMEM;
5908
David Sharp750912f2010-12-08 13:46:47 -08005909
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305910 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
5911 goto out;
5912
5913 if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
5914 goto out_free_buffer_mask;
5915
Steven Rostedt07d777f2011-09-22 14:01:55 -04005916 /* Only allocate trace_printk buffers if a trace_printk exists */
5917 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04005918 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04005919 trace_printk_init_buffers();
5920
Steven Rostedt73c51622009-03-11 13:42:01 -04005921 /* To save memory, keep the ring buffer size to its minimum */
5922 if (ring_buffer_expanded)
5923 ring_buf_size = trace_buf_size;
5924 else
5925 ring_buf_size = 1;
5926
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305927 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
5928 cpumask_copy(tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005929
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005930 raw_spin_lock_init(&global_trace.start_lock);
5931
Steven Rostedtab464282008-05-12 21:21:00 +02005932 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05005933 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04005934 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
5935 WARN_ON(1);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305936 goto out_free_cpumask;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04005937 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04005938
Steven Rostedt499e5472012-02-22 15:50:28 -05005939 if (global_trace.buffer_disabled)
5940 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04005941
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005942 trace_init_cmdlines();
5943
Frédéric Weisbecker43a15382008-09-21 20:16:30 +02005944 register_tracer(&nop_trace);
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05005945
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005946 global_trace.current_trace = &nop_trace;
5947
Steven Rostedt60a11772008-05-12 21:20:44 +02005948 /* All seems OK, enable tracing */
5949 tracing_disabled = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04005950
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005951 atomic_notifier_chain_register(&panic_notifier_list,
5952 &trace_panic_notifier);
5953
5954 register_die_notifier(&trace_die_notifier);
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01005955
Steven Rostedtae63b312012-05-03 23:09:03 -04005956 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
5957
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005958 /* Holder for file callbacks */
5959 global_trace.trace_cpu.cpu = RING_BUFFER_ALL_CPUS;
5960 global_trace.trace_cpu.tr = &global_trace;
5961
Steven Rostedtae63b312012-05-03 23:09:03 -04005962 INIT_LIST_HEAD(&global_trace.systems);
5963 INIT_LIST_HEAD(&global_trace.events);
5964 list_add(&global_trace.list, &ftrace_trace_arrays);
5965
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005966 while (trace_boot_options) {
5967 char *option;
5968
5969 option = strsep(&trace_boot_options, ",");
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005970 trace_set_options(&global_trace, option);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04005971 }
5972
Frederic Weisbecker2fc1dfb2009-03-16 01:45:03 +01005973 return 0;
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04005974
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305975out_free_cpumask:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005976 free_percpu(global_trace.trace_buffer.data);
5977#ifdef CONFIG_TRACER_MAX_TRACE
5978 free_percpu(global_trace.max_buffer.data);
5979#endif
Rusty Russell9e01c1b2009-01-01 10:12:22 +10305980 free_cpumask_var(tracing_cpumask);
5981out_free_buffer_mask:
5982 free_cpumask_var(tracing_buffer_mask);
5983out:
5984 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005985}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05005986
5987__init static int clear_boot_tracer(void)
5988{
5989 /*
5990	 * The default bootup tracer name is stored in an init section
5991	 * and this function runs as a late initcall. If the boot
5992	 * tracer was never registered, clear the pointer out, to
5993	 * prevent a later registration from accessing the memory that
5994	 * is about to be freed.
5995 */
5996 if (!default_bootup_tracer)
5997 return 0;
5998
5999 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
6000 default_bootup_tracer);
6001 default_bootup_tracer = NULL;
6002
6003 return 0;
6004}
6005
Frédéric Weisbeckerb5ad3842008-09-23 11:34:32 +01006006early_initcall(tracer_alloc_buffers);
6007fs_initcall(tracer_init_debugfs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05006008late_initcall(clear_boot_tracer);