/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

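/*
 * Layout sketch (editor's illustration, not part of the original
 * source): an array holding N saved maps is really N + 2 items:
 *
 *	[ head | map 0 | map 1 | ... | map N-1 | tail ]
 *
 * where head.length == N, head.mod names the owning module (NULL if
 * built in), and tail.next points at the head item of the next array.
 */
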
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
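
/*
 * Editor's note (usage sketch): on the kernel command line a bare
 * "ftrace_dump_on_oops" selects DUMP_ALL, while
 * "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG, matching the
 * parsing in set_ftrace_dump_on_oops() above.
 */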

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 * A) the page of the consumed events may become a normal page
 *    (not reader page) in ring buffer, and this page will be rewritten
 *    by the events producer.
 * B) The page of the consumed events may become a page for splice_read,
 *    and this page will be returned to system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
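
/*
 * Usage sketch (editor's illustration): a reader consuming a single
 * CPU's buffer brackets the access with these primitives:
 *
 *	trace_access_lock(cpu);
 *	... read events from the @cpu ring buffer ...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes all_cpu_access_lock for
 * write on SMP, excluding every per-cpu reader at once.
 */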

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
#endif

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
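
/*
 * Usage sketch (editor's illustration): a caller that can sleep may
 * pre-allocate the spare buffer once, then snapshot from atomic
 * context later:
 *
 *	tracing_alloc_snapshot();	// sleepable setup path
 *	...
 *	tracing_snapshot();		// safe at the trigger point
 *
 * or simply call tracing_snapshot_alloc() where sleeping is allowed.
 */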

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
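
/*
 * Editor's note: memparse() accepts the usual K/M/G suffixes, so boot
 * parameters such as "trace_buf_size=1441792" or "trace_buf_size=16M"
 * are both valid ways to set the per-cpu buffer size in bytes.
 */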

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	"function-trace",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};
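
/*
 * Editor's note: these entries pair each clock callback with the name
 * user space selects it by, so e.g. a "trace_clock=global" boot option
 * (see set_trace_boot_clock() above) effectively selects
 * trace_clock_global().
 */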

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
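
/*
 * Usage sketch (editor's illustration): a tracefs write() handler
 * typically drives the parser like
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	if (read >= 0 && trace_parser_loaded(&parser))
 *		// act on the NUL-terminated token in parser.buffer
 *	trace_parser_put(&parser);
 *
 * where trace_parser_loaded() (declared in trace.h) reports whether a
 * complete token was accumulated.
 */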

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}

void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}

#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

Steven Rostedt25b0b442008-05-12 21:21:00 +02001356/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001357static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001358
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001359static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001360{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001361 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1362}
1363
1364static inline void set_cmdline(int idx, const char *cmdline)
1365{
1366 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1367}
1368
1369static int allocate_cmdlines_buffer(unsigned int val,
1370 struct saved_cmdlines_buffer *s)
1371{
1372 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1373 GFP_KERNEL);
1374 if (!s->map_cmdline_to_pid)
1375 return -ENOMEM;
1376
1377 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1378 if (!s->saved_cmdlines) {
1379 kfree(s->map_cmdline_to_pid);
1380 return -ENOMEM;
1381 }
1382
1383 s->cmdline_idx = 0;
1384 s->cmdline_num = val;
1385 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1386 sizeof(s->map_pid_to_cmdline));
1387 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1388 val * sizeof(*s->map_cmdline_to_pid));
1389
1390 return 0;
1391}
1392
1393static int trace_create_savedcmd(void)
1394{
1395 int ret;
1396
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001397 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001398 if (!savedcmd)
1399 return -ENOMEM;
1400
1401 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1402 if (ret < 0) {
1403 kfree(savedcmd);
1404 savedcmd = NULL;
1405 return -ENOMEM;
1406 }
1407
1408 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001409}
1410
Carsten Emdeb5130b12009-09-13 01:43:07 +02001411int is_tracing_stopped(void)
1412{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001413 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001414}
1415
Steven Rostedt0f048702008-11-05 16:05:44 -05001416/**
1417 * tracing_start - quick start of the tracer
1418 *
1419 * If tracing is enabled but was stopped by tracing_stop,
1420 * this will start the tracer back up.
1421 */
1422void tracing_start(void)
1423{
1424 struct ring_buffer *buffer;
1425 unsigned long flags;
1426
1427 if (tracing_disabled)
1428 return;
1429
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001430 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1431 if (--global_trace.stop_count) {
1432 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001433 /* Someone screwed up their debugging */
1434 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001435 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001436 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001437 goto out;
1438 }
1439
Steven Rostedta2f80712010-03-12 19:56:00 -05001440 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001441 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001442
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001443 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001444 if (buffer)
1445 ring_buffer_record_enable(buffer);
1446
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001447#ifdef CONFIG_TRACER_MAX_TRACE
1448 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001449 if (buffer)
1450 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001451#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001452
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001453 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001454
Steven Rostedt0f048702008-11-05 16:05:44 -05001455 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001456 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1457}
1458
1459static void tracing_start_tr(struct trace_array *tr)
1460{
1461 struct ring_buffer *buffer;
1462 unsigned long flags;
1463
1464 if (tracing_disabled)
1465 return;
1466
1467 /* If global, we need to also start the max tracer */
1468 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1469 return tracing_start();
1470
1471 raw_spin_lock_irqsave(&tr->start_lock, flags);
1472
1473 if (--tr->stop_count) {
1474 if (tr->stop_count < 0) {
1475 /* Someone screwed up their debugging */
1476 WARN_ON_ONCE(1);
1477 tr->stop_count = 0;
1478 }
1479 goto out;
1480 }
1481
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001482 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001483 if (buffer)
1484 ring_buffer_record_enable(buffer);
1485
1486 out:
1487 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001488}
1489
1490/**
1491 * tracing_stop - quick stop of the tracer
1492 *
1493 * Light weight way to stop tracing. Use in conjunction with
1494 * tracing_start.
1495 */
1496void tracing_stop(void)
1497{
1498 struct ring_buffer *buffer;
1499 unsigned long flags;
1500
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001501 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1502 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001503 goto out;
1504
Steven Rostedta2f80712010-03-12 19:56:00 -05001505 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001506 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001507
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001508 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001509 if (buffer)
1510 ring_buffer_record_disable(buffer);
1511
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001512#ifdef CONFIG_TRACER_MAX_TRACE
1513 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001514 if (buffer)
1515 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001516#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001517
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001518 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001519
Steven Rostedt0f048702008-11-05 16:05:44 -05001520 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001521 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1522}
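
/*
 * Example (illustrative sketch, assuming a hypothetical helper): the
 * tracing_stop()/tracing_start() pair brackets a section whose events
 * should not be recorded. The stop_count handling above lets the pair
 * nest safely across callers.
 */
#if 0	/* example only */
static void example_quiet_section(void)
{
	tracing_stop();
	do_something_noisy();	/* hypothetical, runs untraced */
	tracing_start();
}
#endif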
1523
1524static void tracing_stop_tr(struct trace_array *tr)
1525{
1526 struct ring_buffer *buffer;
1527 unsigned long flags;
1528
1529 /* If global, we need to also stop the max tracer */
1530 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1531 return tracing_stop();
1532
1533 raw_spin_lock_irqsave(&tr->start_lock, flags);
1534 if (tr->stop_count++)
1535 goto out;
1536
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001537 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001538 if (buffer)
1539 ring_buffer_record_disable(buffer);
1540
1541 out:
1542 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001543}
1544
Ingo Molnare309b412008-05-12 21:20:51 +02001545void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001546
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001547static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001548{
Carsten Emdea635cf02009-03-18 09:00:41 +01001549 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001550
1551 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001552 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001553
1554 /*
1555 * It's not the end of the world if we don't get
1556 * the lock, but we also don't want to spin
1557 * nor do we want to disable interrupts,
1558 * so if we miss here, then better luck next time.
1559 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001560 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001561 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001562
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001563 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001564 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001565 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001566
Carsten Emdea635cf02009-03-18 09:00:41 +01001567 /*
1568 * Check whether the cmdline buffer at idx has a pid
1569 * mapped. We are going to overwrite that entry so we
1570 * need to clear the map_pid_to_cmdline. Otherwise we
1571 * would read the new comm for the old pid.
1572 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001573 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001574 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001575 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001577 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1578 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001579
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001580 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001581 }
1582
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001583 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001584
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001585 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001586
1587 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588}
1589
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001590static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001591{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001592 unsigned map;
1593
Steven Rostedt4ca53082009-03-16 19:20:15 -04001594 if (!pid) {
1595 strcpy(comm, "<idle>");
1596 return;
1597 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001598
Steven Rostedt74bf4072010-01-25 15:11:53 -05001599 if (WARN_ON_ONCE(pid < 0)) {
1600 strcpy(comm, "<XXX>");
1601 return;
1602 }
1603
Steven Rostedt4ca53082009-03-16 19:20:15 -04001604 if (pid > PID_MAX_DEFAULT) {
1605 strcpy(comm, "<...>");
1606 return;
1607 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001608
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001609 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001610 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001611 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001612 else
1613 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001614}
1615
1616void trace_find_cmdline(int pid, char comm[])
1617{
1618 preempt_disable();
1619 arch_spin_lock(&trace_cmdline_lock);
1620
1621 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001622
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001623 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001624 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001625}
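
/*
 * Example (illustrative sketch): resolving a recorded pid back to its
 * command name. The buffer must hold TASK_COMM_LEN bytes; pids without
 * a saved mapping come back as "<...>".
 */
#if 0	/* example only */
static void example_print_comm(struct seq_file *m, int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	seq_printf(m, "%s-%d\n", comm, pid);
}
#endif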
1626
Ingo Molnare309b412008-05-12 21:20:51 +02001627void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001628{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001629 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001630 return;
1631
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001632 if (!__this_cpu_read(trace_cmdline_save))
1633 return;
1634
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001635 if (trace_save_cmdline(tsk))
1636 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001637}
1638
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001639void
Steven Rostedt38697052008-10-01 13:14:09 -04001640tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1641 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001642{
1643 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001644
Steven Rostedt777e2082008-09-29 23:02:42 -04001645 entry->preempt_count = pc & 0xff;
1646 entry->pid = (tsk) ? tsk->pid : 0;
1647 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001648#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001649 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001650#else
1651 TRACE_FLAG_IRQS_NOSUPPORT |
1652#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001653 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1654 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001655 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1656 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001657}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001658EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001659
Steven Rostedte77405a2009-09-02 14:17:06 -04001660struct ring_buffer_event *
1661trace_buffer_lock_reserve(struct ring_buffer *buffer,
1662 int type,
1663 unsigned long len,
1664 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001665{
1666 struct ring_buffer_event *event;
1667
Steven Rostedte77405a2009-09-02 14:17:06 -04001668 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001669 if (event != NULL) {
1670 struct trace_entry *ent = ring_buffer_event_data(event);
1671
1672 tracing_generic_entry_update(ent, flags, pc);
1673 ent->type = type;
1674 }
1675
1676 return event;
1677}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001678
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001679void
1680__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1681{
1682 __this_cpu_write(trace_cmdline_save, true);
1683 ring_buffer_unlock_commit(buffer, event);
1684}
1685
Steven Rostedte77405a2009-09-02 14:17:06 -04001686static inline void
1687__trace_buffer_unlock_commit(struct ring_buffer *buffer,
1688 struct ring_buffer_event *event,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001689 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001690{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001691 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001692
Steven Rostedte77405a2009-09-02 14:17:06 -04001693 ftrace_trace_stack(buffer, flags, 6, pc);
1694 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001695}
1696
Steven Rostedte77405a2009-09-02 14:17:06 -04001697void trace_buffer_unlock_commit(struct ring_buffer *buffer,
1698 struct ring_buffer_event *event,
1699 unsigned long flags, int pc)
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001700{
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001701 __trace_buffer_unlock_commit(buffer, event, flags, pc);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001702}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001703EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001704
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001705static struct ring_buffer *temp_buffer;
1706
Steven Rostedtef5580d2009-02-27 19:38:04 -05001707struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001708trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001709 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001710 int type, unsigned long len,
1711 unsigned long flags, int pc)
1712{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001713 struct ring_buffer_event *entry;
1714
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001715 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001716 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001717 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001718 /*
1719 * If tracing is off, but we have triggers enabled
1720 * we still need to look at the event data. Use the temp_buffer
1721	 * to store the trace event for the trigger to use. It's recursion
1722	 * safe and will not be recorded anywhere.
1723 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04001724 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001725 *current_rb = temp_buffer;
1726 entry = trace_buffer_lock_reserve(*current_rb,
1727 type, len, flags, pc);
1728 }
1729 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001730}
1731EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1732
1733struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001734trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1735 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001736 unsigned long flags, int pc)
1737{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001738 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001739 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001740 type, len, flags, pc);
1741}
Steven Rostedt94487d62009-05-05 19:22:53 -04001742EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001743
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001744void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
1745 struct ring_buffer_event *event,
1746 unsigned long flags, int pc,
1747 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001748{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001749 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001750
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -04001751 if (trace_flags & TRACE_ITER_STACKTRACE)
1752 __ftrace_trace_stack(buffer, flags, 0, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001753 ftrace_trace_userstack(buffer, flags, pc);
1754}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001755EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001756
Steven Rostedte77405a2009-09-02 14:17:06 -04001757void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1758 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001759{
Steven Rostedte77405a2009-09-02 14:17:06 -04001760 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001761}
Steven Rostedt12acd472009-04-17 16:01:56 -04001762EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001763
Ingo Molnare309b412008-05-12 21:20:51 +02001764void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001765trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001766 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1767 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001768{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001769 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001770 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001771 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001772 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001773
Steven Rostedtd7690412008-10-01 00:29:53 -04001774 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001775 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001776 return;
1777
Steven Rostedte77405a2009-09-02 14:17:06 -04001778 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001779 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001780 if (!event)
1781 return;
1782 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001783 entry->ip = ip;
1784 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001785
Tom Zanussif306cc82013-10-24 08:34:17 -05001786 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001787 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001788}
1789
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001790#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001791
1792#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1793struct ftrace_stack {
1794 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1795};
1796
1797static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1798static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1799
Steven Rostedte77405a2009-09-02 14:17:06 -04001800static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001801 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001802 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001803{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001804 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001805 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001806 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001807 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001808 int use_stack;
1809 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001810
1811 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001812 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001813
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001814 /*
1815	 * Since events can happen in NMIs, there's no safe way to
1816	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1817	 * or NMI comes in, it will just have to use the default
1818	 * FTRACE_STACK_ENTRIES.
1819 */
1820 preempt_disable_notrace();
1821
Shan Wei82146522012-11-19 13:21:01 +08001822 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001823 /*
1824 * We don't need any atomic variables, just a barrier.
1825 * If an interrupt comes in, we don't care, because it would
1826 * have exited and put the counter back to what we want.
1827 * We just need a barrier to keep gcc from moving things
1828 * around.
1829 */
1830 barrier();
1831 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001832 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001833 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1834
1835 if (regs)
1836 save_stack_trace_regs(regs, &trace);
1837 else
1838 save_stack_trace(&trace);
1839
1840 if (trace.nr_entries > size)
1841 size = trace.nr_entries;
1842 } else
1843 /* From now on, use_stack is a boolean */
1844 use_stack = 0;
1845
1846 size *= sizeof(unsigned long);
1847
1848 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1849 sizeof(*entry) + size, flags, pc);
1850 if (!event)
1851 goto out;
1852 entry = ring_buffer_event_data(event);
1853
1854 memset(&entry->caller, 0, size);
1855
1856 if (use_stack)
1857 memcpy(&entry->caller, trace.entries,
1858 trace.nr_entries * sizeof(unsigned long));
1859 else {
1860 trace.max_entries = FTRACE_STACK_ENTRIES;
1861 trace.entries = entry->caller;
1862 if (regs)
1863 save_stack_trace_regs(regs, &trace);
1864 else
1865 save_stack_trace(&trace);
1866 }
1867
1868 entry->size = trace.nr_entries;
1869
Tom Zanussif306cc82013-10-24 08:34:17 -05001870 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001871 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001872
1873 out:
1874 /* Again, don't let gcc optimize things here */
1875 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001876 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001877 preempt_enable_notrace();
1878
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001879}
1880
Steven Rostedte77405a2009-09-02 14:17:06 -04001881void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
1882 int skip, int pc)
Steven Rostedt53614992009-01-15 19:12:40 -05001883{
1884 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1885 return;
1886
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001887 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
Steven Rostedt53614992009-01-15 19:12:40 -05001888}
1889
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001890void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1891 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001892{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001893 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001894}
1895
Steven Rostedt03889382009-12-11 09:48:22 -05001896/**
1897 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001898 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001899 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001900void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001901{
1902 unsigned long flags;
1903
1904 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001905 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001906
1907 local_save_flags(flags);
1908
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001909 /*
1910	 * Skip 3 more; that seems to get us to the caller of
1911	 * this function.
1912 */
1913 skip += 3;
1914 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1915 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001916}
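
/*
 * Example (illustrative sketch, with a hypothetical condition): a
 * debugging call site recording how it was reached. With a skip of 0,
 * the adjustment above starts the trace at the caller of
 * trace_dump_stack().
 */
#if 0	/* example only */
static void example_debug_path(bool suspicious)
{
	if (suspicious)
		trace_dump_stack(0);
}
#endif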
1917
Steven Rostedt91e86e52010-11-10 12:56:12 +01001918static DEFINE_PER_CPU(int, user_stack_count);
1919
Steven Rostedte77405a2009-09-02 14:17:06 -04001920void
1921ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001922{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001923 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001924 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001925 struct userstack_entry *entry;
1926 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001927
1928 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1929 return;
1930
Steven Rostedtb6345872010-03-12 20:03:30 -05001931 /*
1932	 * NMIs cannot handle page faults, even with fixups.
1933	 * Saving the user stack can (and often does) fault.
1934 */
1935 if (unlikely(in_nmi()))
1936 return;
1937
Steven Rostedt91e86e52010-11-10 12:56:12 +01001938 /*
1939 * prevent recursion, since the user stack tracing may
1940 * trigger other kernel events.
1941 */
1942 preempt_disable();
1943 if (__this_cpu_read(user_stack_count))
1944 goto out;
1945
1946 __this_cpu_inc(user_stack_count);
1947
Steven Rostedte77405a2009-09-02 14:17:06 -04001948 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001949 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001950 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001951 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001952 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001953
Steven Rostedt48659d32009-09-11 11:36:23 -04001954 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001955 memset(&entry->caller, 0, sizeof(entry->caller));
1956
1957 trace.nr_entries = 0;
1958 trace.max_entries = FTRACE_STACK_ENTRIES;
1959 trace.skip = 0;
1960 trace.entries = entry->caller;
1961
1962 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001963 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001964 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001965
Li Zefan1dbd1952010-12-09 15:47:56 +08001966 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001967 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001968 out:
1969 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001970}
1971
Hannes Eder4fd27352009-02-10 19:44:12 +01001972#ifdef UNUSED
1973static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001974{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001975 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001976}
Hannes Eder4fd27352009-02-10 19:44:12 +01001977#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001978
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001979#endif /* CONFIG_STACKTRACE */
1980
Steven Rostedt07d777f2011-09-22 14:01:55 -04001981/* created for use with alloc_percpu */
1982struct trace_buffer_struct {
1983 char buffer[TRACE_BUF_SIZE];
1984};
1985
1986static struct trace_buffer_struct *trace_percpu_buffer;
1987static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1988static struct trace_buffer_struct *trace_percpu_irq_buffer;
1989static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1990
1991/*
1992 * The buffer used is dependent on the context. There is a per cpu
1993 * buffer for normal context, softirq context, hard irq context and
1994 * for NMI context. This allows for lockless recording.
1995 *
1996 * Note, if the buffers fail to be allocated, then this returns NULL
1997 */
1998static char *get_trace_buf(void)
1999{
2000 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002001
2002 /*
2003 * If we have allocated per cpu buffers, then we do not
2004 * need to do any locking.
2005 */
2006 if (in_nmi())
2007 percpu_buffer = trace_percpu_nmi_buffer;
2008 else if (in_irq())
2009 percpu_buffer = trace_percpu_irq_buffer;
2010 else if (in_softirq())
2011 percpu_buffer = trace_percpu_sirq_buffer;
2012 else
2013 percpu_buffer = trace_percpu_buffer;
2014
2015 if (!percpu_buffer)
2016 return NULL;
2017
Shan Weid8a03492012-11-13 09:53:04 +08002018 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002019}
2020
2021static int alloc_percpu_trace_buffer(void)
2022{
2023 struct trace_buffer_struct *buffers;
2024 struct trace_buffer_struct *sirq_buffers;
2025 struct trace_buffer_struct *irq_buffers;
2026 struct trace_buffer_struct *nmi_buffers;
2027
2028 buffers = alloc_percpu(struct trace_buffer_struct);
2029 if (!buffers)
2030 goto err_warn;
2031
2032 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2033 if (!sirq_buffers)
2034 goto err_sirq;
2035
2036 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2037 if (!irq_buffers)
2038 goto err_irq;
2039
2040 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2041 if (!nmi_buffers)
2042 goto err_nmi;
2043
2044 trace_percpu_buffer = buffers;
2045 trace_percpu_sirq_buffer = sirq_buffers;
2046 trace_percpu_irq_buffer = irq_buffers;
2047 trace_percpu_nmi_buffer = nmi_buffers;
2048
2049 return 0;
2050
2051 err_nmi:
2052 free_percpu(irq_buffers);
2053 err_irq:
2054 free_percpu(sirq_buffers);
2055 err_sirq:
2056 free_percpu(buffers);
2057 err_warn:
2058 WARN(1, "Could not allocate percpu trace_printk buffer");
2059 return -ENOMEM;
2060}
2061
Steven Rostedt81698832012-10-11 10:15:05 -04002062static int buffers_allocated;
2063
Steven Rostedt07d777f2011-09-22 14:01:55 -04002064void trace_printk_init_buffers(void)
2065{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002066 if (buffers_allocated)
2067 return;
2068
2069 if (alloc_percpu_trace_buffer())
2070 return;
2071
Steven Rostedt2184db42014-05-28 13:14:40 -04002072 /* trace_printk() is for debug use only. Don't use it in production. */
2073
Borislav Petkov69a1c992015-01-27 17:17:20 +01002074 pr_warning("\n");
2075 pr_warning("**********************************************************\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002076 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2077 pr_warning("** **\n");
2078 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2079 pr_warning("** **\n");
2080 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
Frans Klavereff264e2014-11-07 15:53:44 +01002081 pr_warning("** unsafe for production use. **\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002082 pr_warning("** **\n");
2083 pr_warning("** If you see this message and you are not debugging **\n");
2084 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2085 pr_warning("** **\n");
2086 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2087 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002088
Steven Rostedtb382ede62012-10-10 21:44:34 -04002089	/* Expand the buffers to their set size */
2090 tracing_update_buffers();
2091
Steven Rostedt07d777f2011-09-22 14:01:55 -04002092 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002093
2094 /*
2095 * trace_printk_init_buffers() can be called by modules.
2096 * If that happens, then we need to start cmdline recording
2097	 * directly here. If the global_trace.buffer is already
2098	 * allocated, then this was called by module code.
2099 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002100 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002101 tracing_start_cmdline_record();
2102}
2103
2104void trace_printk_start_comm(void)
2105{
2106 /* Start tracing comms if trace printk is set */
2107 if (!buffers_allocated)
2108 return;
2109 tracing_start_cmdline_record();
2110}
2111
2112static void trace_printk_start_stop_comm(int enabled)
2113{
2114 if (!buffers_allocated)
2115 return;
2116
2117 if (enabled)
2118 tracing_start_cmdline_record();
2119 else
2120 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002121}
2122
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002123/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002124 * trace_vbprintk - write a binary msg to the tracing buffer
 * @ip: the instruction pointer of the caller
 * @fmt: the printf-style format string
 * @args: the arguments for @fmt, as a va_list
2126 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002127int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002128{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002129 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002130 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002131 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002132 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002133 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002134 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002135 char *tbuffer;
2136 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002137
2138 if (unlikely(tracing_selftest_running || tracing_disabled))
2139 return 0;
2140
2141 /* Don't pollute graph traces with trace_vprintk internals */
2142 pause_graph_tracing();
2143
2144 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002145 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002146
Steven Rostedt07d777f2011-09-22 14:01:55 -04002147 tbuffer = get_trace_buf();
2148 if (!tbuffer) {
2149 len = 0;
2150 goto out;
2151 }
2152
2153 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2154
2155 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002156 goto out;
2157
Steven Rostedt07d777f2011-09-22 14:01:55 -04002158 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002159 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002160 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002161 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2162 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002163 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002164 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002165 entry = ring_buffer_event_data(event);
2166 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002167 entry->fmt = fmt;
2168
Steven Rostedt07d777f2011-09-22 14:01:55 -04002169 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002170 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002171 __buffer_unlock_commit(buffer, event);
Steven Rostedtd9313692010-01-06 17:27:11 -05002172 ftrace_trace_stack(buffer, flags, 6, pc);
2173 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002174
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002175out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002176 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002177 unpause_graph_tracing();
2178
2179 return len;
2180}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002181EXPORT_SYMBOL_GPL(trace_vbprintk);
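
/*
 * Example (illustrative sketch): callers normally reach
 * trace_vbprintk() through the trace_printk() macro, which supplies
 * _THIS_IP_ and hands the va_list down via __trace_bprintk().
 */
#if 0	/* example only */
static void example_count_event(int count, unsigned int qid)
{
	trace_printk("processed %d packets on queue %u\n", count, qid);
}
#endif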
2182
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002183static int
2184__trace_array_vprintk(struct ring_buffer *buffer,
2185 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002186{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002187 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002188 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002189 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002190 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002191 unsigned long flags;
2192 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002193
2194 if (tracing_disabled || tracing_selftest_running)
2195 return 0;
2196
Steven Rostedt07d777f2011-09-22 14:01:55 -04002197 /* Don't pollute graph traces with trace_vprintk internals */
2198 pause_graph_tracing();
2199
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002200 pc = preempt_count();
2201 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002202
2204 tbuffer = get_trace_buf();
2205 if (!tbuffer) {
2206 len = 0;
2207 goto out;
2208 }
2209
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002210 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002211
Steven Rostedt07d777f2011-09-22 14:01:55 -04002212 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002213 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002214 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002215 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002216 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002217 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002218 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002219 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002220
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002221 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002222 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002223 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002224 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002225 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002226 out:
2227 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002228 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002229
2230 return len;
2231}
Steven Rostedt659372d2009-09-03 19:11:07 -04002232
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002233int trace_array_vprintk(struct trace_array *tr,
2234 unsigned long ip, const char *fmt, va_list args)
2235{
2236 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2237}
2238
2239int trace_array_printk(struct trace_array *tr,
2240 unsigned long ip, const char *fmt, ...)
2241{
2242 int ret;
2243 va_list ap;
2244
2245 if (!(trace_flags & TRACE_ITER_PRINTK))
2246 return 0;
2247
2248 va_start(ap, fmt);
2249 ret = trace_array_vprintk(tr, ip, fmt, ap);
2250 va_end(ap);
2251 return ret;
2252}
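
/*
 * Example (illustrative sketch, assuming @tr refers to an existing
 * instance): writing into a specific trace_array rather than the
 * global buffer. The write is still gated on TRACE_ITER_PRINTK.
 */
#if 0	/* example only */
static void example_instance_event(struct trace_array *tr, int value)
{
	trace_array_printk(tr, _THIS_IP_, "instance event: %d\n", value);
}
#endif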
2253
2254int trace_array_printk_buf(struct ring_buffer *buffer,
2255 unsigned long ip, const char *fmt, ...)
2256{
2257 int ret;
2258 va_list ap;
2259
2260 if (!(trace_flags & TRACE_ITER_PRINTK))
2261 return 0;
2262
2263 va_start(ap, fmt);
2264 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2265 va_end(ap);
2266 return ret;
2267}
2268
Steven Rostedt659372d2009-09-03 19:11:07 -04002269int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2270{
Steven Rostedta813a152009-10-09 01:41:35 -04002271 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002272}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002273EXPORT_SYMBOL_GPL(trace_vprintk);
2274
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002275static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002276{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002277 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2278
Steven Rostedt5a90f572008-09-03 17:42:51 -04002279 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002280 if (buf_iter)
2281 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002282}
2283
Ingo Molnare309b412008-05-12 21:20:51 +02002284static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002285peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2286 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002287{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002288 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002289 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002290
Steven Rostedtd7690412008-10-01 00:29:53 -04002291 if (buf_iter)
2292 event = ring_buffer_iter_peek(buf_iter, ts);
2293 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002294 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002295 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002296
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002297 if (event) {
2298 iter->ent_size = ring_buffer_event_length(event);
2299 return ring_buffer_event_data(event);
2300 }
2301 iter->ent_size = 0;
2302 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002303}
Steven Rostedtd7690412008-10-01 00:29:53 -04002304
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002305static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002306__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2307 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002308{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002309 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002310 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002311 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002312 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002313 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002314 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002315 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002316 int cpu;
2317
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002318 /*
2319	 * If we are in a per_cpu trace file, don't bother iterating over
2320	 * all the cpus; just peek at that cpu directly.
2321 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002322 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002323 if (ring_buffer_empty_cpu(buffer, cpu_file))
2324 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002325 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002326 if (ent_cpu)
2327 *ent_cpu = cpu_file;
2328
2329 return ent;
2330 }
2331
Steven Rostedtab464282008-05-12 21:21:00 +02002332 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002333
2334 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002335 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002336
Steven Rostedtbc21b472010-03-31 19:49:26 -04002337 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002338
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002339 /*
2340 * Pick the entry with the smallest timestamp:
2341 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002342 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002343 next = ent;
2344 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002345 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002346 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002347 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002348 }
2349 }
2350
Steven Rostedt12b5da32012-03-27 10:43:28 -04002351 iter->ent_size = next_size;
2352
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353 if (ent_cpu)
2354 *ent_cpu = next_cpu;
2355
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002356 if (ent_ts)
2357 *ent_ts = next_ts;
2358
Steven Rostedtbc21b472010-03-31 19:49:26 -04002359 if (missing_events)
2360 *missing_events = next_lost;
2361
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002362 return next;
2363}
2364
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002365/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002366struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2367 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002368{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002369 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002370}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002371
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002372/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002373void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002374{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002375 iter->ent = __find_next_entry(iter, &iter->cpu,
2376 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002377
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002378 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002379 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002380
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002381 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002382}
2383
Ingo Molnare309b412008-05-12 21:20:51 +02002384static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002385{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002386 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002387 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002388}
2389
Ingo Molnare309b412008-05-12 21:20:51 +02002390static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002391{
2392 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002393 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002394 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002395
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002396 WARN_ON_ONCE(iter->leftover);
2397
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002398 (*pos)++;
2399
2400 /* can't go backwards */
2401 if (iter->idx > i)
2402 return NULL;
2403
2404 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002405 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002406 else
2407 ent = iter;
2408
2409 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002410 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002411
2412 iter->pos = *pos;
2413
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002414 return ent;
2415}
2416
Jason Wessel955b61e2010-08-05 09:22:23 -05002417void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002418{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002419 struct ring_buffer_event *event;
2420 struct ring_buffer_iter *buf_iter;
2421 unsigned long entries = 0;
2422 u64 ts;
2423
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002424 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002425
Steven Rostedt6d158a82012-06-27 20:46:14 -04002426 buf_iter = trace_buffer_iter(iter, cpu);
2427 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002428 return;
2429
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002430 ring_buffer_iter_reset(buf_iter);
2431
2432 /*
2433	 * With the max latency tracers, it can happen that a reset
2434	 * never took place on a cpu. This is evidenced by the
2435	 * timestamp being before the start of the buffer.
2436 */
2437 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002438 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002439 break;
2440 entries++;
2441 ring_buffer_read(buf_iter, NULL);
2442 }
2443
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002444 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002445}
2446
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002447/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002448 * The current tracer is copied to avoid global locking
2449 * all around.
2450 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002451static void *s_start(struct seq_file *m, loff_t *pos)
2452{
2453 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002454 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002455 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002456 void *p = NULL;
2457 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002458 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002459
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002460 /*
2461	 * Copy the tracer to avoid using a global lock all around.
2462	 * iter->trace is a copy of current_trace; the name pointer can be
2463	 * compared directly instead of using strcmp(), as iter->trace->name
2464	 * will point to the same string as current_trace->name.
2465 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002466 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002467 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2468 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002469 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002470
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002471#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002472 if (iter->snapshot && iter->trace->use_max_tr)
2473 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002474#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002475
2476 if (!iter->snapshot)
2477 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002478
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002479 if (*pos != iter->pos) {
2480 iter->ent = NULL;
2481 iter->cpu = 0;
2482 iter->idx = -1;
2483
Steven Rostedtae3b5092013-01-23 15:22:59 -05002484 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002485 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002486 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002487 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002488 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002489
Lai Jiangshanac91d852010-03-02 17:54:50 +08002490 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002491 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2492 ;
2493
2494 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002495 /*
2496 * If we overflowed the seq_file before, then we want
2497 * to just reuse the trace_seq buffer again.
2498 */
2499 if (iter->leftover)
2500 p = iter;
2501 else {
2502 l = *pos - 1;
2503 p = s_next(m, p, &l);
2504 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002505 }
2506
Lai Jiangshan4f535962009-05-18 19:35:34 +08002507 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002508 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002509 return p;
2510}
2511
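/*
 * s_start() above and s_stop() below bracket every read of the "trace"
 * file through the seq_file protocol: the core calls s_start(), then
 * alternates s_show()/s_next() per record, then s_stop().  The
 * leftover handling lets a line that overflowed the seq_file buffer
 * be replayed on the next read() instead of being dropped.
 */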
2512static void s_stop(struct seq_file *m, void *p)
2513{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002514 struct trace_iterator *iter = m->private;
2515
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002516#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002517 if (iter->snapshot && iter->trace->use_max_tr)
2518 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002519#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002520
2521 if (!iter->snapshot)
2522 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002523
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002524 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002525 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002526}
2527
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002528static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002529get_total_entries(struct trace_buffer *buf,
2530 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002531{
2532 unsigned long count;
2533 int cpu;
2534
2535 *total = 0;
2536 *entries = 0;
2537
2538 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002539 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002540 /*
2541 * If this buffer has skipped entries, then we hold all
2542 * entries for the trace and we need to ignore the
2543 * ones before the time stamp.
2544 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002545 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2546 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002547 /* total is the same as the entries */
2548 *total += count;
2549 } else
2550 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002551 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002552 *entries += count;
2553 }
2554}
2555
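/*
 * Example of the two counters computed above (illustrative numbers):
 * a CPU buffer holding 100 events after overwriting 40 older ones
 * yields entries = 100 and total = 140 (100 plus the overrun count),
 * unless skipped_entries is set, in which case both counters exclude
 * the events recorded before the last reset.
 */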
Ingo Molnare309b412008-05-12 21:20:51 +02002556static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002557{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002558 seq_puts(m, "# _------=> CPU# \n"
2559 "# / _-----=> irqs-off \n"
2560 "# | / _----=> need-resched \n"
2561 "# || / _---=> hardirq/softirq \n"
2562 "# ||| / _--=> preempt-depth \n"
2563 "# |||| / delay \n"
2564 "# cmd pid ||||| time | caller \n"
2565 "# \\ / ||||| \\ | / \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002566}
2567
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002568static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002569{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002570 unsigned long total;
2571 unsigned long entries;
2572
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002573 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002574 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2575 entries, total, num_online_cpus());
2576 seq_puts(m, "#\n");
2577}
2578
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002579static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002580{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002581 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002582 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
2583 "# | | | | |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002584}
2585
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002586static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002587{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002588 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002589 seq_puts(m, "# _-----=> irqs-off\n"
2590 "# / _----=> need-resched\n"
2591 "# | / _---=> hardirq/softirq\n"
2592 "# || / _--=> preempt-depth\n"
2593 "# ||| / delay\n"
2594 "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
2595 "# | | | |||| | |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05002596}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002597
Jiri Olsa62b915f2010-04-02 19:01:22 +02002598void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002599print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2600{
2601 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002602 struct trace_buffer *buf = iter->trace_buffer;
2603 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002604 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002605 unsigned long entries;
2606 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002607 const char *name = "preemption";
2608
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002609 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002610
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002611 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002612
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002613 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002614 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002615 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002616 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002617 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002618 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002619 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002620 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002621 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002622 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002623#if defined(CONFIG_PREEMPT_NONE)
2624 "server",
2625#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2626 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002627#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002628 "preempt",
2629#else
2630 "unknown",
2631#endif
2632 /* These are reserved for later use */
2633 0, 0, 0, 0);
2634#ifdef CONFIG_SMP
2635 seq_printf(m, " #P:%d)\n", num_online_cpus());
2636#else
2637 seq_puts(m, ")\n");
2638#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002639 seq_puts(m, "# -----------------\n");
2640 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002641 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002642 data->comm, data->pid,
2643 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002644 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002645 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002646
2647 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002648 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002649 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2650 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002651 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002652 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2653 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002654 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002655 }
2656
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002657 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002658}
2659
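/*
 * The banner assembled above prints roughly as (values illustrative):
 *
 * # irqsoff latency trace v1.1.5 on 4.1.0
 * # --------------------------------------------------------------
 * # latency: 259 us, #4/4, CPU#2 | (M:preempt VP:0, KP:0, SP:0 HP:0 #P:4)
 * #    -----------------
 * #    | task: ps-6143 (uid:0 nice:0 policy:0 rt_prio:0)
 * #    -----------------
 * #  => started at: __lock_task_sighand
 * #  => ended at:   _raw_spin_unlock_irqrestore
 */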
Steven Rostedta3097202008-11-07 22:36:02 -05002660static void test_cpu_buff_start(struct trace_iterator *iter)
2661{
2662 struct trace_seq *s = &iter->seq;
2663
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002664 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2665 return;
2666
2667 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2668 return;
2669
Rusty Russell44623442009-01-01 10:12:23 +10302670 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002671 return;
2672
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002673 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002674 return;
2675
Rusty Russell44623442009-01-01 10:12:23 +10302676 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002677
2678 /* Don't print started cpu buffer for the first entry of the trace */
2679 if (iter->idx > 1)
2680 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2681 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002682}
2683
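/*
 * With the "annotate" trace option set, the check above emits a
 * "##### CPU N buffer started ####" marker the first time output
 * switches to a CPU whose buffer only started filling mid-trace,
 * so readers know that earlier events from that CPU are missing.
 */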
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002684static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002685{
Steven Rostedt214023c2008-05-12 21:20:46 +02002686 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002687 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002688 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002689 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002690
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002691 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002692
Steven Rostedta3097202008-11-07 22:36:02 -05002693 test_cpu_buff_start(iter);
2694
Steven Rostedtf633cef2008-12-23 23:24:13 -05002695 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002696
2697 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002698 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2699 trace_print_lat_context(iter);
2700 else
2701 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002702 }
2703
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002704 if (trace_seq_has_overflowed(s))
2705 return TRACE_TYPE_PARTIAL_LINE;
2706
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002707 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002708 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002709
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002710 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002711
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002712 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002713}
2714
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002715static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002716{
2717 struct trace_seq *s = &iter->seq;
2718 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002719 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002720
2721 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002722
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002723 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2724 trace_seq_printf(s, "%d %d %llu ",
2725 entry->pid, iter->cpu, iter->ts);
2726
2727 if (trace_seq_has_overflowed(s))
2728 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002729
Steven Rostedtf633cef2008-12-23 23:24:13 -05002730 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002731 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002732 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002733
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002734 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002735
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002736 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002737}
2738
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002739static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002740{
2741 struct trace_seq *s = &iter->seq;
2742 unsigned char newline = '\n';
2743 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002744 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002745
2746 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002747
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002748 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002749 SEQ_PUT_HEX_FIELD(s, entry->pid);
2750 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2751 SEQ_PUT_HEX_FIELD(s, iter->ts);
2752 if (trace_seq_has_overflowed(s))
2753 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002754 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002755
Steven Rostedtf633cef2008-12-23 23:24:13 -05002756 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002757 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002758 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002759 if (ret != TRACE_TYPE_HANDLED)
2760 return ret;
2761 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002762
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002763 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002764
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002765 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002766}
2767
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002768static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002769{
2770 struct trace_seq *s = &iter->seq;
2771 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002772 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002773
2774 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002775
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002776 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002777 SEQ_PUT_FIELD(s, entry->pid);
2778 SEQ_PUT_FIELD(s, iter->cpu);
2779 SEQ_PUT_FIELD(s, iter->ts);
2780 if (trace_seq_has_overflowed(s))
2781 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002782 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002783
Steven Rostedtf633cef2008-12-23 23:24:13 -05002784 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002785 return event ? event->funcs->binary(iter, 0, event) :
2786 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002787}
2788
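/*
 * The four formatters above correspond to the default, "raw", "hex"
 * and "bin" output modes selectable through trace_options;
 * print_trace_line() below picks one of them for each entry.
 */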
Jiri Olsa62b915f2010-04-02 19:01:22 +02002789int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002790{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002791 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002792 int cpu;
2793
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002794 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002795 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002796 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002797 buf_iter = trace_buffer_iter(iter, cpu);
2798 if (buf_iter) {
2799 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002800 return 0;
2801 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002802 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002803 return 0;
2804 }
2805 return 1;
2806 }
2807
Steven Rostedtab464282008-05-12 21:21:00 +02002808 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002809 buf_iter = trace_buffer_iter(iter, cpu);
2810 if (buf_iter) {
2811 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002812 return 0;
2813 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002814 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002815 return 0;
2816 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002817 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002818
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002819 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002820}
2821
Lai Jiangshan4f535962009-05-18 19:35:34 +08002822/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002823enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002824{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002825 enum print_line_t ret;
2826
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002827 if (iter->lost_events) {
2828 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2829 iter->cpu, iter->lost_events);
2830 if (trace_seq_has_overflowed(&iter->seq))
2831 return TRACE_TYPE_PARTIAL_LINE;
2832 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04002833
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002834 if (iter->trace && iter->trace->print_line) {
2835 ret = iter->trace->print_line(iter);
2836 if (ret != TRACE_TYPE_UNHANDLED)
2837 return ret;
2838 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002839
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002840 if (iter->ent->type == TRACE_BPUTS &&
2841 trace_flags & TRACE_ITER_PRINTK &&
2842 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2843 return trace_print_bputs_msg_only(iter);
2844
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002845 if (iter->ent->type == TRACE_BPRINT &&
2846 trace_flags & TRACE_ITER_PRINTK &&
2847 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002848 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002849
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002850 if (iter->ent->type == TRACE_PRINT &&
2851 trace_flags & TRACE_ITER_PRINTK &&
2852 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002853 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002854
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002855 if (trace_flags & TRACE_ITER_BIN)
2856 return print_bin_fmt(iter);
2857
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002858 if (trace_flags & TRACE_ITER_HEX)
2859 return print_hex_fmt(iter);
2860
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002861 if (trace_flags & TRACE_ITER_RAW)
2862 return print_raw_fmt(iter);
2863
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002864 return print_trace_fmt(iter);
2865}
2866
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002867void trace_latency_header(struct seq_file *m)
2868{
2869 struct trace_iterator *iter = m->private;
2870
2871 /* print nothing if the buffers are empty */
2872 if (trace_empty(iter))
2873 return;
2874
2875 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2876 print_trace_header(m, iter);
2877
2878 if (!(trace_flags & TRACE_ITER_VERBOSE))
2879 print_lat_help_header(m);
2880}
2881
Jiri Olsa62b915f2010-04-02 19:01:22 +02002882void trace_default_header(struct seq_file *m)
2883{
2884 struct trace_iterator *iter = m->private;
2885
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002886 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2887 return;
2888
Jiri Olsa62b915f2010-04-02 19:01:22 +02002889 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2890 /* print nothing if the buffers are empty */
2891 if (trace_empty(iter))
2892 return;
2893 print_trace_header(m, iter);
2894 if (!(trace_flags & TRACE_ITER_VERBOSE))
2895 print_lat_help_header(m);
2896 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002897 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2898 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002899 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002900 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002901 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002902 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002903 }
2904}
2905
Steven Rostedte0a413f2011-09-29 21:26:16 -04002906static void test_ftrace_alive(struct seq_file *m)
2907{
2908 if (!ftrace_is_dead())
2909 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002910 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2911 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002912}
2913
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002914#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002915static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002916{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002917 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2918 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2919 "# Takes a snapshot of the main buffer.\n"
2920 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2921 "# (Doesn't have to be '2' works with any number that\n"
2922 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002923}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002924
2925static void show_snapshot_percpu_help(struct seq_file *m)
2926{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002927 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002928#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002929 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2930 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002931#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002932 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2933 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002934#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002935 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2936 "# (Doesn't have to be '2' works with any number that\n"
2937 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002938}
2939
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002940static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2941{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002942 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002943 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002944 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002945 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002946
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002947 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002948 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2949 show_snapshot_main_help(m);
2950 else
2951 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002952}
2953#else
2954/* Should never be called */
2955static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2956#endif
2957
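/*
 * Example session matching the help text above (assuming tracefs is
 * mounted at /sys/kernel/tracing):
 *
 *   echo 1 > snapshot    # allocate the buffer and take a snapshot
 *   cat snapshot         # read the frozen copy while tracing goes on
 *   echo 0 > snapshot    # free the snapshot buffer again
 */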
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002958static int s_show(struct seq_file *m, void *v)
2959{
2960 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002961 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002962
2963 if (iter->ent == NULL) {
2964 if (iter->tr) {
2965 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2966 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002967 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002968 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002969 if (iter->snapshot && trace_empty(iter))
2970 print_snapshot_help(m, iter);
2971 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002972 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002973 else
2974 trace_default_header(m);
2975
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002976 } else if (iter->leftover) {
2977 /*
2978 * If we filled the seq_file buffer earlier, we
2979 * want to just show it now.
2980 */
2981 ret = trace_print_seq(m, &iter->seq);
2982
2983 /* ret should this time be zero, but you never know */
2984 iter->leftover = ret;
2985
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002986 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002987 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002988 ret = trace_print_seq(m, &iter->seq);
2989 /*
2990 * If we overflow the seq_file buffer, then it will
2991	 * ask us for this data again at the next s_start().
2992 * Use that instead.
2993 * ret is 0 if seq_file write succeeded.
2994 * -1 otherwise.
2995 */
2996 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002997 }
2998
2999 return 0;
3000}
3001
Oleg Nesterov649e9c72013-07-23 17:25:54 +02003002/*
3003 * Should be used after trace_array_get(), trace_types_lock
3004 * ensures that i_cdev was already initialized.
3005 */
3006static inline int tracing_get_cpu(struct inode *inode)
3007{
3008 if (inode->i_cdev) /* See trace_create_cpu_file() */
3009 return (long)inode->i_cdev - 1;
3010 return RING_BUFFER_ALL_CPUS;
3011}
3012
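/*
 * The encoding above relies on trace_create_cpu_file() storing
 * cpu + 1 in i_cdev: a per-CPU file such as per_cpu/cpu2/trace
 * decodes to cpu 2, while the top-level files leave i_cdev NULL
 * and therefore map to RING_BUFFER_ALL_CPUS.
 */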
James Morris88e9d342009-09-22 16:43:43 -07003013static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003014 .start = s_start,
3015 .next = s_next,
3016 .stop = s_stop,
3017 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003018};
3019
Ingo Molnare309b412008-05-12 21:20:51 +02003020static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003021__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003022{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003023 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003024 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003025 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003026
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003027 if (tracing_disabled)
3028 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003029
Jiri Olsa50e18b92012-04-25 10:23:39 +02003030 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003031 if (!iter)
3032 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003033
Gil Fruchter72917232015-06-09 10:32:35 +03003034 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
Steven Rostedt6d158a82012-06-27 20:46:14 -04003035 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003036 if (!iter->buffer_iter)
3037 goto release;
3038
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003039 /*
3040 * We make a copy of the current tracer to avoid concurrent
3041	 * changes to it while we are reading.
3042 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003043 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003044 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003045 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003046 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003047
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003048 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003049
Li Zefan79f55992009-06-15 14:58:26 +08003050 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003051 goto fail;
3052
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003053 iter->tr = tr;
3054
3055#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003056 /* Currently only the top directory has a snapshot */
3057 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003058 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003059 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003060#endif
3061 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003062 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003063 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003064 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003065 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003066
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003067 /* Notify the tracer early; before we stop tracing. */
3068 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003069 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003070
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003071 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003072 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003073 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3074
David Sharp8be07092012-11-13 12:18:22 -08003075 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003076 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003077 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3078
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003079 /* stop the trace while dumping if we are not opening "snapshot" */
3080 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003081 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003082
Steven Rostedtae3b5092013-01-23 15:22:59 -05003083 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003084 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003085 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003086 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003087 }
3088 ring_buffer_read_prepare_sync();
3089 for_each_tracing_cpu(cpu) {
3090 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003091 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003092 }
3093 } else {
3094 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003095 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003096 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003097 ring_buffer_read_prepare_sync();
3098 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003099 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003100 }
3101
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003102 mutex_unlock(&trace_types_lock);
3103
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003104 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003105
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003106 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003107 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003108 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003109 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003110release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003111 seq_release_private(inode, file);
3112 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003113}
3114
3115int tracing_open_generic(struct inode *inode, struct file *filp)
3116{
Steven Rostedt60a11772008-05-12 21:20:44 +02003117 if (tracing_disabled)
3118 return -ENODEV;
3119
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003120 filp->private_data = inode->i_private;
3121 return 0;
3122}
3123
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003124bool tracing_is_disabled(void)
3125{
3126	return tracing_disabled ? true : false;
3127}
3128
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003129/*
3130 * Open and update trace_array ref count.
3131 * Must have the current trace_array passed to it.
3132 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003133static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003134{
3135 struct trace_array *tr = inode->i_private;
3136
3137 if (tracing_disabled)
3138 return -ENODEV;
3139
3140 if (trace_array_get(tr) < 0)
3141 return -ENODEV;
3142
3143 filp->private_data = inode->i_private;
3144
3145 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003146}
3147
Hannes Eder4fd27352009-02-10 19:44:12 +01003148static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003149{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003150 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003151 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003152 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003153 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003154
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003155 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003156 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003157 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003158 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003159
Oleg Nesterov6484c712013-07-23 17:26:10 +02003160 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003161 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003162 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003163
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003164 for_each_tracing_cpu(cpu) {
3165 if (iter->buffer_iter[cpu])
3166 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3167 }
3168
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003169 if (iter->trace && iter->trace->close)
3170 iter->trace->close(iter);
3171
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003172 if (!iter->snapshot)
3173 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003174 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003175
3176 __trace_array_put(tr);
3177
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003178 mutex_unlock(&trace_types_lock);
3179
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003180 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003181 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003182 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003183 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003184 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003185
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003186 return 0;
3187}
3188
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003189static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3190{
3191 struct trace_array *tr = inode->i_private;
3192
3193 trace_array_put(tr);
3194 return 0;
3195}
3196
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003197static int tracing_single_release_tr(struct inode *inode, struct file *file)
3198{
3199 struct trace_array *tr = inode->i_private;
3200
3201 trace_array_put(tr);
3202
3203 return single_release(inode, file);
3204}
3205
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003206static int tracing_open(struct inode *inode, struct file *file)
3207{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003208 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003209 struct trace_iterator *iter;
3210 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003211
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003212 if (trace_array_get(tr) < 0)
3213 return -ENODEV;
3214
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003215 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003216 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3217 int cpu = tracing_get_cpu(inode);
3218
3219 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003220 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003221 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003222 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003223 }
3224
3225 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003226 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003227 if (IS_ERR(iter))
3228 ret = PTR_ERR(iter);
3229 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3230 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3231 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003232
3233 if (ret < 0)
3234 trace_array_put(tr);
3235
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003236 return ret;
3237}
3238
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003239/*
3240 * Some tracers are not suitable for instance buffers.
3241 * A tracer is always available for the global array (toplevel),
3242 * and is available to an instance only if it explicitly allows it.
3243 */
3244static bool
3245trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3246{
3247 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3248}
3249
3250/* Find the next tracer that this trace array may use */
3251static struct tracer *
3252get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3253{
3254 while (t && !trace_ok_for_array(t, tr))
3255 t = t->next;
3256
3257 return t;
3258}
3259
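/*
 * Illustration: after "mkdir instances/foo", reading the instance's
 * available_tracers file walks the tracer list through the helpers
 * above and lists only tracers that set allow_instances (the
 * top-level array continues to see every registered tracer).
 */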
Ingo Molnare309b412008-05-12 21:20:51 +02003260static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003261t_next(struct seq_file *m, void *v, loff_t *pos)
3262{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003263 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003264 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003265
3266 (*pos)++;
3267
3268 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003269 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003270
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003271 return t;
3272}
3273
3274static void *t_start(struct seq_file *m, loff_t *pos)
3275{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003276 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003277 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003278 loff_t l = 0;
3279
3280 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003281
3282 t = get_tracer_for_array(tr, trace_types);
3283 for (; t && l < *pos; t = t_next(m, t, &l))
3284 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003285
3286 return t;
3287}
3288
3289static void t_stop(struct seq_file *m, void *p)
3290{
3291 mutex_unlock(&trace_types_lock);
3292}
3293
3294static int t_show(struct seq_file *m, void *v)
3295{
3296 struct tracer *t = v;
3297
3298 if (!t)
3299 return 0;
3300
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003301 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003302 if (t->next)
3303 seq_putc(m, ' ');
3304 else
3305 seq_putc(m, '\n');
3306
3307 return 0;
3308}
3309
James Morris88e9d342009-09-22 16:43:43 -07003310static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003311 .start = t_start,
3312 .next = t_next,
3313 .stop = t_stop,
3314 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003315};
3316
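/*
 * These seq operations back the "available_tracers" file; t_show()
 * emits each usable tracer name separated by spaces, ending in a
 * newline, e.g. something like "function_graph function nop".
 */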
3317static int show_traces_open(struct inode *inode, struct file *file)
3318{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003319 struct trace_array *tr = inode->i_private;
3320 struct seq_file *m;
3321 int ret;
3322
Steven Rostedt60a11772008-05-12 21:20:44 +02003323 if (tracing_disabled)
3324 return -ENODEV;
3325
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003326 ret = seq_open(file, &show_traces_seq_ops);
3327 if (ret)
3328 return ret;
3329
3330 m = file->private_data;
3331 m->private = tr;
3332
3333 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003334}
3335
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003336static ssize_t
3337tracing_write_stub(struct file *filp, const char __user *ubuf,
3338 size_t count, loff_t *ppos)
3339{
3340 return count;
3341}
3342
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003343loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003344{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003345 int ret;
3346
Slava Pestov364829b2010-11-24 15:13:16 -08003347 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003348 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003349 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003350 file->f_pos = ret = 0;
3351
3352 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003353}
3354
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003355static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003356 .open = tracing_open,
3357 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003358 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003359 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003360 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003361};
3362
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003363static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003364 .open = show_traces_open,
3365 .read = seq_read,
3366 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003367 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003368};
3369
Ingo Molnar36dfe922008-05-12 21:20:52 +02003370/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003371 * The tracer itself will not take this lock, but still we want
3372 * to provide a consistent cpumask to user-space:
3373 */
3374static DEFINE_MUTEX(tracing_cpumask_update_lock);
3375
3376/*
3377 * Temporary storage for the character representation of the
3378 * CPU bitmask (and one more byte for the newline):
3379 */
3380static char mask_str[NR_CPUS + 1];
3381
Ingo Molnarc7078de2008-05-12 21:20:52 +02003382static ssize_t
3383tracing_cpumask_read(struct file *filp, char __user *ubuf,
3384 size_t count, loff_t *ppos)
3385{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003386 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003387 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003388
3389 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003390
Tejun Heo1a402432015-02-13 14:37:39 -08003391 len = snprintf(mask_str, count, "%*pb\n",
3392 cpumask_pr_args(tr->tracing_cpumask));
3393 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003394 count = -EINVAL;
3395 goto out_err;
3396 }
Ingo Molnar36dfe922008-05-12 21:20:52 +02003397 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3398
3399out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003400 mutex_unlock(&tracing_cpumask_update_lock);
3401
3402 return count;
3403}
3404
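/*
 * The "%*pb" format above prints the cpumask as a hex bitmap, so on
 * an 8-CPU machine with all CPUs traced, tracing_cpumask reads back
 * as "ff".  Writing a mask such as "3" through the handler below
 * restricts tracing to CPUs 0 and 1.
 */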
3405static ssize_t
3406tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3407 size_t count, loff_t *ppos)
3408{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003409 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303410 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003411 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303412
3413 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3414 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003415
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303416 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003417 if (err)
3418 goto err_unlock;
3419
Li Zefan215368e2009-06-15 10:56:42 +08003420 mutex_lock(&tracing_cpumask_update_lock);
3421
Steven Rostedta5e25882008-12-02 15:34:05 -05003422 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003423 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003424 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003425 /*
3426 * Increase/decrease the disabled counter if we are
3427 * about to flip a bit in the cpumask:
3428 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003429 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303430 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003431 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3432 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003433 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003434 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303435 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003436 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3437 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003438 }
3439 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003440 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003441 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003442
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003443 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003444
Ingo Molnarc7078de2008-05-12 21:20:52 +02003445 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303446 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003447
Ingo Molnarc7078de2008-05-12 21:20:52 +02003448 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003449
3450err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003451 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003452
3453 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003454}
3455
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003456static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003457 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003458 .read = tracing_cpumask_read,
3459 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003460 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003461 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003462};
3463
Li Zefanfdb372e2009-12-08 11:15:59 +08003464static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003465{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003466 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003467 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003468 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003469 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003470
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003471 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003472 tracer_flags = tr->current_trace->flags->val;
3473 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003474
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003475 for (i = 0; trace_options[i]; i++) {
3476 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003477 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003478 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003479 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003480 }
3481
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003482 for (i = 0; trace_opts[i].name; i++) {
3483 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003484 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003485 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003486 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003487 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003488 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003489
Li Zefanfdb372e2009-12-08 11:15:59 +08003490 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003491}
3492
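/*
 * Reading trace_options therefore yields one flag per line, with a
 * "no" prefix on cleared flags, e.g.:
 *
 *   print-parent
 *   nosym-offset
 *
 * followed by the current tracer's private options in the same style.
 */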
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003493static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003494 struct tracer_flags *tracer_flags,
3495 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003496{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003497 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003498 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003499
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003500 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003501 if (ret)
3502 return ret;
3503
3504 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003505 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003506 else
Zhaolei77708412009-08-07 18:53:21 +08003507 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003508 return 0;
3509}
3510
Li Zefan8d18eaa2009-12-08 11:17:06 +08003511/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003512static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003513{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003514 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003515 struct tracer_flags *tracer_flags = trace->flags;
3516 struct tracer_opt *opts = NULL;
3517 int i;
3518
3519 for (i = 0; tracer_flags->opts[i].name; i++) {
3520 opts = &tracer_flags->opts[i];
3521
3522 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003523 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003524 }
3525
3526 return -EINVAL;
3527}
3528
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003529/* Some tracers require overwrite to stay enabled */
3530int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3531{
3532 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3533 return -1;
3534
3535 return 0;
3536}
3537
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003538int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003539{
3540 /* do nothing if flag is already set */
3541 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003542 return 0;
3543
3544 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003545 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003546 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003547 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003548
3549 if (enabled)
3550 trace_flags |= mask;
3551 else
3552 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003553
3554 if (mask == TRACE_ITER_RECORD_CMD)
3555 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003556
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003557 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003558 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003559#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003560 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003561#endif
3562 }
Steven Rostedt81698832012-10-11 10:15:05 -04003563
3564 if (mask == TRACE_ITER_PRINTK)
3565 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003566
3567 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003568}
3569
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003570static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003571{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003572 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003573 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003574 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003575 int i;
3576
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003577 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003578
Li Zefan8d18eaa2009-12-08 11:17:06 +08003579 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003580 neg = 1;
3581 cmp += 2;
3582 }
3583
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003584 mutex_lock(&trace_types_lock);
3585
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003586 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003587 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003588 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003589 break;
3590 }
3591 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003592
3593 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003594 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003595 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003596
3597 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003598
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003599 return ret;
3600}
3601
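/*
 * trace_set_options() accepts the same tokens the read side prints:
 * writing "sym-offset" sets a core flag, "nosym-offset" clears it,
 * and anything unrecognized is tried as a tracer-specific option
 * before the write fails with -EINVAL.
 */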
3602static ssize_t
3603tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3604 size_t cnt, loff_t *ppos)
3605{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003606 struct seq_file *m = filp->private_data;
3607 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003608 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003609 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003610
3611 if (cnt >= sizeof(buf))
3612 return -EINVAL;
3613
3614 if (copy_from_user(&buf, ubuf, cnt))
3615 return -EFAULT;
3616
Steven Rostedta8dd2172013-01-09 20:54:17 -05003617 buf[cnt] = 0;
3618
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003619 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003620 if (ret < 0)
3621 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003622
Jiri Olsacf8517c2009-10-23 19:36:16 -04003623 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003624
3625 return cnt;
3626}
3627
Li Zefanfdb372e2009-12-08 11:15:59 +08003628static int tracing_trace_options_open(struct inode *inode, struct file *file)
3629{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003630 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003631 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003632
Li Zefanfdb372e2009-12-08 11:15:59 +08003633 if (tracing_disabled)
3634 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003635
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003636 if (trace_array_get(tr) < 0)
3637 return -ENODEV;
3638
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003639 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3640 if (ret < 0)
3641 trace_array_put(tr);
3642
3643 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003644}
3645
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003646static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003647 .open = tracing_trace_options_open,
3648 .read = seq_read,
3649 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003650 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003651 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003652};
3653
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003654static const char readme_msg[] =
3655 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003656 "# echo 0 > tracing_on : quick way to disable tracing\n"
3657 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3658 " Important files:\n"
3659 " trace\t\t\t- The static contents of the buffer\n"
 3660 "\t\t\t To clear the buffer, write into this file: echo > trace\n"
3661 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3662 " current_tracer\t- function and latency tracers\n"
3663 " available_tracers\t- list of configured tracers for current_tracer\n"
3664 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3665 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
 3666 " trace_clock\t\t- change the clock used to order events\n"
3667 " local: Per cpu clock but may not be synced across CPUs\n"
3668 " global: Synced across CPUs but slows tracing down.\n"
3669 " counter: Not a clock, but just an increment\n"
3670 " uptime: Jiffy counter from time of boot\n"
3671 " perf: Same clock that perf events use\n"
3672#ifdef CONFIG_X86_64
3673 " x86-tsc: TSC cycle counter\n"
3674#endif
 3675 "\n trace_marker\t\t- Writing into this file writes into the kernel buffer\n"
3676 " tracing_cpumask\t- Limit which CPUs to trace\n"
3677 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3678 "\t\t\t Remove sub-buffer with rmdir\n"
3679 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003680 "\t\t\t Disable an option by prefixing 'no' to the\n"
3681 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003682 " saved_cmdlines_size\t- echo the number of pid->comm mappings to store in here\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003683#ifdef CONFIG_DYNAMIC_FTRACE
3684 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003685 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3686 "\t\t\t functions\n"
3687 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3688 "\t modules: Can select a group via module\n"
3689 "\t Format: :mod:<module-name>\n"
3690 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3691 "\t triggers: a command to perform when function is hit\n"
3692 "\t Format: <function>:<trigger>[:count]\n"
3693 "\t trigger: traceon, traceoff\n"
3694 "\t\t enable_event:<system>:<event>\n"
3695 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003696#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003697 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003698#endif
3699#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003700 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003701#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003702 "\t\t dump\n"
3703 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003704 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3705 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3706 "\t The first one will disable tracing every time do_fault is hit\n"
3707 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
 3708 "\t The first time do_trap is hit and it disables tracing, the\n"
3709 "\t counter will decrement to 2. If tracing is already disabled,\n"
3710 "\t the counter will not decrement. It only decrements when the\n"
3711 "\t trigger did work\n"
 3712 "\t To remove a trigger without a count:\n"
 3713 "\t echo '!<function>:<trigger>' > set_ftrace_filter\n"
 3714 "\t To remove a trigger with a count:\n"
 3715 "\t echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003716 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003717 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3718 "\t modules: Can select a group via module command :mod:\n"
3719 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003720#endif /* CONFIG_DYNAMIC_FTRACE */
3721#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003722 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3723 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003724#endif
3725#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3726 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003727 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003728 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3729#endif
3730#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003731 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3732 "\t\t\t snapshot buffer. Read the contents for more\n"
3733 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003734#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003735#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003736 " stack_trace\t\t- Shows the max stack trace when active\n"
3737 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003738 "\t\t\t Write into this file to reset the max size (trigger a\n"
3739 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003740#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003741 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3742 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003743#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003744#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003745 " events/\t\t- Directory containing all trace event subsystems:\n"
3746 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3747 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003748 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3749 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003750 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003751 " events/<system>/<event>/\t- Directory containing control files for\n"
3752 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003753 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3754 " filter\t\t- If set, only events passing filter are traced\n"
3755 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003756 "\t Format: <trigger>[:count][if <filter>]\n"
3757 "\t trigger: traceon, traceoff\n"
3758 "\t enable_event:<system>:<event>\n"
3759 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003760#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003761 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003762#endif
3763#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003764 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003765#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003766 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3767 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3768 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3769 "\t events/block/block_unplug/trigger\n"
3770 "\t The first disables tracing every time block_unplug is hit.\n"
3771 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3772 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3773 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3774 "\t Like function triggers, the counter is only decremented if it\n"
3775 "\t enabled or disabled tracing.\n"
3776 "\t To remove a trigger without a count:\n"
 3777 "\t echo '!<trigger>' > <system>/<event>/trigger\n"
 3778 "\t To remove a trigger with a count:\n"
 3779 "\t echo '!<trigger>:0' > <system>/<event>/trigger\n"
3780 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003781;
3782
3783static ssize_t
3784tracing_readme_read(struct file *filp, char __user *ubuf,
3785 size_t cnt, loff_t *ppos)
3786{
3787 return simple_read_from_buffer(ubuf, cnt, ppos,
3788 readme_msg, strlen(readme_msg));
3789}
3790
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003791static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003792 .open = tracing_open_generic,
3793 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003794 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003795};
3796
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003797static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003798{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003799 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003800
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003801 if (*pos || m->count)
3802 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003803
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003804 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003805
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003806 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3807 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003808 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003809 continue;
3810
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003811 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003812 }
3813
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003814 return NULL;
3815}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003816
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003817static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3818{
3819 void *v;
3820 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003821
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003822 preempt_disable();
3823 arch_spin_lock(&trace_cmdline_lock);
3824
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003825 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003826 while (l <= *pos) {
3827 v = saved_cmdlines_next(m, v, &l);
3828 if (!v)
3829 return NULL;
3830 }
3831
3832 return v;
3833}
3834
3835static void saved_cmdlines_stop(struct seq_file *m, void *v)
3836{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003837 arch_spin_unlock(&trace_cmdline_lock);
3838 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003839}
3840
3841static int saved_cmdlines_show(struct seq_file *m, void *v)
3842{
3843 char buf[TASK_COMM_LEN];
3844 unsigned int *pid = v;
3845
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003846 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003847 seq_printf(m, "%d %s\n", *pid, buf);
3848 return 0;
3849}
3850
3851static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3852 .start = saved_cmdlines_start,
3853 .next = saved_cmdlines_next,
3854 .stop = saved_cmdlines_stop,
3855 .show = saved_cmdlines_show,
3856};
3857
3858static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3859{
3860 if (tracing_disabled)
3861 return -ENODEV;
3862
3863 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003864}
3865
3866static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003867 .open = tracing_saved_cmdlines_open,
3868 .read = seq_read,
3869 .llseek = seq_lseek,
3870 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003871};
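
/*
 * Userspace view (illustrative): each line of the saved_cmdlines file
 * is "<pid> <comm>", as printed by saved_cmdlines_show().
 */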
3872
3873static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003874tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3875 size_t cnt, loff_t *ppos)
3876{
3877 char buf[64];
3878 int r;
3879
3880 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003881 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003882 arch_spin_unlock(&trace_cmdline_lock);
3883
3884 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3885}
3886
3887static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3888{
3889 kfree(s->saved_cmdlines);
3890 kfree(s->map_cmdline_to_pid);
3891 kfree(s);
3892}
3893
3894static int tracing_resize_saved_cmdlines(unsigned int val)
3895{
3896 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3897
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003898 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003899 if (!s)
3900 return -ENOMEM;
3901
3902 if (allocate_cmdlines_buffer(val, s) < 0) {
3903 kfree(s);
3904 return -ENOMEM;
3905 }
3906
3907 arch_spin_lock(&trace_cmdline_lock);
3908 savedcmd_temp = savedcmd;
3909 savedcmd = s;
3910 arch_spin_unlock(&trace_cmdline_lock);
3911 free_saved_cmdlines_buffer(savedcmd_temp);
3912
3913 return 0;
3914}
3915
3916static ssize_t
3917tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3918 size_t cnt, loff_t *ppos)
3919{
3920 unsigned long val;
3921 int ret;
3922
3923 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3924 if (ret)
3925 return ret;
3926
 3927 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3928 if (!val || val > PID_MAX_DEFAULT)
3929 return -EINVAL;
3930
3931 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3932 if (ret < 0)
3933 return ret;
3934
3935 *ppos += cnt;
3936
3937 return cnt;
3938}
3939
3940static const struct file_operations tracing_saved_cmdlines_size_fops = {
3941 .open = tracing_open_generic,
3942 .read = tracing_saved_cmdlines_size_read,
3943 .write = tracing_saved_cmdlines_size_write,
3944};
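
/*
 * Example usage (a sketch, assuming tracefs is mounted):
 *
 *   cat saved_cmdlines_size           # current number of entries
 *   echo 1024 > saved_cmdlines_size   # resize the saved pid->comm map
 */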
3945
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04003946#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3947static union trace_enum_map_item *
3948update_enum_map(union trace_enum_map_item *ptr)
3949{
3950 if (!ptr->map.enum_string) {
3951 if (ptr->tail.next) {
3952 ptr = ptr->tail.next;
3953 /* Set ptr to the next real item (skip head) */
3954 ptr++;
 3955 } else {
 3956 return NULL;
 }
3957 }
3958 return ptr;
3959}
3960
3961static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3962{
3963 union trace_enum_map_item *ptr = v;
3964
3965 /*
3966 * Paranoid! If ptr points to end, we don't want to increment past it.
3967 * This really should never happen.
3968 */
3969 ptr = update_enum_map(ptr);
3970 if (WARN_ON_ONCE(!ptr))
3971 return NULL;
3972
3973 ptr++;
3974
3975 (*pos)++;
3976
3977 ptr = update_enum_map(ptr);
3978
3979 return ptr;
3980}
3981
3982static void *enum_map_start(struct seq_file *m, loff_t *pos)
3983{
3984 union trace_enum_map_item *v;
3985 loff_t l = 0;
3986
3987 mutex_lock(&trace_enum_mutex);
3988
3989 v = trace_enum_maps;
3990 if (v)
3991 v++;
3992
 3993 while (v && l < *pos)
 3994 v = enum_map_next(m, v, &l);
3996
3997 return v;
3998}
3999
4000static void enum_map_stop(struct seq_file *m, void *v)
4001{
4002 mutex_unlock(&trace_enum_mutex);
4003}
4004
4005static int enum_map_show(struct seq_file *m, void *v)
4006{
4007 union trace_enum_map_item *ptr = v;
4008
4009 seq_printf(m, "%s %ld (%s)\n",
4010 ptr->map.enum_string, ptr->map.enum_value,
4011 ptr->map.system);
4012
4013 return 0;
4014}
4015
4016static const struct seq_operations tracing_enum_map_seq_ops = {
4017 .start = enum_map_start,
4018 .next = enum_map_next,
4019 .stop = enum_map_stop,
4020 .show = enum_map_show,
4021};
4022
4023static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4024{
4025 if (tracing_disabled)
4026 return -ENODEV;
4027
4028 return seq_open(filp, &tracing_enum_map_seq_ops);
4029}
4030
4031static const struct file_operations tracing_enum_map_fops = {
4032 .open = tracing_enum_map_open,
4033 .read = seq_read,
4034 .llseek = seq_lseek,
4035 .release = seq_release,
4036};
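
/*
 * Userspace view (illustrative): each line of the enum_map file is
 * "<enum_string> <value> (<system>)", as printed by enum_map_show().
 */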
4037
4038static inline union trace_enum_map_item *
4039trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4040{
4041 /* Return tail of array given the head */
4042 return ptr + ptr->head.length + 1;
4043}
4044
4045static void
4046trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4047 int len)
4048{
4049 struct trace_enum_map **stop;
4050 struct trace_enum_map **map;
4051 union trace_enum_map_item *map_array;
4052 union trace_enum_map_item *ptr;
4053
4054 stop = start + len;
4055
4056 /*
4057 * The trace_enum_maps contains the map plus a head and tail item,
4058 * where the head holds the module and length of array, and the
4059 * tail holds a pointer to the next list.
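 *
 * An illustrative sketch of one such allocation (len == 3 here):
 *
 *   [ head | map 0 | map 1 | map 2 | tail ] --tail.next--> next array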
4060 */
4061 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4062 if (!map_array) {
4063 pr_warning("Unable to allocate trace enum mapping\n");
4064 return;
4065 }
4066
4067 mutex_lock(&trace_enum_mutex);
4068
 4069 if (!trace_enum_maps) {
 4070 trace_enum_maps = map_array;
 4071 } else {
4072 ptr = trace_enum_maps;
4073 for (;;) {
4074 ptr = trace_enum_jmp_to_tail(ptr);
4075 if (!ptr->tail.next)
4076 break;
4077 ptr = ptr->tail.next;
4079 }
4080 ptr->tail.next = map_array;
4081 }
4082 map_array->head.mod = mod;
4083 map_array->head.length = len;
4084 map_array++;
4085
4086 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4087 map_array->map = **map;
4088 map_array++;
4089 }
4090 memset(map_array, 0, sizeof(*map_array));
4091
4092 mutex_unlock(&trace_enum_mutex);
4093}
4094
4095static void trace_create_enum_file(struct dentry *d_tracer)
4096{
4097 trace_create_file("enum_map", 0444, d_tracer,
4098 NULL, &tracing_enum_map_fops);
4099}
4100
4101#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4102static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4103static inline void trace_insert_enum_map_file(struct module *mod,
4104 struct trace_enum_map **start, int len) { }
4105#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4106
4107static void trace_insert_enum_map(struct module *mod,
4108 struct trace_enum_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004109{
4110 struct trace_enum_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004111
4112 if (len <= 0)
4113 return;
4114
4115 map = start;
4116
4117 trace_event_enum_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004118
4119 trace_insert_enum_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004120}
4121
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004122static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004123tracing_set_trace_read(struct file *filp, char __user *ubuf,
4124 size_t cnt, loff_t *ppos)
4125{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004126 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004127 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004128 int r;
4129
4130 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004131 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004132 mutex_unlock(&trace_types_lock);
4133
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004134 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004135}
4136
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004137int tracer_init(struct tracer *t, struct trace_array *tr)
4138{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004139 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004140 return t->init(tr);
4141}
4142
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004143static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004144{
4145 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004146
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004147 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004148 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004149}
4150
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004151#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004152/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004153static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4154 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004155{
4156 int cpu, ret = 0;
4157
4158 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4159 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004160 ret = ring_buffer_resize(trace_buf->buffer,
4161 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004162 if (ret < 0)
4163 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004164 per_cpu_ptr(trace_buf->data, cpu)->entries =
4165 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004166 }
4167 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004168 ret = ring_buffer_resize(trace_buf->buffer,
4169 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004170 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004171 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4172 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004173 }
4174
4175 return ret;
4176}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004177#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004178
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004179static int __tracing_resize_ring_buffer(struct trace_array *tr,
4180 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004181{
4182 int ret;
4183
4184 /*
 4185 * If the kernel or user changes the size of the ring buffer,
Steven Rostedta123c522009-03-12 11:21:08 -04004186 * we use the size that was given, and we can forget about
4187 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004188 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004189 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004190
Steven Rostedtb382ede62012-10-10 21:44:34 -04004191 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004192 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004193 return 0;
4194
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004195 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004196 if (ret < 0)
4197 return ret;
4198
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004199#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004200 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4201 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004202 goto out;
4203
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004204 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004205 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004206 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4207 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004208 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004209 /*
4210 * AARGH! We are left with different
4211 * size max buffer!!!!
4212 * The max buffer is our "snapshot" buffer.
4213 * When a tracer needs a snapshot (one of the
4214 * latency tracers), it swaps the max buffer
 4215 * with the saved snapshot. We succeeded in
 4216 * updating the size of the main buffer, but failed to
4217 * update the size of the max buffer. But when we tried
4218 * to reset the main buffer to the original size, we
4219 * failed there too. This is very unlikely to
4220 * happen, but if it does, warn and kill all
4221 * tracing.
4222 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004223 WARN_ON(1);
4224 tracing_disabled = 1;
4225 }
4226 return ret;
4227 }
4228
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004229 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004230 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004231 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004232 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004233
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004234 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004235#endif /* CONFIG_TRACER_MAX_TRACE */
4236
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004237 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004238 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004239 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004240 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004241
4242 return ret;
4243}
4244
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004245static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4246 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004247{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004248 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004249
4250 mutex_lock(&trace_types_lock);
4251
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004252 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4253 /* make sure, this cpu is enabled in the mask */
4254 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4255 ret = -EINVAL;
4256 goto out;
4257 }
4258 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004259
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004260 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004261 if (ret < 0)
4262 ret = -ENOMEM;
4263
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004264out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004265 mutex_unlock(&trace_types_lock);
4266
4267 return ret;
4268}
4269
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004270
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004271/**
4272 * tracing_update_buffers - used by tracing facility to expand ring buffers
4273 *
 4274 * To save memory when tracing is never used on a system that has it
 4275 * configured in, the ring buffers are set to a minimum size. Once
 4276 * a user starts to use the tracing facility, they need to grow
 4277 * to their default size.
4278 *
4279 * This function is to be called when a tracer is about to be used.
4280 */
4281int tracing_update_buffers(void)
4282{
4283 int ret = 0;
4284
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004285 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004286 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004287 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004288 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004289 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004290
4291 return ret;
4292}
4293
Steven Rostedt577b7852009-02-26 23:43:05 -05004294struct trace_option_dentry;
4295
4296static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004297create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004298
4299static void
4300destroy_trace_option_files(struct trace_option_dentry *topts);
4301
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004302/*
4303 * Used to clear out the tracer before deletion of an instance.
4304 * Must have trace_types_lock held.
4305 */
4306static void tracing_set_nop(struct trace_array *tr)
4307{
4308 if (tr->current_trace == &nop_trace)
4309 return;
4310
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004311 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004312
4313 if (tr->current_trace->reset)
4314 tr->current_trace->reset(tr);
4315
4316 tr->current_trace = &nop_trace;
4317}
4318
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004319static void update_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004320{
Steven Rostedt577b7852009-02-26 23:43:05 -05004321 static struct trace_option_dentry *topts;
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004322
4323 /* Only enable if the directory has been created already. */
4324 if (!tr->dir)
4325 return;
4326
4327 /* Currently, only the top instance has options */
4328 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
4329 return;
4330
4331 destroy_trace_option_files(topts);
4332 topts = create_trace_option_files(tr, t);
4333}
4334
4335static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4336{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004337 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004338#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004339 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004340#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004341 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004342
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004343 mutex_lock(&trace_types_lock);
4344
Steven Rostedt73c51622009-03-11 13:42:01 -04004345 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004346 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004347 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004348 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004349 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004350 ret = 0;
4351 }
4352
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004353 for (t = trace_types; t; t = t->next) {
4354 if (strcmp(t->name, buf) == 0)
4355 break;
4356 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004357 if (!t) {
4358 ret = -EINVAL;
4359 goto out;
4360 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004361 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004362 goto out;
4363
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004364 /* Some tracers are only allowed for the top level buffer */
4365 if (!trace_ok_for_array(t, tr)) {
4366 ret = -EINVAL;
4367 goto out;
4368 }
4369
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004370 /* If trace pipe files are being read, we can't change the tracer */
4371 if (tr->current_trace->ref) {
4372 ret = -EBUSY;
4373 goto out;
4374 }
4375
Steven Rostedt9f029e82008-11-12 15:24:24 -05004376 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004377
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004378 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004379
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004380 if (tr->current_trace->reset)
4381 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004382
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004383 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004384 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004385
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004386#ifdef CONFIG_TRACER_MAX_TRACE
4387 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004388
4389 if (had_max_tr && !t->use_max_tr) {
4390 /*
4391 * We need to make sure that the update_max_tr sees that
4392 * current_trace changed to nop_trace to keep it from
4393 * swapping the buffers after we resize it.
 4394 * The update_max_tr is called with interrupts disabled
 4395 * so a synchronize_sched() is sufficient.
4396 */
4397 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004398 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004399 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004400#endif
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004401 update_tracer_options(tr, t);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004402
4403#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004404 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004405 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004406 if (ret < 0)
4407 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004408 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004409#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004410
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004411 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004412 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004413 if (ret)
4414 goto out;
4415 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004416
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004417 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004418 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004419 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004420 out:
4421 mutex_unlock(&trace_types_lock);
4422
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004423 return ret;
4424}
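
/*
 * Illustrative usage from userspace (a sketch): the current_tracer
 * file is backed by this function via tracing_set_trace_write(), e.g.
 *
 *   cat available_tracers
 *   echo function > current_tracer
 */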
4425
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004426static ssize_t
4427tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4428 size_t cnt, loff_t *ppos)
4429{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004430 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004431 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004432 int i;
4433 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004434 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004435
Steven Rostedt60063a62008-10-28 10:44:24 -04004436 ret = cnt;
4437
Li Zefanee6c2c12009-09-18 14:06:47 +08004438 if (cnt > MAX_TRACER_SIZE)
4439 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004440
4441 if (copy_from_user(&buf, ubuf, cnt))
4442 return -EFAULT;
4443
4444 buf[cnt] = 0;
4445
 4446 /* strip trailing whitespace. */
4447 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4448 buf[i] = 0;
4449
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004450 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004451 if (err)
4452 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004453
Jiri Olsacf8517c2009-10-23 19:36:16 -04004454 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004455
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004456 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004457}
4458
4459static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004460tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4461 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004462{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004463 char buf[64];
4464 int r;
4465
Steven Rostedtcffae432008-05-12 21:21:00 +02004466 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004467 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004468 if (r > sizeof(buf))
4469 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004470 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004471}
4472
4473static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004474tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4475 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004476{
Hannes Eder5e398412009-02-10 19:44:34 +01004477 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004478 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004479
Peter Huewe22fe9b52011-06-07 21:58:27 +02004480 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4481 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004482 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004483
4484 *ptr = val * 1000;
4485
4486 return cnt;
4487}
4488
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004489static ssize_t
4490tracing_thresh_read(struct file *filp, char __user *ubuf,
4491 size_t cnt, loff_t *ppos)
4492{
4493 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4494}
4495
4496static ssize_t
4497tracing_thresh_write(struct file *filp, const char __user *ubuf,
4498 size_t cnt, loff_t *ppos)
4499{
4500 struct trace_array *tr = filp->private_data;
4501 int ret;
4502
4503 mutex_lock(&trace_types_lock);
4504 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4505 if (ret < 0)
4506 goto out;
4507
4508 if (tr->current_trace->update_thresh) {
4509 ret = tr->current_trace->update_thresh(tr);
4510 if (ret < 0)
4511 goto out;
4512 }
4513
4514 ret = cnt;
4515out:
4516 mutex_unlock(&trace_types_lock);
4517
4518 return ret;
4519}
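
/*
 * Example (a sketch): tracing_thresh is read and written in
 * microseconds (tracing_nsecs_write() scales the value by 1000):
 *
 *   echo 100 > tracing_thresh   # 100 usecs, for tracers that honor it
 */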
4520
4521static ssize_t
4522tracing_max_lat_read(struct file *filp, char __user *ubuf,
4523 size_t cnt, loff_t *ppos)
4524{
4525 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4526}
4527
4528static ssize_t
4529tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4530 size_t cnt, loff_t *ppos)
4531{
4532 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4533}
4534
Steven Rostedtb3806b42008-05-12 21:20:46 +02004535static int tracing_open_pipe(struct inode *inode, struct file *filp)
4536{
Oleg Nesterov15544202013-07-23 17:25:57 +02004537 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004538 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004539 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004540
4541 if (tracing_disabled)
4542 return -ENODEV;
4543
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004544 if (trace_array_get(tr) < 0)
4545 return -ENODEV;
4546
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004547 mutex_lock(&trace_types_lock);
4548
Steven Rostedtb3806b42008-05-12 21:20:46 +02004549 /* create a buffer to store the information to pass to userspace */
4550 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004551 if (!iter) {
4552 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004553 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004554 goto out;
4555 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004556
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004557 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004558 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004559
4560 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4561 ret = -ENOMEM;
4562 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304563 }
4564
Steven Rostedta3097202008-11-07 22:36:02 -05004565 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304566 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004567
Steven Rostedt112f38a72009-06-01 15:16:05 -04004568 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4569 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4570
David Sharp8be07092012-11-13 12:18:22 -08004571 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004572 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004573 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4574
Oleg Nesterov15544202013-07-23 17:25:57 +02004575 iter->tr = tr;
4576 iter->trace_buffer = &tr->trace_buffer;
4577 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004578 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004579 filp->private_data = iter;
4580
Steven Rostedt107bad82008-05-12 21:21:01 +02004581 if (iter->trace->pipe_open)
4582 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004583
Arnd Bergmannb4447862010-07-07 23:40:11 +02004584 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004585
4586 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004587out:
4588 mutex_unlock(&trace_types_lock);
4589 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004590
4591fail:
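	/* iter->trace points at the live tracer; it must not be freed */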
4593 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004594 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004595 mutex_unlock(&trace_types_lock);
4596 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004597}
4598
4599static int tracing_release_pipe(struct inode *inode, struct file *file)
4600{
4601 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004602 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004603
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004604 mutex_lock(&trace_types_lock);
4605
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004606 tr->current_trace->ref--;
4607
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004608 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004609 iter->trace->pipe_close(iter);
4610
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004611 mutex_unlock(&trace_types_lock);
4612
Rusty Russell44623442009-01-01 10:12:23 +10304613 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004614 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004615 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004616
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004617 trace_array_put(tr);
4618
Steven Rostedtb3806b42008-05-12 21:20:46 +02004619 return 0;
4620}
4621
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004622static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004623trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004624{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004625 /* Iterators are static, they should be filled or empty */
4626 if (trace_buffer_iter(iter, iter->cpu_file))
4627 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004628
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004629 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004630 /*
4631 * Always select as readable when in blocking mode
4632 */
4633 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004634 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004635 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004636 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004637}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004638
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004639static unsigned int
4640tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4641{
4642 struct trace_iterator *iter = filp->private_data;
4643
4644 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004645}
4646
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004647/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004648static int tracing_wait_pipe(struct file *filp)
4649{
4650 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004651 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004652
4653 while (trace_empty(iter)) {
4654
 4655 if (filp->f_flags & O_NONBLOCK)
 4656 return -EAGAIN;
4658
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004659 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004660 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004661 * We still block if tracing is disabled, but we have never
4662 * read anything. This allows a user to cat this file, and
4663 * then enable tracing. But after we have read something,
4664 * we give an EOF when tracing is again disabled.
4665 *
4666 * iter->pos will be 0 if we haven't read anything.
4667 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004668 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004669 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004670
4671 mutex_unlock(&iter->mutex);
4672
Rabin Vincente30f53a2014-11-10 19:46:34 +01004673 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004674
4675 mutex_lock(&iter->mutex);
4676
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004677 if (ret)
4678 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004679 }
4680
4681 return 1;
4682}
4683
Steven Rostedtb3806b42008-05-12 21:20:46 +02004684/*
4685 * Consumer reader.
4686 */
4687static ssize_t
4688tracing_read_pipe(struct file *filp, char __user *ubuf,
4689 size_t cnt, loff_t *ppos)
4690{
4691 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004692 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004693
4694 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004695 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4696 if (sret != -EBUSY)
4697 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004698
Steven Rostedtf9520752009-03-02 14:04:40 -05004699 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004700
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004701 /*
4702 * Avoid more than one consumer on a single file descriptor
4703 * This is just a matter of traces coherency, the ring buffer itself
4704 * is protected.
4705 */
4706 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004707 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004708 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4709 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004710 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004711 }
4712
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004713waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004714 sret = tracing_wait_pipe(filp);
4715 if (sret <= 0)
4716 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004717
4718 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004719 if (trace_empty(iter)) {
4720 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004721 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004722 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004723
4724 if (cnt >= PAGE_SIZE)
4725 cnt = PAGE_SIZE - 1;
4726
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004727 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004728 memset(&iter->seq, 0,
4729 sizeof(struct trace_iterator) -
4730 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004731 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004732 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004733
Lai Jiangshan4f535962009-05-18 19:35:34 +08004734 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004735 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004736 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004737 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004738 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004739
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004740 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004741 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004742 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004743 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004744 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004745 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004746 if (ret != TRACE_TYPE_NO_CONSUME)
4747 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004748
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004749 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02004750 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004751
4752 /*
4753 * Setting the full flag means we reached the trace_seq buffer
 4754 * size and we should have left via the partial output condition
 4755 * above. One of the trace_seq_* functions is not being used properly.
4756 */
4757 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4758 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004759 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004760 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004761 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004762
Steven Rostedtb3806b42008-05-12 21:20:46 +02004763 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004764 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004765 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05004766 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004767
4768 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004769 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004770 * entries, go back to wait for more entries.
4771 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004772 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004773 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004774
Steven Rostedt107bad82008-05-12 21:21:01 +02004775out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004776 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004777
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004778 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004779}
4780
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004781static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4782 unsigned int idx)
4783{
4784 __free_page(spd->pages[idx]);
4785}
4786
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004787static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004788 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004789 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004790 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004791 .steal = generic_pipe_buf_steal,
4792 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004793};
4794
Steven Rostedt34cd4992009-02-09 12:06:29 -05004795static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004796tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004797{
4798 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004799 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004800 int ret;
4801
4802 /* Seq buffer is page-sized, exactly what we need. */
4803 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004804 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004805 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004806
4807 if (trace_seq_has_overflowed(&iter->seq)) {
4808 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004809 break;
4810 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004811
4812 /*
4813 * This should not be hit, because it should only
4814 * be set if the iter->seq overflowed. But check it
4815 * anyway to be safe.
4816 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05004817 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004818 iter->seq.seq.len = save_len;
4819 break;
4820 }
4821
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004822 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004823 if (rem < count) {
4824 rem = 0;
4825 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004826 break;
4827 }
4828
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004829 if (ret != TRACE_TYPE_NO_CONSUME)
4830 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004831 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004832 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004833 rem = 0;
4834 iter->ent = NULL;
4835 break;
4836 }
4837 }
4838
4839 return rem;
4840}
4841
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004842static ssize_t tracing_splice_read_pipe(struct file *filp,
4843 loff_t *ppos,
4844 struct pipe_inode_info *pipe,
4845 size_t len,
4846 unsigned int flags)
4847{
Jens Axboe35f3d142010-05-20 10:43:18 +02004848 struct page *pages_def[PIPE_DEF_BUFFERS];
4849 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004850 struct trace_iterator *iter = filp->private_data;
4851 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004852 .pages = pages_def,
4853 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004854 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004855 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004856 .flags = flags,
4857 .ops = &tracing_pipe_buf_ops,
4858 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004859 };
4860 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004861 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004862 unsigned int i;
4863
Jens Axboe35f3d142010-05-20 10:43:18 +02004864 if (splice_grow_spd(pipe, &spd))
4865 return -ENOMEM;
4866
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004867 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004868
4869 if (iter->trace->splice_read) {
4870 ret = iter->trace->splice_read(iter, filp,
4871 ppos, pipe, len, flags);
4872 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004873 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004874 }
4875
4876 ret = tracing_wait_pipe(filp);
4877 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004878 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004879
Jason Wessel955b61e2010-08-05 09:22:23 -05004880 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004881 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004882 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004883 }
4884
Lai Jiangshan4f535962009-05-18 19:35:34 +08004885 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004886 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004887
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004888 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004889 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004890 spd.pages[i] = alloc_page(GFP_KERNEL);
4891 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004892 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004893
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004894 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004895
4896 /* Copy the data into the page, so we can start over. */
4897 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004898 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004899 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004900 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004901 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004902 break;
4903 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004904 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004905 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004906
Steven Rostedtf9520752009-03-02 14:04:40 -05004907 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004908 }
4909
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004910 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004911 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004912 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004913
4914 spd.nr_pages = i;
4915
Jens Axboe35f3d142010-05-20 10:43:18 +02004916 ret = splice_to_pipe(pipe, &spd);
4917out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004918 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004919 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004920
Steven Rostedt34cd4992009-02-09 12:06:29 -05004921out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004922 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004923 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004924}
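
/*
 * Example (userspace, hypothetical): splice(2) on trace_pipe ends up in
 * tracing_splice_read_pipe() above and moves formatted trace data into
 * a pipe without an intermediate read/write copy.  A minimal sketch,
 * assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	int pfd[2];
 *	int tfd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *	pipe(pfd);
 *	splice(tfd, NULL, pfd[1], NULL, 65536, 0);
 *	splice(pfd[0], NULL, STDOUT_FILENO, NULL, 65536, 0);
 *
 * Error handling is omitted; older kernels expose the same file under
 * /sys/kernel/debug/tracing instead.
 */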
4925
Steven Rostedta98a3c32008-05-12 21:20:59 +02004926static ssize_t
4927tracing_entries_read(struct file *filp, char __user *ubuf,
4928 size_t cnt, loff_t *ppos)
4929{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004930 struct inode *inode = file_inode(filp);
4931 struct trace_array *tr = inode->i_private;
4932 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004933 char buf[64];
4934 int r = 0;
4935 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004936
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004937 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004938
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004939 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004940 int cpu, buf_size_same;
4941 unsigned long size;
4942
4943 size = 0;
4944 buf_size_same = 1;
4945		/* check if all per-cpu buffer sizes are the same */
4946 for_each_tracing_cpu(cpu) {
4947			/* fill in the size from the first enabled cpu */
4948 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004949 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4950 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004951 buf_size_same = 0;
4952 break;
4953 }
4954 }
4955
4956 if (buf_size_same) {
4957 if (!ring_buffer_expanded)
4958 r = sprintf(buf, "%lu (expanded: %lu)\n",
4959 size >> 10,
4960 trace_buf_size >> 10);
4961 else
4962 r = sprintf(buf, "%lu\n", size >> 10);
4963 } else
4964 r = sprintf(buf, "X\n");
4965 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004966 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004967
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004968 mutex_unlock(&trace_types_lock);
4969
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004970 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4971 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004972}
4973
4974static ssize_t
4975tracing_entries_write(struct file *filp, const char __user *ubuf,
4976 size_t cnt, loff_t *ppos)
4977{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004978 struct inode *inode = file_inode(filp);
4979 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004980 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004981 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004982
Peter Huewe22fe9b52011-06-07 21:58:27 +02004983 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4984 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004985 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004986
4987 /* must have at least 1 entry */
4988 if (!val)
4989 return -EINVAL;
4990
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004991 /* value is in KB */
4992 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004993 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004994 if (ret < 0)
4995 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004996
Jiri Olsacf8517c2009-10-23 19:36:16 -04004997 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004998
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004999 return cnt;
5000}
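
/*
 * Usage sketch for the two handlers above (sizes are in KB; values
 * shown are illustrative):
 *
 *	# cat /sys/kernel/tracing/buffer_size_kb
 *	7 (expanded: 1408)
 *	# echo 4096 > /sys/kernel/tracing/buffer_size_kb
 *
 * The same file exists under per_cpu/cpuN/ to report or resize one
 * CPU's buffer; the top-level read prints "X" when the per-cpu sizes
 * no longer match.
 */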
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005001
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005002static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005003tracing_total_entries_read(struct file *filp, char __user *ubuf,
5004 size_t cnt, loff_t *ppos)
5005{
5006 struct trace_array *tr = filp->private_data;
5007 char buf[64];
5008 int r, cpu;
5009 unsigned long size = 0, expanded_size = 0;
5010
5011 mutex_lock(&trace_types_lock);
5012 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005013 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005014 if (!ring_buffer_expanded)
5015 expanded_size += trace_buf_size >> 10;
5016 }
5017 if (ring_buffer_expanded)
5018 r = sprintf(buf, "%lu\n", size);
5019 else
5020 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5021 mutex_unlock(&trace_types_lock);
5022
5023 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5024}
5025
5026static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005027tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5028 size_t cnt, loff_t *ppos)
5029{
5030 /*
5031	 * There is no need to read what the user has written; this function
5032	 * exists only so that "echo" into this file does not return an error.
5033 */
5034
5035 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005036
5037 return cnt;
5038}
5039
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005040static int
5041tracing_free_buffer_release(struct inode *inode, struct file *filp)
5042{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005043 struct trace_array *tr = inode->i_private;
5044
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005045	/* disable tracing? */
5046 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005047 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005048 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005049 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005050
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005051 trace_array_put(tr);
5052
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005053 return 0;
5054}
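
/*
 * The free_buffer file ties the two handlers above together: writes are
 * accepted but ignored, and the actual work happens on release.  E.g.:
 *
 *	# echo > /sys/kernel/tracing/free_buffer
 *
 * On the close, the ring buffer is resized to 0, freeing its pages,
 * and, if the stop-on-free option is set, tracing is turned off first.
 */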
5055
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005056static ssize_t
5057tracing_mark_write(struct file *filp, const char __user *ubuf,
5058 size_t cnt, loff_t *fpos)
5059{
Steven Rostedtd696b582011-09-22 11:50:27 -04005060 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07005061 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005062 struct ring_buffer_event *event;
5063 struct ring_buffer *buffer;
5064 struct print_entry *entry;
5065 unsigned long irq_flags;
5066 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005067 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04005068 int nr_pages = 1;
5069 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005070 int offset;
5071 int size;
5072 int len;
5073 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005074 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005075
Steven Rostedtc76f0692008-11-07 22:36:02 -05005076 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005077 return -EINVAL;
5078
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005079 if (!(trace_flags & TRACE_ITER_MARKERS))
5080 return -EINVAL;
5081
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005082 if (cnt > TRACE_BUF_SIZE)
5083 cnt = TRACE_BUF_SIZE;
5084
Steven Rostedtd696b582011-09-22 11:50:27 -04005085 /*
5086 * Userspace is injecting traces into the kernel trace buffer.
5087	 * We want to be as non-intrusive as possible.
5088 * To do so, we do not want to allocate any special buffers
5089 * or take any locks, but instead write the userspace data
5090 * straight into the ring buffer.
5091 *
5092	 * First we need to pin the userspace buffer into memory,
5093	 * which it most likely already is, because userspace just
5094	 * referenced it. But there's no guarantee of that. By using get_user_pages_fast()
5095 * and kmap_atomic/kunmap_atomic() we can get access to the
5096 * pages directly. We then write the data directly into the
5097 * ring buffer.
5098 */
5099 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005100
Steven Rostedtd696b582011-09-22 11:50:27 -04005101 /* check if we cross pages */
5102 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5103 nr_pages = 2;
5104
5105 offset = addr & (PAGE_SIZE - 1);
5106 addr &= PAGE_MASK;
5107
5108 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5109 if (ret < nr_pages) {
5110 while (--ret >= 0)
5111 put_page(pages[ret]);
5112 written = -EFAULT;
5113 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005114 }
5115
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005116 for (i = 0; i < nr_pages; i++)
5117 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005118
5119 local_save_flags(irq_flags);
5120 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005121 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04005122 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5123 irq_flags, preempt_count());
5124 if (!event) {
5125 /* Ring buffer disabled, return as if not open for write */
5126 written = -EBADF;
5127 goto out_unlock;
5128 }
5129
5130 entry = ring_buffer_event_data(event);
5131 entry->ip = _THIS_IP_;
5132
5133 if (nr_pages == 2) {
5134 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005135 memcpy(&entry->buf, map_page[0] + offset, len);
5136 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005137 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005138 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005139
5140 if (entry->buf[cnt - 1] != '\n') {
5141 entry->buf[cnt] = '\n';
5142 entry->buf[cnt + 1] = '\0';
5143 } else
5144 entry->buf[cnt] = '\0';
5145
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005146 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005147
5148 written = cnt;
5149
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005150 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005151
Steven Rostedtd696b582011-09-22 11:50:27 -04005152 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08005153 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005154 kunmap_atomic(map_page[i]);
5155 put_page(pages[i]);
5156 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005157 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005158 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005159}
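
/*
 * Example (userspace): annotating a trace from an application.  Writes
 * to trace_marker are injected into the ring buffer as print entries by
 * the function above.  A sketch, error handling omitted:
 *
 *	int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *	write(fd, "hit checkpoint A\n", 17);
 *
 * The marker then appears in the trace output interleaved with the
 * kernel events around it.
 */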
5160
Li Zefan13f16d22009-12-08 11:16:11 +08005161static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005162{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005163 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005164 int i;
5165
5166 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005167 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005168 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005169 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5170 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005171 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005172
Li Zefan13f16d22009-12-08 11:16:11 +08005173 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005174}
5175
Steven Rostedte1e232c2014-02-10 23:38:46 -05005176static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005177{
Zhaolei5079f322009-08-25 16:12:56 +08005178 int i;
5179
Zhaolei5079f322009-08-25 16:12:56 +08005180 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5181 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5182 break;
5183 }
5184 if (i == ARRAY_SIZE(trace_clocks))
5185 return -EINVAL;
5186
Zhaolei5079f322009-08-25 16:12:56 +08005187 mutex_lock(&trace_types_lock);
5188
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005189 tr->clock_id = i;
5190
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005191 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005192
David Sharp60303ed2012-10-11 16:27:52 -07005193 /*
5194 * New clock may not be consistent with the previous clock.
5195 * Reset the buffer so that it doesn't have incomparable timestamps.
5196 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005197 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005198
5199#ifdef CONFIG_TRACER_MAX_TRACE
5200 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5201 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005202 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005203#endif
David Sharp60303ed2012-10-11 16:27:52 -07005204
Zhaolei5079f322009-08-25 16:12:56 +08005205 mutex_unlock(&trace_types_lock);
5206
Steven Rostedte1e232c2014-02-10 23:38:46 -05005207 return 0;
5208}
5209
5210static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5211 size_t cnt, loff_t *fpos)
5212{
5213 struct seq_file *m = filp->private_data;
5214 struct trace_array *tr = m->private;
5215 char buf[64];
5216 const char *clockstr;
5217 int ret;
5218
5219 if (cnt >= sizeof(buf))
5220 return -EINVAL;
5221
5222 if (copy_from_user(&buf, ubuf, cnt))
5223 return -EFAULT;
5224
5225 buf[cnt] = 0;
5226
5227 clockstr = strstrip(buf);
5228
5229 ret = tracing_set_clock(tr, clockstr);
5230 if (ret)
5231 return ret;
5232
Zhaolei5079f322009-08-25 16:12:56 +08005233 *fpos += cnt;
5234
5235 return cnt;
5236}
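
/*
 * Usage sketch for the clock interface above (the exact clock list
 * varies by kernel config):
 *
 *	# cat /sys/kernel/tracing/trace_clock
 *	[local] global counter ...
 *	# echo global > /sys/kernel/tracing/trace_clock
 *
 * The clock in brackets is the one in use.  Switching clocks resets the
 * buffers (see tracing_set_clock() above) because timestamps taken with
 * different clocks are not comparable.
 */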
5237
Li Zefan13f16d22009-12-08 11:16:11 +08005238static int tracing_clock_open(struct inode *inode, struct file *file)
5239{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005240 struct trace_array *tr = inode->i_private;
5241 int ret;
5242
Li Zefan13f16d22009-12-08 11:16:11 +08005243 if (tracing_disabled)
5244 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005245
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005246 if (trace_array_get(tr))
5247 return -ENODEV;
5248
5249 ret = single_open(file, tracing_clock_show, inode->i_private);
5250 if (ret < 0)
5251 trace_array_put(tr);
5252
5253 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005254}
5255
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005256struct ftrace_buffer_info {
5257 struct trace_iterator iter;
5258 void *spare;
5259 unsigned int read;
5260};
5261
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005262#ifdef CONFIG_TRACER_SNAPSHOT
5263static int tracing_snapshot_open(struct inode *inode, struct file *file)
5264{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005265 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005266 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005267 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005268 int ret = 0;
5269
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005270 if (trace_array_get(tr) < 0)
5271 return -ENODEV;
5272
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005273 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005274 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005275 if (IS_ERR(iter))
5276 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005277 } else {
5278 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005279 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005280 m = kzalloc(sizeof(*m), GFP_KERNEL);
5281 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005282 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005283 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5284 if (!iter) {
5285 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005286 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005287 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005288 ret = 0;
5289
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005290 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005291 iter->trace_buffer = &tr->max_buffer;
5292 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005293 m->private = iter;
5294 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005295 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005296out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005297 if (ret < 0)
5298 trace_array_put(tr);
5299
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005300 return ret;
5301}
5302
5303static ssize_t
5304tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5305 loff_t *ppos)
5306{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005307 struct seq_file *m = filp->private_data;
5308 struct trace_iterator *iter = m->private;
5309 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005310 unsigned long val;
5311 int ret;
5312
5313 ret = tracing_update_buffers();
5314 if (ret < 0)
5315 return ret;
5316
5317 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5318 if (ret)
5319 return ret;
5320
5321 mutex_lock(&trace_types_lock);
5322
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005323 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005324 ret = -EBUSY;
5325 goto out;
5326 }
5327
5328 switch (val) {
5329 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005330 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5331 ret = -EINVAL;
5332 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005333 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005334 if (tr->allocated_snapshot)
5335 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005336 break;
5337 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005338/* Only allow per-cpu swap if the ring buffer supports it */
5339#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5340 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5341 ret = -EINVAL;
5342 break;
5343 }
5344#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005345 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005346 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005347 if (ret < 0)
5348 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005349 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005350 local_irq_disable();
5351 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005352 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005353 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005354 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005355 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005356 local_irq_enable();
5357 break;
5358 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005359 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005360 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5361 tracing_reset_online_cpus(&tr->max_buffer);
5362 else
5363 tracing_reset(&tr->max_buffer, iter->cpu_file);
5364 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005365 break;
5366 }
5367
5368 if (ret >= 0) {
5369 *ppos += cnt;
5370 ret = cnt;
5371 }
5372out:
5373 mutex_unlock(&trace_types_lock);
5374 return ret;
5375}
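
/*
 * The value written maps onto the switch cases above.  Usage sketch:
 *
 *	# echo 1 > /sys/kernel/tracing/snapshot    # allocate and swap
 *	# cat /sys/kernel/tracing/snapshot         # read the frozen copy
 *	# echo 0 > /sys/kernel/tracing/snapshot    # free the snapshot buffer
 *	# echo 2 > /sys/kernel/tracing/snapshot    # clear it without freeing
 *
 * Per-cpu swaps go through per_cpu/cpuN/snapshot and require
 * CONFIG_RING_BUFFER_ALLOW_SWAP.
 */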
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005376
5377static int tracing_snapshot_release(struct inode *inode, struct file *file)
5378{
5379 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005380 int ret;
5381
5382 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005383
5384 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005385 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005386
5387 /* If write only, the seq_file is just a stub */
5388 if (m)
5389 kfree(m->private);
5390 kfree(m);
5391
5392 return 0;
5393}
5394
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005395static int tracing_buffers_open(struct inode *inode, struct file *filp);
5396static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5397 size_t count, loff_t *ppos);
5398static int tracing_buffers_release(struct inode *inode, struct file *file);
5399static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5400 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5401
5402static int snapshot_raw_open(struct inode *inode, struct file *filp)
5403{
5404 struct ftrace_buffer_info *info;
5405 int ret;
5406
5407 ret = tracing_buffers_open(inode, filp);
5408 if (ret < 0)
5409 return ret;
5410
5411 info = filp->private_data;
5412
5413 if (info->iter.trace->use_max_tr) {
5414 tracing_buffers_release(inode, filp);
5415 return -EBUSY;
5416 }
5417
5418 info->iter.snapshot = true;
5419 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5420
5421 return ret;
5422}
5423
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005424#endif /* CONFIG_TRACER_SNAPSHOT */
5425
5426
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005427static const struct file_operations tracing_thresh_fops = {
5428 .open = tracing_open_generic,
5429 .read = tracing_thresh_read,
5430 .write = tracing_thresh_write,
5431 .llseek = generic_file_llseek,
5432};
5433
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005434static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005435 .open = tracing_open_generic,
5436 .read = tracing_max_lat_read,
5437 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005438 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005439};
5440
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005441static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005442 .open = tracing_open_generic,
5443 .read = tracing_set_trace_read,
5444 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005445 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005446};
5447
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005448static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005449 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005450 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005451 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005452 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005453 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005454 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005455};
5456
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005457static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005458 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005459 .read = tracing_entries_read,
5460 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005461 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005462 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005463};
5464
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005465static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005466 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005467 .read = tracing_total_entries_read,
5468 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005469 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005470};
5471
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005472static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005473 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005474 .write = tracing_free_buffer_write,
5475 .release = tracing_free_buffer_release,
5476};
5477
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005478static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005479 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005480 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005481 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005482 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005483};
5484
Zhaolei5079f322009-08-25 16:12:56 +08005485static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005486 .open = tracing_clock_open,
5487 .read = seq_read,
5488 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005489 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005490 .write = tracing_clock_write,
5491};
5492
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005493#ifdef CONFIG_TRACER_SNAPSHOT
5494static const struct file_operations snapshot_fops = {
5495 .open = tracing_snapshot_open,
5496 .read = seq_read,
5497 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005498 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005499 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005500};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005501
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005502static const struct file_operations snapshot_raw_fops = {
5503 .open = snapshot_raw_open,
5504 .read = tracing_buffers_read,
5505 .release = tracing_buffers_release,
5506 .splice_read = tracing_buffers_splice_read,
5507 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005508};
5509
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005510#endif /* CONFIG_TRACER_SNAPSHOT */
5511
Steven Rostedt2cadf912008-12-01 22:20:19 -05005512static int tracing_buffers_open(struct inode *inode, struct file *filp)
5513{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005514 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005515 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005516 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005517
5518 if (tracing_disabled)
5519 return -ENODEV;
5520
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005521 if (trace_array_get(tr) < 0)
5522 return -ENODEV;
5523
Steven Rostedt2cadf912008-12-01 22:20:19 -05005524 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005525 if (!info) {
5526 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005527 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005528 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005529
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005530 mutex_lock(&trace_types_lock);
5531
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005532 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005533 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005534 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005535 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005536 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005537 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005538 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005539
5540 filp->private_data = info;
5541
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005542 tr->current_trace->ref++;
5543
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005544 mutex_unlock(&trace_types_lock);
5545
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005546 ret = nonseekable_open(inode, filp);
5547 if (ret < 0)
5548 trace_array_put(tr);
5549
5550 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005551}
5552
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005553static unsigned int
5554tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5555{
5556 struct ftrace_buffer_info *info = filp->private_data;
5557 struct trace_iterator *iter = &info->iter;
5558
5559 return trace_poll(iter, filp, poll_table);
5560}
5561
Steven Rostedt2cadf912008-12-01 22:20:19 -05005562static ssize_t
5563tracing_buffers_read(struct file *filp, char __user *ubuf,
5564 size_t count, loff_t *ppos)
5565{
5566 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005567 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005568 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005569 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005570
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005571 if (!count)
5572 return 0;
5573
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005574#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005575 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5576 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005577#endif
5578
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005579 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005580 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5581 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005582 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005583 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005584
Steven Rostedt2cadf912008-12-01 22:20:19 -05005585 /* Do we have previous read data to read? */
5586 if (info->read < PAGE_SIZE)
5587 goto read;
5588
Steven Rostedtb6273442013-02-28 13:44:11 -05005589 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005590 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005591 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005592 &info->spare,
5593 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005594 iter->cpu_file, 0);
5595 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005596
5597 if (ret < 0) {
5598 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005599 if ((filp->f_flags & O_NONBLOCK))
5600 return -EAGAIN;
5601
Rabin Vincente30f53a2014-11-10 19:46:34 +01005602 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005603 if (ret)
5604 return ret;
5605
Steven Rostedtb6273442013-02-28 13:44:11 -05005606 goto again;
5607 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005608 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005609 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005610
Steven Rostedt436fc282011-10-14 10:44:25 -04005611 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005612 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005613 size = PAGE_SIZE - info->read;
5614 if (size > count)
5615 size = count;
5616
5617 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005618 if (ret == size)
5619 return -EFAULT;
5620
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005621 size -= ret;
5622
Steven Rostedt2cadf912008-12-01 22:20:19 -05005623 *ppos += size;
5624 info->read += size;
5625
5626 return size;
5627}
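
/*
 * Note: this handler hands out whole ring-buffer pages via info->spare,
 * so trace_pipe_raw is meant to be consumed in PAGE_SIZE chunks and
 * parsed with the binary ring-buffer page format, e.g. (hypothetical):
 *
 *	dd if=/sys/kernel/tracing/per_cpu/cpu0/trace_pipe_raw bs=4096
 *
 * Short reads simply continue from info->read within the same page.
 */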
5628
5629static int tracing_buffers_release(struct inode *inode, struct file *file)
5630{
5631 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005632 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005633
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005634 mutex_lock(&trace_types_lock);
5635
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005636 iter->tr->current_trace->ref--;
5637
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005638 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005639
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005640 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005641 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005642 kfree(info);
5643
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005644 mutex_unlock(&trace_types_lock);
5645
Steven Rostedt2cadf912008-12-01 22:20:19 -05005646 return 0;
5647}
5648
5649struct buffer_ref {
5650 struct ring_buffer *buffer;
5651 void *page;
5652 int ref;
5653};
5654
5655static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5656 struct pipe_buffer *buf)
5657{
5658 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5659
5660 if (--ref->ref)
5661 return;
5662
5663 ring_buffer_free_read_page(ref->buffer, ref->page);
5664 kfree(ref);
5665 buf->private = 0;
5666}
5667
Steven Rostedt2cadf912008-12-01 22:20:19 -05005668static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5669 struct pipe_buffer *buf)
5670{
5671 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5672
5673 ref->ref++;
5674}
5675
5676/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005677static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005678 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005679 .confirm = generic_pipe_buf_confirm,
5680 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005681 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005682 .get = buffer_pipe_buf_get,
5683};
5684
5685/*
5686 * Callback from splice_to_pipe(), used to release any pages left
5687 * in the spd when we errored out while filling the pipe.
5688 */
5689static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5690{
5691 struct buffer_ref *ref =
5692 (struct buffer_ref *)spd->partial[i].private;
5693
5694 if (--ref->ref)
5695 return;
5696
5697 ring_buffer_free_read_page(ref->buffer, ref->page);
5698 kfree(ref);
5699 spd->partial[i].private = 0;
5700}
5701
5702static ssize_t
5703tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5704 struct pipe_inode_info *pipe, size_t len,
5705 unsigned int flags)
5706{
5707 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005708 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005709 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5710 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005711 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005712 .pages = pages_def,
5713 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005714 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005715 .flags = flags,
5716 .ops = &buffer_pipe_buf_ops,
5717 .spd_release = buffer_spd_release,
5718 };
5719 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005720 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01005721 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005722
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005723#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005724 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5725 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005726#endif
5727
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005728 if (splice_grow_spd(pipe, &spd))
5729 return -ENOMEM;
Jens Axboe35f3d142010-05-20 10:43:18 +02005730
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005731 if (*ppos & (PAGE_SIZE - 1))
5732 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005733
5734 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005735 if (len < PAGE_SIZE)
5736 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005737 len &= PAGE_MASK;
5738 }
5739
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005740 again:
5741 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005742 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005743
Al Viroa786c062014-04-11 12:01:03 -04005744 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005745 struct page *page;
5746 int r;
5747
5748 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01005749 if (!ref) {
5750 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005751 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01005752 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005753
Steven Rostedt7267fa62009-04-29 00:16:21 -04005754 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005755 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005756 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005757 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005758 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005759 kfree(ref);
5760 break;
5761 }
5762
5763 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005764 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005765 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005766 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005767 kfree(ref);
5768 break;
5769 }
5770
5771 /*
5772		 * Zero out any leftover data; this page is going
5773		 * out to user land.
5774 */
5775 size = ring_buffer_page_len(ref->page);
5776 if (size < PAGE_SIZE)
5777 memset(ref->page + size, 0, PAGE_SIZE - size);
5778
5779 page = virt_to_page(ref->page);
5780
5781 spd.pages[i] = page;
5782 spd.partial[i].len = PAGE_SIZE;
5783 spd.partial[i].offset = 0;
5784 spd.partial[i].private = (unsigned long)ref;
5785 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005786 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005787
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005788 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005789 }
5790
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005791 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005792 spd.nr_pages = i;
5793
5794 /* did we read anything? */
5795 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005796 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005797 return ret;
Rabin Vincent07906da2014-11-06 22:26:07 +01005798
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005799 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5800 return -EAGAIN;
5801
Rabin Vincente30f53a2014-11-10 19:46:34 +01005802 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005803 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005804 return ret;
Rabin Vincente30f53a2014-11-10 19:46:34 +01005805
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005806 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005807 }
5808
5809 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005810 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005811
Steven Rostedt2cadf912008-12-01 22:20:19 -05005812 return ret;
5813}
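
/*
 * Unlike the read() path above, this splice handler exports the
 * ring-buffer pages themselves (wrapped in buffer_ref) rather than
 * copying them, so a consumer can move raw trace pages to a file or
 * socket with no copy, e.g. (sketch, raw_fd opened on trace_pipe_raw):
 *
 *	splice(raw_fd, NULL, pfd[1], NULL, 8 * 4096, 0);
 *	splice(pfd[0], NULL, out_fd, NULL, 8 * 4096, 0);
 *
 * As handled above, *ppos must be page-aligned and the length is
 * rounded down to whole pages.
 */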
5814
5815static const struct file_operations tracing_buffers_fops = {
5816 .open = tracing_buffers_open,
5817 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005818 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005819 .release = tracing_buffers_release,
5820 .splice_read = tracing_buffers_splice_read,
5821 .llseek = no_llseek,
5822};
5823
Steven Rostedtc8d77182009-04-29 18:03:45 -04005824static ssize_t
5825tracing_stats_read(struct file *filp, char __user *ubuf,
5826 size_t count, loff_t *ppos)
5827{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005828 struct inode *inode = file_inode(filp);
5829 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005830 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005831 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005832 struct trace_seq *s;
5833 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005834 unsigned long long t;
5835 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005836
Li Zefane4f2d102009-06-15 10:57:28 +08005837 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005838 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005839 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005840
5841 trace_seq_init(s);
5842
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005843 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005844 trace_seq_printf(s, "entries: %ld\n", cnt);
5845
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005846 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005847 trace_seq_printf(s, "overrun: %ld\n", cnt);
5848
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005849 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005850 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5851
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005852 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005853 trace_seq_printf(s, "bytes: %ld\n", cnt);
5854
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005855 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005856 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005857 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005858 usec_rem = do_div(t, USEC_PER_SEC);
5859 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5860 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005861
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005862 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005863 usec_rem = do_div(t, USEC_PER_SEC);
5864 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5865 } else {
5866 /* counter or tsc mode for trace_clock */
5867 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005868 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005869
5870 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005871 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005872 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005873
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005874 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005875 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5876
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005877 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005878 trace_seq_printf(s, "read events: %ld\n", cnt);
5879
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005880 count = simple_read_from_buffer(ubuf, count, ppos,
5881 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04005882
5883 kfree(s);
5884
5885 return count;
5886}
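
/*
 * A read of per_cpu/cpuN/stats produced by the function above looks
 * like this (values illustrative):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 5632
 *	oldest event ts:  2725.041990
 *	now ts:  2727.291771
 *	dropped events: 0
 *	read events: 107
 */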
5887
5888static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005889 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005890 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005891 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005892 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005893};
5894
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005895#ifdef CONFIG_DYNAMIC_FTRACE
5896
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005897int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005898{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005899 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005900}
5901
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005902static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005903tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005904 size_t cnt, loff_t *ppos)
5905{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005906 static char ftrace_dyn_info_buffer[1024];
5907 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005908 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005909 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005910 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005911 int r;
5912
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005913 mutex_lock(&dyn_info_mutex);
5914 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005915
Steven Rostedta26a2a22008-10-31 00:03:22 -04005916 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005917 buf[r++] = '\n';
5918
5919 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5920
5921 mutex_unlock(&dyn_info_mutex);
5922
5923 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005924}
5925
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005926static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005927 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005928 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005929 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005930};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005931#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005932
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005933#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5934static void
5935ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005936{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005937 tracing_snapshot();
5938}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005939
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005940static void
5941ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5942{
5943 unsigned long *count = (long *)data;
5944
5945 if (!*count)
5946 return;
5947
5948 if (*count != -1)
5949 (*count)--;
5950
5951 tracing_snapshot();
5952}
5953
5954static int
5955ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5956 struct ftrace_probe_ops *ops, void *data)
5957{
5958 long count = (long)data;
5959
5960 seq_printf(m, "%ps:", (void *)ip);
5961
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005962 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005963
5964 if (count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005965 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005966 else
5967 seq_printf(m, ":count=%ld\n", count);
5968
5969 return 0;
5970}
5971
5972static struct ftrace_probe_ops snapshot_probe_ops = {
5973 .func = ftrace_snapshot,
5974 .print = ftrace_snapshot_print,
5975};
5976
5977static struct ftrace_probe_ops snapshot_count_probe_ops = {
5978 .func = ftrace_count_snapshot,
5979 .print = ftrace_snapshot_print,
5980};
5981
5982static int
5983ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5984 char *glob, char *cmd, char *param, int enable)
5985{
5986 struct ftrace_probe_ops *ops;
5987 void *count = (void *)-1;
5988 char *number;
5989 int ret;
5990
5991 /* hash funcs only work with set_ftrace_filter */
5992 if (!enable)
5993 return -EINVAL;
5994
5995 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5996
5997 if (glob[0] == '!') {
5998 unregister_ftrace_function_probe_func(glob+1, ops);
5999 return 0;
6000 }
6001
6002 if (!param)
6003 goto out_reg;
6004
6005 number = strsep(&param, ":");
6006
6007 if (!strlen(number))
6008 goto out_reg;
6009
6010 /*
6011 * We use the callback data field (which is a pointer)
6012 * as our counter.
6013 */
6014 ret = kstrtoul(number, 0, (unsigned long *)&count);
6015 if (ret)
6016 return ret;
6017
6018 out_reg:
6019 ret = register_ftrace_function_probe(glob, ops, count);
6020
6021 if (ret >= 0)
6022 alloc_snapshot(&global_trace);
6023
6024 return ret < 0 ? ret : 0;
6025}
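
/*
 * This implements the "snapshot" command parsed out of
 * set_ftrace_filter.  Usage sketch:
 *
 *	# echo 'do_sys_open:snapshot' > /sys/kernel/tracing/set_ftrace_filter
 *	# echo 'do_sys_open:snapshot:5' > set_ftrace_filter  # at most 5 shots
 *	# echo '!do_sys_open:snapshot' > set_ftrace_filter   # remove the probe
 *
 * Registering the probe also pre-allocates the snapshot buffer (see
 * alloc_snapshot() above), so ftrace_snapshot() never has to allocate
 * from the function-trace callback itself.
 */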
6026
6027static struct ftrace_func_command ftrace_snapshot_cmd = {
6028 .name = "snapshot",
6029 .func = ftrace_trace_snapshot_callback,
6030};
6031
Tom Zanussi38de93a2013-10-24 08:34:18 -05006032static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006033{
6034 return register_ftrace_command(&ftrace_snapshot_cmd);
6035}
6036#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05006037static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006038#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006039
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006040static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006041{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006042 if (WARN_ON(!tr->dir))
6043 return ERR_PTR(-ENODEV);
6044
6045 /* Top directory uses NULL as the parent */
6046 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6047 return NULL;
6048
6049 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006050 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006051}
6052
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006053static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6054{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006055 struct dentry *d_tracer;
6056
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006057 if (tr->percpu_dir)
6058 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006059
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006060 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006061 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006062 return NULL;
6063
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006064 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006065
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006066 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006067 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006068
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006069 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006070}
6071
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006072static struct dentry *
6073trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6074 void *data, long cpu, const struct file_operations *fops)
6075{
6076 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6077
	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
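
/*
 * The files created above give each instance a per-CPU view of the
 * ring buffer, laid out roughly like this (a sketch, with
 * CONFIG_TRACER_SNAPSHOT enabled):
 *
 *   per_cpu/cpu0/trace_pipe
 *   per_cpu/cpu0/trace
 *   per_cpu/cpu0/trace_pipe_raw
 *   per_cpu/cpu0/stats
 *   per_cpu/cpu0/buffer_size_kb
 *   per_cpu/cpu0/snapshot
 *   per_cpu/cpu0/snapshot_raw
 *
 * and likewise for every other online CPU.
 */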

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}


static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
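
/*
 * Both flavors of option file live under the "options" directory and
 * accept a "0" or "1" write. A sketch of use from a shell, assuming
 * tracefs is mounted at /sys/kernel/tracing:
 *
 *   echo 1 > options/sym-addr     # core flag: show symbol addresses
 *   echo 0 > options/overwrite    # core flag: stop when the buffer fills
 */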

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create tracefs '%s' entry\n", name);

	return ret;
}


static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);

}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		tracefs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
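
/*
 * "tracing_on" gives a cheap way to pause and resume recording without
 * tearing down the current tracer. A sketch of typical use, assuming
 * tracefs is mounted at /sys/kernel/tracing:
 *
 *   echo 0 > tracing_on    # freeze the ring buffer around an event
 *   cat trace              # inspect what was captured
 *   echo 1 > tracing_on    # resume recording
 */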

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
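
/*
 * Design note (a sketch of the intent): when no snapshot was requested
 * on the kernel command line, the max/snapshot buffer above starts out
 * at a single ring-buffer page per CPU and is only resized to match the
 * main buffer when a snapshot feature is first used, so the shadow
 * buffer costs almost nothing until then.
 */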

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
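
/*
 * With the instances directory wired up, a fully independent trace
 * instance (its own buffers, events and options) can be managed from
 * user space with plain mkdir/rmdir (a sketch; "foo" is an arbitrary
 * name and tracefs is assumed mounted at /sys/kernel/tracing):
 *
 *   mkdir /sys/kernel/tracing/instances/foo   # calls instance_mkdir()
 *   rmdir /sys/kernel/tracing/instances/foo   # calls instance_rmdir()
 */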

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

}

static struct vfsmount *trace_automount(void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
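
/*
 * Note: trace_automount() is not called at boot. It is handed to
 * debugfs_create_automount() below, which arranges for it to run the
 * first time something walks into the debugfs "tracing" directory, so
 * the tracefs mount there is created lazily on first access.
 */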

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!debugfs_initialized()))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	/* If the tracer was started via cmdline, create options for it here */
	if (global_trace.current_trace != &nop_trace)
		update_tracer_options(&global_trace, global_trace.current_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is capped at a max of 1024 characters; we really don't need
 * it that big, as nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
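
/*
 * Besides the panic/die notifiers in this file that call it
 * automatically, ftrace_dump() can be called directly from kernel code
 * while debugging, or triggered from user space with sysrq-z. A minimal
 * sketch (the call site is hypothetical):
 *
 *	if (WARN_ON(something_went_wrong))
 *		ftrace_dump(DUMP_ALL);	// or DUMP_ORIG for this CPU only
 */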

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}
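
/*
 * Ordering note (a sketch of the boot flow): trace_init() above runs
 * early from start_kernel(), well before initcalls, so the ring buffers
 * and events are usable as soon as possible. The tracefs files
 * themselves appear later, when the fs_initcall() registered at the
 * bottom of this file runs tracer_init_tracefs().
 */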

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section and
	 * is freed after boot. This function is called at lateinit; if
	 * the boot tracer was never registered, clear the pointer so a
	 * later registration cannot access memory that is about to be
	 * freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);