/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring-buffer to count the entries
 * inserted during the selftest, although concurrent insertions
 * into the ring-buffer, such as trace_printk, could occur at the
 * same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
        { }
};

static struct tracer_flags dummy_tracer_flags = {
        .val = 0,
        .opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1, and is cleared to zero only when tracer
 * initialization succeeds; nothing else sets it back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly     tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it either by specifying
 * "ftrace_dump_on_oops" on the kernel command line, or by setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
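
/*
 * Example (illustrative): booting with a bare "ftrace_dump_on_oops"
 * selects DUMP_ALL, and "ftrace_dump_on_oops=orig_cpu" selects DUMP_ORIG,
 * as parsed by set_ftrace_dump_on_oops() below. The same values can be
 * set at run time, e.g. "echo 1 > /proc/sys/kernel/ftrace_dump_on_oops".
 */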

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
        struct module                   *mod;
        unsigned long                   length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
        /*
         * "end" is first and points to NULL as it must be different
         * from "mod" or "enum_string"
         */
        union trace_enum_map_item       *next;
        const char                      *end;   /* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
        struct trace_enum_map           map;
        struct trace_enum_map_head      head;
        struct trace_enum_map_tail      tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
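
/*
 * Layout sketch (an illustration derived from the comment above, not
 * from separate documentation):
 *
 *   trace_enum_maps -> [ head | map 0 | map 1 | ... | map N-1 | tail ]
 *
 * where head.length == N, head.mod identifies the owning module for maps
 * that are not built in, and tail.next points to the next saved array
 * (or NULL).
 */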

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE         100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
        strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
        default_bootup_tracer = bootup_tracer_buf;
        /* We are using ftrace early, expand it */
        ring_buffer_expanded = true;
        return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
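
/*
 * Example (illustrative): booting with "ftrace=function" selects the
 * function tracer as the boot-up tracer; the name must match a tracer
 * registered through register_tracer() below.
 */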

static int __init set_ftrace_dump_on_oops(char *str)
{
        if (*str++ != '=' || !*str) {
                ftrace_dump_on_oops = DUMP_ALL;
                return 1;
        }

        if (!strcmp("orig_cpu", str)) {
                ftrace_dump_on_oops = DUMP_ORIG;
                return 1;
        }

        return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                __disable_trace_on_warning = 1;
        return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
        allocate_snapshot = true;
        /* We also need the main ring buffer expanded */
        ring_buffer_expanded = true;
        return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
        strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
        trace_boot_options = trace_boot_options_buf;
        return 0;
}
__setup("trace_options=", set_trace_boot_options);
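
/*
 * Example (illustrative): "trace_options=sym-addr,stacktrace" on the
 * command line stores the raw string here; it is assumed to be applied
 * later during tracer setup with the same comma-separated syntax the
 * tracefs "trace_options" file accepts, using the option names from the
 * trace_options[] table below.
 */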

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
        strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
        trace_boot_clock = trace_boot_clock_buf;
        return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
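
/*
 * Example (illustrative): "trace_clock=global" selects one of the clocks
 * in the trace_clocks[] table below ("local", "global", "counter",
 * "uptime", "perf", "mono", plus any ARCH_TRACE_CLOCKS).
 */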

static int __init set_tracepoint_printk(char *str)
{
        if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                tracepoint_printk = 1;
        return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
        nsec += 500;
        do_div(nsec, 1000);
        return nsec;
}
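
/*
 * Worked example (illustrative): ns2usecs(1499) = (1499 + 500) / 1000 = 1,
 * while ns2usecs(1500) = 2000 / 1000 = 2; i.e. the conversion rounds to
 * the nearest microsecond instead of truncating.
 */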

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array       global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
        struct trace_array *tr;
        int ret = -ENODEV;

        mutex_lock(&trace_types_lock);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                if (tr == this_tr) {
                        tr->ref++;
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&trace_types_lock);

        return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
        WARN_ON(!this_tr->ref);
        this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
        mutex_lock(&trace_types_lock);
        __trace_array_put(this_tr);
        mutex_unlock(&trace_types_lock);
}
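
/*
 * Usage sketch (illustrative, not a documented contract): code that keeps
 * a trace_array pointer across a sleep or a user-space access pairs these:
 *
 *      if (trace_array_get(tr) < 0)
 *              return -ENODEV;
 *      ...use tr...
 *      trace_array_put(tr);
 *
 * so that instance removal cannot free the array in between.
 */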

int filter_check_discard(struct ftrace_event_file *file, void *rec,
                         struct ring_buffer *buffer,
                         struct ring_buffer_event *event)
{
        if (unlikely(file->flags & FTRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(file->filter, rec)) {
                ring_buffer_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct ftrace_event_call *call, void *rec,
                              struct ring_buffer *buffer,
                              struct ring_buffer_event *event)
{
        if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
            !filter_match_preds(call->filter, rec)) {
                ring_buffer_discard_commit(buffer, event);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
        u64 ts;

        /* Early boot up does not have a buffer yet */
        if (!buf->buffer)
                return trace_clock_local();

        ts = ring_buffer_time_stamp(buf->buffer, cpu);
        ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

        return ts;
}

cycle_t ftrace_now(int cpu)
{
        return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
        /*
         * For quick access (irqsoff uses this in fast path), just
         * return the mirror variable of the state of the ring buffer.
         * It's a little racy, but we don't really care.
         */
        smp_rmb();
        return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. In any case, it is
 * configurable at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT  1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long            trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer            *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi-process access to different cpu ring
 * buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                /* gain it for accessing the whole ring buffer. */
                down_write(&all_cpu_access_lock);
        } else {
                /* gain it for accessing a cpu ring buffer. */

                /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
                down_read(&all_cpu_access_lock);

                /* Secondly block other access to this @cpu ring buffer. */
                mutex_lock(&per_cpu(cpu_access_lock, cpu));
        }
}

static inline void trace_access_unlock(int cpu)
{
        if (cpu == RING_BUFFER_ALL_CPUS) {
                up_write(&all_cpu_access_lock);
        } else {
                mutex_unlock(&per_cpu(cpu_access_lock, cpu));
                up_read(&all_cpu_access_lock);
        }
}

static inline void trace_access_lock_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
        (void)cpu;
        mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
        (void)cpu;
        mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;

static void tracer_tracing_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_on(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 0;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
        tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:    The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
        int pc;

        if (!(trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        alloc = sizeof(*entry) + size + 2; /* possible \n added */

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
                                          irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip = ip;

        memcpy(&entry->buf, str, size);

        /* Add a newline if necessary */
        if (entry->buf[size - 1] != '\n') {
                entry->buf[size] = '\n';
                entry->buf[size + 1] = '\0';
        } else
                entry->buf[size] = '\0';

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(buffer, irq_flags, 4, pc);

        return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:    The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
        int pc;

        if (!(trace_flags & TRACE_ITER_PRINTK))
                return 0;

        pc = preempt_count();

        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;

        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
                                          irq_flags, pc);
        if (!event)
                return 0;

        entry = ring_buffer_event_data(event);
        entry->ip  = ip;
        entry->str = str;

        __buffer_unlock_commit(buffer, event);
        ftrace_trace_stack(buffer, irq_flags, 4, pc);

        return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
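
/*
 * Note (assumption based on the trace_puts() macro in <linux/kernel.h> of
 * this era): callers normally use trace_puts(str) rather than these two
 * helpers directly; the macro routes build-time constant strings to
 * __trace_bputs(), which records only the pointer, and everything else to
 * __trace_puts(), which copies the string into the ring buffer.
 */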

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        struct tracer *tracer = tr->current_trace;
        unsigned long flags;

        if (in_nmi()) {
                internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
                internal_trace_puts("*** snapshot is being ignored        ***\n");
                return;
        }

        if (!tr->allocated_snapshot) {
                internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
                internal_trace_puts("*** stopping trace here!   ***\n");
                tracing_off();
                return;
        }

        /* Note, snapshot can not be used when the tracer uses it */
        if (tracer->use_max_tr) {
                internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
                internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
                return;
        }

        local_irq_save(flags);
        update_max_tr(tr, current, smp_processor_id());
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
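
/*
 * Usage sketch (illustrative): a debugging patch would typically call
 * tracing_snapshot_alloc() once from a context that may sleep, then call
 * tracing_snapshot() at the spot where the interesting condition fires,
 * preserving the live buffer for later inspection through the tracefs
 * "snapshot" file.
 */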

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
                                        struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
        int ret;

        if (!tr->allocated_snapshot) {

                /* allocate spare buffer */
                ret = resize_buffer_duplicate_size(&tr->max_buffer,
                                &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
                if (ret < 0)
                        return ret;

                tr->allocated_snapshot = true;
        }

        return 0;
}

static void free_snapshot(struct trace_array *tr)
{
        /*
         * We don't free the ring buffer. Instead, we resize it, because
         * the max_tr ring buffer has some state (e.g. ring->clock) that
         * we want to preserve.
         */
        ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
        set_buffer_entries(&tr->max_buffer, 1);
        tracing_reset_online_cpus(&tr->max_buffer);
        tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
        struct trace_array *tr = &global_trace;
        int ret;

        ret = alloc_snapshot(tr);
        WARN_ON(ret < 0);

        return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
        int ret;

        ret = tracing_alloc_snapshot();
        if (ret < 0)
                return;

        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
        WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
        return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
        /* Give warning */
        tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                ring_buffer_record_off(tr->trace_buffer.buffer);
        /*
         * This flag is looked at when buffers haven't been allocated
         * yet, or by some tracers (like irqsoff), that just want to
         * know if the ring buffer has been disabled, but it can handle
         * races of where it gets disabled but we still do a record.
         * As the check is in the fast path of the tracers, it is more
         * important to be fast than accurate.
         */
        tr->buffer_disabled = 1;
        /* Make the flag seen by readers */
        smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
        tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
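
/*
 * Usage sketch (illustrative): code chasing a rare bug can bracket the
 * window of interest itself:
 *
 *      tracing_on();
 *      do_suspect_work();      /* hypothetical function */
 *      tracing_off();
 *
 * which freezes the ring buffer contents right after the work of
 * interest without tearing down the tracers themselves.
 */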

void disable_trace_on_warning(void)
{
        if (__disable_trace_on_warning)
                tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr: the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
        if (tr->trace_buffer.buffer)
                return ring_buffer_record_is_on(tr->trace_buffer.buffer);
        return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
        return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
        unsigned long buf_size;

        if (!str)
                return 0;
        buf_size = memparse(str, &str);
        /* nr_entries can not be zero */
        if (buf_size == 0)
                return 0;
        trace_buf_size = buf_size;
        return 1;
}
__setup("trace_buf_size=", set_buf_size);
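
/*
 * Example (illustrative): "trace_buf_size=1048576" requests a 1 MB
 * per-cpu buffer; since memparse() accepts size suffixes,
 * "trace_buf_size=1M" should behave the same.
 */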

static int __init set_tracing_thresh(char *str)
{
        unsigned long threshold;
        int ret;

        if (!str)
                return 0;
        ret = kstrtoul(str, 0, &threshold);
        if (ret < 0)
                return 0;
        tracing_thresh = threshold * 1000;
        return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
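
/*
 * Note (from the code above): the parameter is given in microseconds
 * ("tracing_thresh=100" means 100 usecs) but stored in nanoseconds,
 * hence the multiplication by 1000; nsecs_to_usecs() below performs
 * the reverse conversion for display.
 */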

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
        return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
        "print-parent",
        "sym-offset",
        "sym-addr",
        "verbose",
        "raw",
        "hex",
        "bin",
        "block",
        "stacktrace",
        "trace_printk",
        "ftrace_preempt",
        "branch",
        "annotate",
        "userstacktrace",
        "sym-userobj",
        "printk-msg-only",
        "context-info",
        "latency-format",
        "sleep-time",
        "graph-time",
        "record-cmd",
        "overwrite",
        "disable_on_free",
        "irq-info",
        "markers",
        "function-trace",
        NULL
};
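
/*
 * Example (illustrative): each name above maps to a run-time toggle such
 * as "echo 1 > /sys/kernel/debug/tracing/options/stacktrace", and to the
 * "trace_options=" boot parameter handled earlier in this file.
 */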

static struct {
        u64 (*func)(void);
        const char *name;
        int in_ns;              /* is this clock in nanoseconds? */
} trace_clocks[] = {
        { trace_clock_local,            "local",        1 },
        { trace_clock_global,           "global",       1 },
        { trace_clock_counter,          "counter",      0 },
        { trace_clock_jiffies,          "uptime",       0 },
        { trace_clock,                  "perf",         1 },
        { ktime_get_mono_fast_ns,       "mono",         1 },
        ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
        memset(parser, 0, sizeof(*parser));

        parser->buffer = kmalloc(size, GFP_KERNEL);
        if (!parser->buffer)
                return 1;

        parser->size = size;
        return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
        kfree(parser->buffer);
}
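
/*
 * Usage sketch (an illustration, not a documented contract): callers pair
 * these around trace_get_user() below, roughly:
 *
 *      struct trace_parser parser;
 *
 *      if (trace_parser_get_init(&parser, PAGE_SIZE))
 *              return -ENOMEM;
 *      while ((ret = trace_get_user(&parser, ubuf, cnt, ppos)) > 0)
 *              ...consume parser.buffer...
 *      trace_parser_put(&parser);
 *
 * Note the unusual convention: trace_parser_get_init() returns nonzero
 * rather than -ENOMEM on allocation failure.
 */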

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
        size_t cnt, loff_t *ppos)
{
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!*ppos)
                trace_parser_clear(parser);

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;

        read++;
        cnt--;

        /*
         * The parser is not finished with the last write,
         * continue reading the user input without skipping spaces.
         */
        if (!parser->cont) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                /* only spaces were written */
                if (isspace(ch)) {
                        *ppos += read;
                        ret = read;
                        goto out;
                }

                parser->idx = 0;
        }

        /* read the non-space input */
        while (cnt && !isspace(ch)) {
                if (parser->idx < parser->size - 1)
                        parser->buffer[parser->idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        /* We either got finished input or we have to wait for another call. */
        if (isspace(ch)) {
                parser->buffer[parser->idx] = 0;
                parser->cont = false;
        } else if (parser->idx < parser->size - 1) {
                parser->cont = true;
                parser->buffer[parser->idx++] = ch;
        } else {
                ret = -EINVAL;
                goto out;
        }

        *ppos += read;
        ret = read;

out:
        return ret;
}
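
/*
 * Behavior sketch (illustrative): if user space writes "foo bar", the
 * first call fills parser->buffer with "foo" and clears parser->cont; a
 * later call picks up "bar". If a write ends mid-word (no trailing
 * space), parser->cont is set so the next call continues the same token
 * instead of skipping leading whitespace.
 */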

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
        int len;

        if (trace_seq_used(s) <= s->seq.readpos)
                return -EBUSY;

        len = trace_seq_used(s) - s->seq.readpos;
        if (cnt > len)
                cnt = len;
        memcpy(buf, s->buffer + s->seq.readpos, cnt);

        s->seq.readpos += cnt;
        return cnt;
}

unsigned long __read_mostly     tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct trace_buffer *trace_buf = &tr->trace_buffer;
        struct trace_buffer *max_buf = &tr->max_buffer;
        struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
        struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

        max_buf->cpu = cpu;
        max_buf->time_start = data->preempt_timestamp;

        max_data->saved_latency = tr->max_latency;
        max_data->critical_start = data->critical_start;
        max_data->critical_end = data->critical_end;

        memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
        max_data->pid = tsk->pid;
        /*
         * If tsk == current, then use current_uid(), as that does not use
         * RCU. The irq tracer can be called out of RCU scope.
         */
        if (tsk == current)
                max_data->uid = current_uid();
        else
                max_data->uid = task_uid(tsk);

        max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
        max_data->policy = tsk->policy;
        max_data->rt_priority = tsk->rt_priority;

        /* record this task's comm */
        tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        struct ring_buffer *buf;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());

        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        buf = tr->trace_buffer.buffer;
        tr->trace_buffer.buffer = tr->max_buffer.buffer;
        tr->max_buffer.buffer = buf;

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
        int ret;

        if (tr->stop_count)
                return;

        WARN_ON_ONCE(!irqs_disabled());
        if (!tr->allocated_snapshot) {
                /* Only the nop tracer should hit this when disabling */
                WARN_ON_ONCE(tr->current_trace != &nop_trace);
                return;
        }

        arch_spin_lock(&tr->max_lock);

        ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

        if (ret == -EBUSY) {
                /*
                 * We failed to swap the buffer due to a commit taking
                 * place on this CPU. We fail to record, but we reset
                 * the max trace buffer (no one writes directly to it)
                 * and flag that it failed.
                 */
                trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
                        "Failed to swap buffers due to commit in progress\n");
        }

        WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

        __update_max_tr(tr, tsk, cpu);
        arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
        /* Iterators are static, they should be filled or empty */
        if (trace_buffer_iter(iter, iter->cpu_file))
                return 0;

        return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
                                full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
        struct trace_array *tr = &global_trace;
        struct tracer *saved_tracer = tr->current_trace;
        int ret;

        if (!type->selftest || tracing_selftest_disabled)
                return 0;

        /*
         * Run a selftest on this tracer.
         * Here we reset the trace buffer, and set the current
         * tracer to be this tracer. The tracer can then run some
         * internal tracing to verify that everything is in order.
         * If we fail, we do not register this tracer.
         */
        tracing_reset_online_cpus(&tr->trace_buffer);

        tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                /* If we expanded the buffers, make sure the max is expanded too */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
                                           RING_BUFFER_ALL_CPUS);
                tr->allocated_snapshot = true;
        }
#endif

        /* the test is responsible for initializing and enabling */
        pr_info("Testing tracer %s: ", type->name);
        ret = type->selftest(type, tr);
        /* the test is responsible for resetting too */
        tr->current_trace = saved_tracer;
        if (ret) {
                printk(KERN_CONT "FAILED!\n");
                /* Add the warning after printing 'FAILED' */
                WARN_ON(1);
                return -1;
        }
        /* Only reset on passing, to avoid touching corrupted buffers */
        tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
        if (type->use_max_tr) {
                tr->allocated_snapshot = false;

                /* Shrink the max buffer again */
                if (ring_buffer_expanded)
                        ring_buffer_resize(tr->max_buffer.buffer, 1,
                                           RING_BUFFER_ALL_CPUS);
        }
#endif

        printk(KERN_CONT "PASSED\n");
        return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
        return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
        struct tracer *t;
        int ret = 0;

        if (!type->name) {
                pr_info("Tracer must have a name\n");
                return -1;
        }

        if (strlen(type->name) >= MAX_TRACER_SIZE) {
                pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
                return -1;
        }

        mutex_lock(&trace_types_lock);

        tracing_selftest_running = true;

        for (t = trace_types; t; t = t->next) {
                if (strcmp(type->name, t->name) == 0) {
                        /* already found */
                        pr_info("Tracer %s already registered\n",
                                type->name);
                        ret = -1;
                        goto out;
                }
        }

        if (!type->set_flag)
                type->set_flag = &dummy_set_flag;
        if (!type->flags)
                type->flags = &dummy_tracer_flags;
        else
                if (!type->flags->opts)
                        type->flags->opts = dummy_tracer_opt;

        ret = run_tracer_selftest(type);
        if (ret < 0)
                goto out;

        type->next = trace_types;
        trace_types = type;

 out:
        tracing_selftest_running = false;
        mutex_unlock(&trace_types_lock);

        if (ret || !default_bootup_tracer)
                goto out_unlock;

        if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
                goto out_unlock;

        printk(KERN_INFO "Starting tracer '%s'\n", type->name);
        /* Do we want this tracer to start on bootup? */
        tracing_set_tracer(&global_trace, type->name);
        default_bootup_tracer = NULL;
        /* disable other selftests, since this will break it. */
        tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
        printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
               type->name);
#endif

 out_unlock:
        return ret;
}
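
/*
 * Registration sketch (illustrative; a minimal tracer using only fields
 * this function actually checks; the names below are hypothetical):
 *
 *      static struct tracer example_tracer __read_mostly = {
 *              .name   = "example",
 *              .init   = example_init,
 *              .reset  = example_reset,
 *      };
 *
 *      static int __init example_tracer_register(void)
 *      {
 *              return register_tracer(&example_tracer);
 *      }
 *
 * With no ->selftest and no custom ->flags, register_tracer() fills in
 * the dummy flag handlers above and simply links the tracer into the
 * trace_types list.
 */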

void tracing_reset(struct trace_buffer *buf, int cpu)
{
        struct ring_buffer *buffer = buf->buffer;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();
        ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_buffer *buf)
{
        struct ring_buffer *buffer = buf->buffer;
        int cpu;

        if (!buffer)
                return;

        ring_buffer_record_disable(buffer);

        /* Make sure all commits have finished */
        synchronize_sched();

        buf->time_start = buffer_ftrace_now(buf, buf->cpu);

        for_each_online_cpu(cpu)
                ring_buffer_reset_cpu(buffer, cpu);

        ring_buffer_record_enable(buffer);
}
1315
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001316/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001317void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001318{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001319 struct trace_array *tr;
1320
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001321 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001322 tracing_reset_online_cpus(&tr->trace_buffer);
1323#ifdef CONFIG_TRACER_MAX_TRACE
1324 tracing_reset_online_cpus(&tr->max_buffer);
1325#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001326 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001327}
1328
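/*
 * What follows is the saved-cmdlines machinery: a fixed-size, two-way
 * map between pids and task comm strings, so trace output can print a
 * command name for a pid long after the task has exited.
 * map_pid_to_cmdline[] is indexed by pid and yields a slot index;
 * map_cmdline_to_pid[] records which pid currently owns each slot.
 */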
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}

static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
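/*
 * A rough footprint sketch, assuming the usual TASK_COMM_LEN of 16:
 * the default of 128 slots costs 128 * 16 = 2048 bytes of comm text
 * plus 128 slot-to-pid entries; the dominant cost is the pid-indexed
 * map of PID_MAX_DEFAULT + 1 entries embedded in the struct itself.
 */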
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}

int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
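/*
 * A minimal sketch of how tracing_stop()/tracing_start() pair up;
 * do_critical_work() is a hypothetical caller:
 *
 *	tracing_stop();
 *	do_critical_work();
 *	tracing_start();
 *
 * Because stop_count is a counter, these calls nest correctly.
 */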
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}

static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}

static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}

void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
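/*
 * Typical use, as a sketch: callers pass a TASK_COMM_LEN-sized buffer
 * and get either the saved comm or a "<...>" placeholder back:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
 */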
void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}
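/*
 * The reserve/commit pattern used throughout this file, sketched with
 * a hypothetical TRACE_FOO event type and entry layout:
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FOO,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->field = value;
 *	__buffer_unlock_commit(buffer, event);
 */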
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

static struct ring_buffer *temp_buffer;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
				struct ftrace_event_file *ftrace_file,
				int type, unsigned long len,
				unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = ftrace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					  type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled,
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It is
	 * recursion safe and will not be recorded anywhere.
	 */
	if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.trace_buffer.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs, there's no safe way to
	 * use the per-cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
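/*
 * A worked example of the ftrace_stack_reserve logic above: the first
 * __ftrace_trace_stack() on a CPU sees use_stack == 1 and unwinds into
 * the large per-cpu ftrace_stack. If an NMI interrupts it and traces a
 * stack of its own, that nested call sees use_stack == 2, treats it as
 * false, and unwinds directly into the (smaller) ring buffer event.
 */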
void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
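/*
 * Example, as a sketch: dumping the current backtrace into the trace
 * buffer from a suspicious code path (the condition is hypothetical):
 *
 *	if (unlikely(something_odd))
 *		trace_dump_stack(0);
 */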
static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * Prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */
1981
Steven Rostedt07d777f2011-09-22 14:01:55 -04001982/* created for use with alloc_percpu */
1983struct trace_buffer_struct {
1984 char buffer[TRACE_BUF_SIZE];
1985};
1986
1987static struct trace_buffer_struct *trace_percpu_buffer;
1988static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1989static struct trace_buffer_struct *trace_percpu_irq_buffer;
1990static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1991
1992/*
1993 * The buffer used is dependent on the context. There is a per cpu
1994 * buffer for normal context, softirq contex, hard irq context and
1995 * for NMI context. Thise allows for lockless recording.
1996 *
1997 * Note, if the buffers failed to be allocated, then this returns NULL
1998 */
1999static char *get_trace_buf(void)
2000{
2001 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002002
2003 /*
2004 * If we have allocated per cpu buffers, then we do not
2005 * need to do any locking.
2006 */
2007 if (in_nmi())
2008 percpu_buffer = trace_percpu_nmi_buffer;
2009 else if (in_irq())
2010 percpu_buffer = trace_percpu_irq_buffer;
2011 else if (in_softirq())
2012 percpu_buffer = trace_percpu_sirq_buffer;
2013 else
2014 percpu_buffer = trace_percpu_buffer;
2015
2016 if (!percpu_buffer)
2017 return NULL;
2018
Shan Weid8a03492012-11-13 09:53:04 +08002019 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002020}
2021
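/*
 * For example, a trace_printk() issued from an NMI handler lands in
 * trace_percpu_nmi_buffer, while the same call from process context
 * uses trace_percpu_buffer; since each context owns its own per-cpu
 * buffer, no locking is needed even when contexts nest.
 */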
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("\n");
	pr_warning("**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
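/*
 * Typical debug use, as a sketch (my_var is hypothetical):
 *
 *	trace_printk("reached %s with my_var=%d\n", __func__, my_var);
 *
 * The output goes to the ring buffer rather than the console, which is
 * why it is cheap enough for hot paths; as the banner above warns, it
 * must never ship in production code.
 */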
void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
/**
 * trace_vbprintk - write a binary message to the tracing buffer
 *
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct ftrace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();

	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(buffer, flags, 6, pc);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2183
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002184static int
2185__trace_array_vprintk(struct ring_buffer *buffer,
2186 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002187{
Tom Zanussie1112b42009-03-31 00:48:49 -05002188 struct ftrace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002189 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002190 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002191 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002192 unsigned long flags;
2193 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002194
2195 if (tracing_disabled || tracing_selftest_running)
2196 return 0;
2197
Steven Rostedt07d777f2011-09-22 14:01:55 -04002198 /* Don't pollute graph traces with trace_vprintk internals */
2199 pause_graph_tracing();
2200
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002201 pc = preempt_count();
2202 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002203
Steven Rostedt07d777f2011-09-22 14:01:55 -04002204
2205 tbuffer = get_trace_buf();
2206 if (!tbuffer) {
2207 len = 0;
2208 goto out;
2209 }
2210
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002211 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002212
Steven Rostedt07d777f2011-09-22 14:01:55 -04002213 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002214 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002215 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002216 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002217 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002218 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002219 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002220 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002221
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002222 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002223 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002224 __buffer_unlock_commit(buffer, event);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002225 ftrace_trace_stack(buffer, flags, 6, pc);
Steven Rostedtd9313692010-01-06 17:27:11 -05002226 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002227 out:
2228 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002229 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002230
2231 return len;
2232}
Steven Rostedt659372d2009-09-03 19:11:07 -04002233
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002234int trace_array_vprintk(struct trace_array *tr,
2235 unsigned long ip, const char *fmt, va_list args)
2236{
2237 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2238}
2239
2240int trace_array_printk(struct trace_array *tr,
2241 unsigned long ip, const char *fmt, ...)
2242{
2243 int ret;
2244 va_list ap;
2245
2246 if (!(trace_flags & TRACE_ITER_PRINTK))
2247 return 0;
2248
2249 va_start(ap, fmt);
2250 ret = trace_array_vprintk(tr, ip, fmt, ap);
2251 va_end(ap);
2252 return ret;
2253}
2254
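/*
 * Sketch: writing into a specific trace instance rather than the
 * global buffer; "tr" would come from the instance's trace_array and
 * "id" is a hypothetical value:
 *
 *	trace_array_printk(tr, _THIS_IP_, "widget %d ready\n", id);
 */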
int trace_array_printk_buf(struct ring_buffer *buffer,
			   unsigned long ip, const char *fmt, ...)
{
	int ret;
	va_list ap;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	va_start(ap, fmt);
	ret = __trace_array_vprintk(buffer, ip, fmt, ap);
	va_end(ap);
	return ret;
}

int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
	return trace_array_vprintk(&global_trace, ip, fmt, args);
}
EXPORT_SYMBOL_GPL(trace_vprintk);

static void trace_iterator_increment(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);

	iter->idx++;
	if (buf_iter)
		ring_buffer_read(buf_iter, NULL);
}

static struct trace_entry *
peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
		unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);

	if (buf_iter)
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
					 lost_events);

	if (event) {
		iter->ent_size = ring_buffer_event_length(event);
		return ring_buffer_event_data(event);
	}
	iter->ent_size = 0;
	return NULL;
}

static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother iterating
	 * over all cpus; peek at the one cpu directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
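/*
 * A worked example of the merge above: if cpu0's next entry has
 * ts = 1105 and cpu1's has ts = 1101, __find_next_entry() returns
 * cpu1's entry (the smallest timestamp wins), and
 * trace_find_next_entry_inc() below then advances only that cpu's
 * iterator.
 */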
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}

/* Find the next real entry, and increment the iterator to the next entry */
void *trace_find_next_entry_inc(struct trace_iterator *iter)
{
	iter->ent = __find_next_entry(iter, &iter->cpu,
				      &iter->lost_events, &iter->ts);

	if (iter->ent)
		trace_iterator_increment(iter);

	return iter->ent ? iter : NULL;
}

static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}

static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}

void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}

/*
 * The current tracer is copied to avoid taking a global lock
 * all around.
 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002452static void *s_start(struct seq_file *m, loff_t *pos)
2453{
2454 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002455 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002456 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002457 void *p = NULL;
2458 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002459 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002460
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002461 /*
2462 * copy the tracer to avoid using a global lock all around.
2463 * iter->trace is a copy of current_trace, the pointer to the
2464 * name may be used instead of a strcmp(), as iter->trace->name
2465 * will point to the same string as current_trace->name.
2466 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002467 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002468 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2469 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002470 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002471
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002472#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002473 if (iter->snapshot && iter->trace->use_max_tr)
2474 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002475#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002476
2477 if (!iter->snapshot)
2478 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002479
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002480 if (*pos != iter->pos) {
2481 iter->ent = NULL;
2482 iter->cpu = 0;
2483 iter->idx = -1;
2484
Steven Rostedtae3b5092013-01-23 15:22:59 -05002485 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002486 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002487 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002488 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002489 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002490
Lai Jiangshanac91d852010-03-02 17:54:50 +08002491 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002492 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2493 ;
2494
2495 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002496 /*
2497 * If we overflowed the seq_file before, then we want
2498 * to just reuse the trace_seq buffer again.
2499 */
2500 if (iter->leftover)
2501 p = iter;
2502 else {
2503 l = *pos - 1;
2504 p = s_next(m, p, &l);
2505 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002506 }
2507
Lai Jiangshan4f535962009-05-18 19:35:34 +08002508 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002509 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002510 return p;
2511}
2512
2513static void s_stop(struct seq_file *m, void *p)
2514{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002515 struct trace_iterator *iter = m->private;
2516
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002517#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002518 if (iter->snapshot && iter->trace->use_max_tr)
2519 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002520#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002521
2522 if (!iter->snapshot)
2523 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002524
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002525 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002526 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002527}
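/*
 * Illustrative sketch (not literal seq_file source): roughly how the
 * seq_file core drives the handlers registered in tracer_seq_ops
 * below when the "trace" file is read:
 *
 *	p = s_start(m, &pos);		// take locks, position iterator
 *	while (p && !IS_ERR(p)) {
 *		s_show(m, p);		// format one entry into m
 *		p = s_next(m, p, &pos);	// advance (defined earlier)
 *	}
 *	s_stop(m, p);			// drop locks
 */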
2528
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002529static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002530get_total_entries(struct trace_buffer *buf,
2531 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002532{
2533 unsigned long count;
2534 int cpu;
2535
2536 *total = 0;
2537 *entries = 0;
2538
2539 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002540 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002541 /*
2542 * If this buffer has skipped entries, then we hold all
2543 * entries for the trace and we need to ignore the
2544 * ones before the time stamp.
2545 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002546 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2547 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002548 /* total is the same as the entries */
2549 *total += count;
2550 } else
2551 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002552 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002553 *entries += count;
2554 }
2555}
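/*
 * Note: on return, *entries counts the events still readable from the
 * buffer, while *total additionally includes events lost to overruns,
 * so total >= entries. This pair produces the
 * "entries-in-buffer/entries-written" header line below.
 */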
2556
Ingo Molnare309b412008-05-12 21:20:51 +02002557static void print_lat_help_header(struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002558{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002559 seq_puts(m, "#                  _------=> CPU#            \n"
2560 "#                 / _-----=> irqs-off        \n"
2561 "#                | / _----=> need-resched    \n"
2562 "#                || / _---=> hardirq/softirq \n"
2563 "#                ||| / _--=> preempt-depth   \n"
2564 "#                |||| /     delay            \n"
2565 "#  cmd     pid   ||||| time  |   caller      \n"
2566 "#     \\   /      |||||  \\    |   /         \n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002567}
2568
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002569static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002570{
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002571 unsigned long total;
2572 unsigned long entries;
2573
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002574 get_total_entries(buf, &total, &entries);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002575 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
2576 entries, total, num_online_cpus());
2577 seq_puts(m, "#\n");
2578}
2579
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002580static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002581{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002582 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002583 seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
2584 "#              | |       |          |         |\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002585}
2586
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002587static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
Steven Rostedt77271ce2011-11-17 09:34:33 -05002588{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002589 print_event_info(buf, m);
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002590 seq_puts(m, "#                              _-----=> irqs-off\n"
2591 "#                             / _----=> need-resched\n"
2592 "#                            | / _---=> hardirq/softirq\n"
2593 "#                            || / _--=> preempt-depth\n"
2594 "#                            ||| /     delay\n"
2595 "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
2596 "#              | |       |   ||||       |         |\n");
Steven Rostedt77271ce2011-11-17 09:34:33 -05002597}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002598
Jiri Olsa62b915f2010-04-02 19:01:22 +02002599void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002600print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2601{
2602 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002603 struct trace_buffer *buf = iter->trace_buffer;
2604 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002605 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002606 unsigned long entries;
2607 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002608 const char *name = "preemption";
2609
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002610 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002611
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002612 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002613
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002614 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002615 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002616 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002617 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002618 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002619 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002620 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002621 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002622 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002623 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002624#if defined(CONFIG_PREEMPT_NONE)
2625 "server",
2626#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2627 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002628#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002629 "preempt",
2630#else
2631 "unknown",
2632#endif
2633 /* These are reserved for later use */
2634 0, 0, 0, 0);
2635#ifdef CONFIG_SMP
2636 seq_printf(m, " #P:%d)\n", num_online_cpus());
2637#else
2638 seq_puts(m, ")\n");
2639#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002640 seq_puts(m, "# -----------------\n");
2641 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002642 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002643 data->comm, data->pid,
2644 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002645 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002646 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002647
2648 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002649 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002650 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2651 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002652 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002653 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2654 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002655 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002656 }
2657
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002658 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002659}
2660
Steven Rostedta3097202008-11-07 22:36:02 -05002661static void test_cpu_buff_start(struct trace_iterator *iter)
2662{
2663 struct trace_seq *s = &iter->seq;
2664
Steven Rostedt12ef7d42008-11-12 17:52:38 -05002665 if (!(trace_flags & TRACE_ITER_ANNOTATE))
2666 return;
2667
2668 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2669 return;
2670
Rusty Russell44623442009-01-01 10:12:23 +10302671 if (cpumask_test_cpu(iter->cpu, iter->started))
Steven Rostedta3097202008-11-07 22:36:02 -05002672 return;
2673
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002674 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002675 return;
2676
Rusty Russell44623442009-01-01 10:12:23 +10302677 cpumask_set_cpu(iter->cpu, iter->started);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02002678
2679 /* Don't print started cpu buffer for the first entry of the trace */
2680 if (iter->idx > 1)
2681 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2682 iter->cpu);
Steven Rostedta3097202008-11-07 22:36:02 -05002683}
2684
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002685static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002686{
Steven Rostedt214023c2008-05-12 21:20:46 +02002687 struct trace_seq *s = &iter->seq;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002688 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002689 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002690 struct trace_event *event;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002691
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002692 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002693
Steven Rostedta3097202008-11-07 22:36:02 -05002694 test_cpu_buff_start(iter);
2695
Steven Rostedtf633cef2008-12-23 23:24:13 -05002696 event = ftrace_find_event(entry->type);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002697
2698 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002699 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2700 trace_print_lat_context(iter);
2701 else
2702 trace_print_context(iter);
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002703 }
2704
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002705 if (trace_seq_has_overflowed(s))
2706 return TRACE_TYPE_PARTIAL_LINE;
2707
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002708 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002709 return event->funcs->trace(iter, sym_flags, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002710
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002711 trace_seq_printf(s, "Unknown type %d\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002712
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002713 return trace_handle_return(s);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002714}
2715
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002716static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002717{
2718 struct trace_seq *s = &iter->seq;
2719 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002720 struct trace_event *event;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002721
2722 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002723
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002724 if (trace_flags & TRACE_ITER_CONTEXT_INFO)
2725 trace_seq_printf(s, "%d %d %llu ",
2726 entry->pid, iter->cpu, iter->ts);
2727
2728 if (trace_seq_has_overflowed(s))
2729 return TRACE_TYPE_PARTIAL_LINE;
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002730
Steven Rostedtf633cef2008-12-23 23:24:13 -05002731 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002732 if (event)
Steven Rostedta9a57762010-04-22 18:46:14 -04002733 return event->funcs->raw(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002734
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002735 trace_seq_printf(s, "%d ?\n", entry->type);
Steven Rostedt7104f302008-10-01 10:52:51 -04002736
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002737 return trace_handle_return(s);
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002738}
2739
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002740static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002741{
2742 struct trace_seq *s = &iter->seq;
2743 unsigned char newline = '\n';
2744 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002745 struct trace_event *event;
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002746
2747 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002748
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002749 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002750 SEQ_PUT_HEX_FIELD(s, entry->pid);
2751 SEQ_PUT_HEX_FIELD(s, iter->cpu);
2752 SEQ_PUT_HEX_FIELD(s, iter->ts);
2753 if (trace_seq_has_overflowed(s))
2754 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002755 }
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002756
Steven Rostedtf633cef2008-12-23 23:24:13 -05002757 event = ftrace_find_event(entry->type);
Arnaldo Carvalho de Melo268ccda2009-02-04 20:16:39 -02002758 if (event) {
Steven Rostedta9a57762010-04-22 18:46:14 -04002759 enum print_line_t ret = event->funcs->hex(iter, 0, event);
Arnaldo Carvalho de Melod9793bd2009-02-03 20:20:41 -02002760 if (ret != TRACE_TYPE_HANDLED)
2761 return ret;
2762 }
Steven Rostedt7104f302008-10-01 10:52:51 -04002763
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002764 SEQ_PUT_FIELD(s, newline);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002765
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002766 return trace_handle_return(s);
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002767}
2768
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002769static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002770{
2771 struct trace_seq *s = &iter->seq;
2772 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002773 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002774
2775 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002776
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002777 if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002778 SEQ_PUT_FIELD(s, entry->pid);
2779 SEQ_PUT_FIELD(s, iter->cpu);
2780 SEQ_PUT_FIELD(s, iter->ts);
2781 if (trace_seq_has_overflowed(s))
2782 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002783 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002784
Steven Rostedtf633cef2008-12-23 23:24:13 -05002785 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002786 return event ? event->funcs->binary(iter, 0, event) :
2787 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002788}
2789
Jiri Olsa62b915f2010-04-02 19:01:22 +02002790int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002791{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002792 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002793 int cpu;
2794
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002795 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002796 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002797 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002798 buf_iter = trace_buffer_iter(iter, cpu);
2799 if (buf_iter) {
2800 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002801 return 0;
2802 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002803 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002804 return 0;
2805 }
2806 return 1;
2807 }
2808
Steven Rostedtab464282008-05-12 21:21:00 +02002809 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002810 buf_iter = trace_buffer_iter(iter, cpu);
2811 if (buf_iter) {
2812 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002813 return 0;
2814 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002815 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002816 return 0;
2817 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002818 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002819
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002820 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002821}
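/*
 * Note: trace_empty() honours iter->cpu_file, so a per-cpu trace file
 * checks only its own buffer; where no buffer iterator has been set up
 * yet, it falls back to asking the ring buffer directly through
 * ring_buffer_empty_cpu().
 */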
2822
Lai Jiangshan4f535962009-05-18 19:35:34 +08002823/* Called with trace_event_read_lock() held. */
Jason Wessel955b61e2010-08-05 09:22:23 -05002824enum print_line_t print_trace_line(struct trace_iterator *iter)
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002825{
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002826 enum print_line_t ret;
2827
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002828 if (iter->lost_events) {
2829 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2830 iter->cpu, iter->lost_events);
2831 if (trace_seq_has_overflowed(&iter->seq))
2832 return TRACE_TYPE_PARTIAL_LINE;
2833 }
Steven Rostedtbc21b472010-03-31 19:49:26 -04002834
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002835 if (iter->trace && iter->trace->print_line) {
2836 ret = iter->trace->print_line(iter);
2837 if (ret != TRACE_TYPE_UNHANDLED)
2838 return ret;
2839 }
Thomas Gleixner72829bc2008-05-23 21:37:28 +02002840
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -05002841 if (iter->ent->type == TRACE_BPUTS &&
2842 trace_flags & TRACE_ITER_PRINTK &&
2843 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2844 return trace_print_bputs_msg_only(iter);
2845
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002846 if (iter->ent->type == TRACE_BPRINT &&
2847 trace_flags & TRACE_ITER_PRINTK &&
2848 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002849 return trace_print_bprintk_msg_only(iter);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002850
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002851 if (iter->ent->type == TRACE_PRINT &&
2852 trace_flags & TRACE_ITER_PRINTK &&
2853 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
Steven Rostedt5ef841f2009-03-19 12:20:38 -04002854 return trace_print_printk_msg_only(iter);
Frederic Weisbecker66896a82008-12-13 20:18:13 +01002855
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002856 if (trace_flags & TRACE_ITER_BIN)
2857 return print_bin_fmt(iter);
2858
Ingo Molnar5e3ca0e2008-05-12 21:20:49 +02002859 if (trace_flags & TRACE_ITER_HEX)
2860 return print_hex_fmt(iter);
2861
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002862 if (trace_flags & TRACE_ITER_RAW)
2863 return print_raw_fmt(iter);
2864
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002865 return print_trace_fmt(iter);
2866}
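/*
 * Note: the dispatch order above is fixed: lost-event annotation, the
 * tracer's own print_line() hook, the printk-msg-only shortcuts, then
 * the bin/hex/raw formats, with print_trace_fmt() as the default. For
 * example, "echo raw > trace_options" in tracefs routes every line
 * through print_raw_fmt().
 */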
2867
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002868void trace_latency_header(struct seq_file *m)
2869{
2870 struct trace_iterator *iter = m->private;
2871
2872 /* print nothing if the buffers are empty */
2873 if (trace_empty(iter))
2874 return;
2875
2876 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2877 print_trace_header(m, iter);
2878
2879 if (!(trace_flags & TRACE_ITER_VERBOSE))
2880 print_lat_help_header(m);
2881}
2882
Jiri Olsa62b915f2010-04-02 19:01:22 +02002883void trace_default_header(struct seq_file *m)
2884{
2885 struct trace_iterator *iter = m->private;
2886
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002887 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2888 return;
2889
Jiri Olsa62b915f2010-04-02 19:01:22 +02002890 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2891 /* print nothing if the buffers are empty */
2892 if (trace_empty(iter))
2893 return;
2894 print_trace_header(m, iter);
2895 if (!(trace_flags & TRACE_ITER_VERBOSE))
2896 print_lat_help_header(m);
2897 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002898 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2899 if (trace_flags & TRACE_ITER_IRQ_INFO)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002900 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002901 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002902 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002903 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002904 }
2905}
2906
Steven Rostedte0a413f2011-09-29 21:26:16 -04002907static void test_ftrace_alive(struct seq_file *m)
2908{
2909 if (!ftrace_is_dead())
2910 return;
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002911 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
2912 "# MAY BE MISSING FUNCTION EVENTS\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002913}
2914
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002915#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002916static void show_snapshot_main_help(struct seq_file *m)
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002917{
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002918 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
2919 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2920 "# Takes a snapshot of the main buffer.\n"
2921 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
2922 "# (Doesn't have to be '2' works with any number that\n"
2923 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002924}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002925
2926static void show_snapshot_percpu_help(struct seq_file *m)
2927{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002928 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002929#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002930 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2931 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002932#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002933 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2934 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002935#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002936 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
2937 "# (Doesn't have to be '2' works with any number that\n"
2938 "# is not a '0' or '1')\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002939}
2940
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002941static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
2942{
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05002943 if (iter->tr->allocated_snapshot)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002944 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002945 else
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002946 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002947
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002948 seq_puts(m, "# Snapshot commands:\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002949 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
2950 show_snapshot_main_help(m);
2951 else
2952 show_snapshot_percpu_help(m);
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002953}
2954#else
2955/* Should never be called */
2956static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
2957#endif
2958
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002959static int s_show(struct seq_file *m, void *v)
2960{
2961 struct trace_iterator *iter = v;
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002962 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002963
2964 if (iter->ent == NULL) {
2965 if (iter->tr) {
2966 seq_printf(m, "# tracer: %s\n", iter->trace->name);
2967 seq_puts(m, "#\n");
Steven Rostedte0a413f2011-09-29 21:26:16 -04002968 test_ftrace_alive(m);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002969 }
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002970 if (iter->snapshot && trace_empty(iter))
2971 print_snapshot_help(m, iter);
2972 else if (iter->trace && iter->trace->print_header)
Markus Metzger8bba1bf2008-11-25 09:12:31 +01002973 iter->trace->print_header(m);
Jiri Olsa62b915f2010-04-02 19:01:22 +02002974 else
2975 trace_default_header(m);
2976
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002977 } else if (iter->leftover) {
2978 /*
2979 * If we filled the seq_file buffer earlier, we
2980 * want to just show it now.
2981 */
2982 ret = trace_print_seq(m, &iter->seq);
2983
2984 /* ret should this time be zero, but you never know */
2985 iter->leftover = ret;
2986
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002987 } else {
Ingo Molnarf9896bf2008-05-12 21:20:47 +02002988 print_trace_line(iter);
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002989 ret = trace_print_seq(m, &iter->seq);
2990 /*
2991 * If we overflow the seq_file buffer, then it will
2992 * ask us for this data again at start up.
2993 * Use that instead.
2994 * ret is 0 if seq_file write succeeded.
2995 * -1 otherwise.
2996 */
2997 iter->leftover = ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002998 }
2999
3000 return 0;
3001}
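/*
 * Note: iter->leftover implements seq_file overflow recovery: when
 * trace_print_seq() cannot flush a whole entry, its non-zero return
 * is saved so that the next s_show() call re-emits the buffered text
 * instead of consuming a new entry.
 */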
3002
Oleg Nesterov649e9c72013-07-23 17:25:54 +02003003/*
3004 * Should be used after trace_array_get(); trace_types_lock
3005 * ensures that i_cdev was already initialized.
3006 */
3007static inline int tracing_get_cpu(struct inode *inode)
3008{
3009 if (inode->i_cdev) /* See trace_create_cpu_file() */
3010 return (long)inode->i_cdev - 1;
3011 return RING_BUFFER_ALL_CPUS;
3012}
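/*
 * Note: per-cpu trace files apparently store "cpu + 1" in
 * inode->i_cdev when they are created (see trace_create_cpu_file()),
 * so a NULL i_cdev decodes to RING_BUFFER_ALL_CPUS above and anything
 * else decodes back to its CPU number.
 */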
3013
James Morris88e9d342009-09-22 16:43:43 -07003014static const struct seq_operations tracer_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003015 .start = s_start,
3016 .next = s_next,
3017 .stop = s_stop,
3018 .show = s_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003019};
3020
Ingo Molnare309b412008-05-12 21:20:51 +02003021static struct trace_iterator *
Oleg Nesterov6484c712013-07-23 17:26:10 +02003022__tracing_open(struct inode *inode, struct file *file, bool snapshot)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003023{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003024 struct trace_array *tr = inode->i_private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003025 struct trace_iterator *iter;
Jiri Olsa50e18b92012-04-25 10:23:39 +02003026 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003027
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003028 if (tracing_disabled)
3029 return ERR_PTR(-ENODEV);
Steven Rostedt60a11772008-05-12 21:20:44 +02003030
Jiri Olsa50e18b92012-04-25 10:23:39 +02003031 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003032 if (!iter)
3033 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003034
Steven Rostedt6d158a82012-06-27 20:46:14 -04003035 iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
3036 GFP_KERNEL);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003037 if (!iter->buffer_iter)
3038 goto release;
3039
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003040 /*
3041 * We make a copy of the current tracer to avoid concurrent
3042 * changes to it while we are reading.
3043 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003044 mutex_lock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003045 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003046 if (!iter->trace)
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003047 goto fail;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003048
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003049 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003050
Li Zefan79f55992009-06-15 14:58:26 +08003051 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003052 goto fail;
3053
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003054 iter->tr = tr;
3055
3056#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003057 /* Currently only the top directory has a snapshot */
3058 if (tr->current_trace->print_max || snapshot)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003059 iter->trace_buffer = &tr->max_buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003060 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003061#endif
3062 iter->trace_buffer = &tr->trace_buffer;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003063 iter->snapshot = snapshot;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003064 iter->pos = -1;
Oleg Nesterov6484c712013-07-23 17:26:10 +02003065 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003066 mutex_init(&iter->mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003067
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003068 /* Notify the tracer early, before we stop tracing. */
3069 if (iter->trace && iter->trace->open)
Markus Metzgera93751c2008-12-11 13:53:26 +01003070 iter->trace->open(iter);
Markus Metzger8bba1bf2008-11-25 09:12:31 +01003071
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003072 /* Annotate start of buffers if we had overruns */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003073 if (ring_buffer_overruns(iter->trace_buffer->buffer))
Steven Rostedt12ef7d42008-11-12 17:52:38 -05003074 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3075
David Sharp8be07092012-11-13 12:18:22 -08003076 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09003077 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08003078 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3079
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003080 /* stop the trace while dumping if we are not opening "snapshot" */
3081 if (!iter->snapshot)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003082 tracing_stop_tr(tr);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003083
Steven Rostedtae3b5092013-01-23 15:22:59 -05003084 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003085 for_each_tracing_cpu(cpu) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003086 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003087 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003088 }
3089 ring_buffer_read_prepare_sync();
3090 for_each_tracing_cpu(cpu) {
3091 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003092 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01003093 }
3094 } else {
3095 cpu = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003096 iter->buffer_iter[cpu] =
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003097 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
David Miller72c9ddf2010-04-20 15:47:11 -07003098 ring_buffer_read_prepare_sync();
3099 ring_buffer_read_start(iter->buffer_iter[cpu]);
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04003100 tracing_iter_reset(iter, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003101 }
3102
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003103 mutex_unlock(&trace_types_lock);
3104
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003105 return iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003106
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003107 fail:
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003108 mutex_unlock(&trace_types_lock);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003109 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003110 kfree(iter->buffer_iter);
Dan Carpenter93574fc2012-07-11 09:35:08 +03003111release:
Jiri Olsa50e18b92012-04-25 10:23:39 +02003112 seq_release_private(inode, file);
3113 return ERR_PTR(-ENOMEM);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003114}
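/*
 * Note: everything allocated above (the iter->trace copy, the started
 * cpumask, the per-cpu buffer iterators) is torn down again in
 * tracing_release() below, which also restarts tracing if it was
 * stopped here; the "snapshot" case skips both the stop and the
 * restart.
 */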
3115
3116int tracing_open_generic(struct inode *inode, struct file *filp)
3117{
Steven Rostedt60a11772008-05-12 21:20:44 +02003118 if (tracing_disabled)
3119 return -ENODEV;
3120
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003121 filp->private_data = inode->i_private;
3122 return 0;
3123}
3124
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003125bool tracing_is_disabled(void)
3126{
3127 return (tracing_disabled) ? true : false;
3128}
3129
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003130/*
3131 * Open and update trace_array ref count.
3132 * Must have the current trace_array passed to it.
3133 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003134static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003135{
3136 struct trace_array *tr = inode->i_private;
3137
3138 if (tracing_disabled)
3139 return -ENODEV;
3140
3141 if (trace_array_get(tr) < 0)
3142 return -ENODEV;
3143
3144 filp->private_data = inode->i_private;
3145
3146 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003147}
3148
Hannes Eder4fd27352009-02-10 19:44:12 +01003149static int tracing_release(struct inode *inode, struct file *file)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003150{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003151 struct trace_array *tr = inode->i_private;
matt mooney907f2782010-09-27 19:04:53 -07003152 struct seq_file *m = file->private_data;
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003153 struct trace_iterator *iter;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003154 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003155
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003156 if (!(file->f_mode & FMODE_READ)) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003157 trace_array_put(tr);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003158 return 0;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003159 }
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003160
Oleg Nesterov6484c712013-07-23 17:26:10 +02003161 /* Writes do not use seq_file */
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003162 iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003163 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05003164
Steven Rostedt3928a8a2008-09-29 23:02:41 -04003165 for_each_tracing_cpu(cpu) {
3166 if (iter->buffer_iter[cpu])
3167 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3168 }
3169
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003170 if (iter->trace && iter->trace->close)
3171 iter->trace->close(iter);
3172
Hiraku Toyookadebdd572012-12-26 11:53:00 +09003173 if (!iter->snapshot)
3174 /* reenable tracing if it was previously enabled */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003175 tracing_start_tr(tr);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003176
3177 __trace_array_put(tr);
3178
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003179 mutex_unlock(&trace_types_lock);
3180
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003181 mutex_destroy(&iter->mutex);
Frederic Weisbeckerb0dfa972009-04-01 22:53:08 +02003182 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01003183 kfree(iter->trace);
Steven Rostedt6d158a82012-06-27 20:46:14 -04003184 kfree(iter->buffer_iter);
Jiri Olsa50e18b92012-04-25 10:23:39 +02003185 seq_release_private(inode, file);
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003186
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003187 return 0;
3188}
3189
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003190static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3191{
3192 struct trace_array *tr = inode->i_private;
3193
3194 trace_array_put(tr);
3195 return 0;
3196}
3197
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003198static int tracing_single_release_tr(struct inode *inode, struct file *file)
3199{
3200 struct trace_array *tr = inode->i_private;
3201
3202 trace_array_put(tr);
3203
3204 return single_release(inode, file);
3205}
3206
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003207static int tracing_open(struct inode *inode, struct file *file)
3208{
Oleg Nesterov6484c712013-07-23 17:26:10 +02003209 struct trace_array *tr = inode->i_private;
Steven Rostedt85a2f9b2009-02-27 00:12:38 -05003210 struct trace_iterator *iter;
3211 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003212
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003213 if (trace_array_get(tr) < 0)
3214 return -ENODEV;
3215
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003216 /* If this file was open for write, then erase contents */
Oleg Nesterov6484c712013-07-23 17:26:10 +02003217 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
3218 int cpu = tracing_get_cpu(inode);
3219
3220 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003221 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003222 else
Oleg Nesterov6484c712013-07-23 17:26:10 +02003223 tracing_reset(&tr->trace_buffer, cpu);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003224 }
3225
3226 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02003227 iter = __tracing_open(inode, file, false);
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003228 if (IS_ERR(iter))
3229 ret = PTR_ERR(iter);
3230 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
3231 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3232 }
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04003233
3234 if (ret < 0)
3235 trace_array_put(tr);
3236
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003237 return ret;
3238}
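/*
 * Usage note (illustrative): because of the O_TRUNC handling above, a
 * plain "echo > trace" from a shell opens the file for writing with
 * truncation and thereby clears the selected ring buffer(s) without
 * writing any data.
 */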
3239
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003240/*
3241 * Some tracers are not suitable for instance buffers.
3242 * A tracer is always available for the global array (toplevel)
3243 * or if it explicitly states that it is.
3244 */
3245static bool
3246trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3247{
3248 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3249}
3250
3251/* Find the next tracer that this trace array may use */
3252static struct tracer *
3253get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3254{
3255 while (t && !trace_ok_for_array(t, tr))
3256 t = t->next;
3257
3258 return t;
3259}
3260
Ingo Molnare309b412008-05-12 21:20:51 +02003261static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003262t_next(struct seq_file *m, void *v, loff_t *pos)
3263{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003264 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003265 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003266
3267 (*pos)++;
3268
3269 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003270 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003271
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003272 return t;
3273}
3274
3275static void *t_start(struct seq_file *m, loff_t *pos)
3276{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003277 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003278 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003279 loff_t l = 0;
3280
3281 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003282
3283 t = get_tracer_for_array(tr, trace_types);
3284 for (; t && l < *pos; t = t_next(m, t, &l))
3285 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003286
3287 return t;
3288}
3289
3290static void t_stop(struct seq_file *m, void *p)
3291{
3292 mutex_unlock(&trace_types_lock);
3293}
3294
3295static int t_show(struct seq_file *m, void *v)
3296{
3297 struct tracer *t = v;
3298
3299 if (!t)
3300 return 0;
3301
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003302 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003303 if (t->next)
3304 seq_putc(m, ' ');
3305 else
3306 seq_putc(m, '\n');
3307
3308 return 0;
3309}
3310
James Morris88e9d342009-09-22 16:43:43 -07003311static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003312 .start = t_start,
3313 .next = t_next,
3314 .stop = t_stop,
3315 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003316};
3317
3318static int show_traces_open(struct inode *inode, struct file *file)
3319{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003320 struct trace_array *tr = inode->i_private;
3321 struct seq_file *m;
3322 int ret;
3323
Steven Rostedt60a11772008-05-12 21:20:44 +02003324 if (tracing_disabled)
3325 return -ENODEV;
3326
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003327 ret = seq_open(file, &show_traces_seq_ops);
3328 if (ret)
3329 return ret;
3330
3331 m = file->private_data;
3332 m->private = tr;
3333
3334 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003335}
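/*
 * Usage note (illustrative): this backs "available_tracers", so
 * "cat available_tracers" emits the space-separated list built by
 * t_show() above, filtered per trace_array by trace_ok_for_array().
 */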
3336
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003337static ssize_t
3338tracing_write_stub(struct file *filp, const char __user *ubuf,
3339 size_t count, loff_t *ppos)
3340{
3341 return count;
3342}
3343
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003344loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003345{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003346 int ret;
3347
Slava Pestov364829b2010-11-24 15:13:16 -08003348 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003349 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003350 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003351 file->f_pos = ret = 0;
3352
3353 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003354}
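/*
 * Note: only readers go through seq_lseek(); a writer's file position
 * is pinned to zero, since writes never use the seq_file machinery
 * (see tracing_release() above).
 */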
3355
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003356static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003357 .open = tracing_open,
3358 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003359 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003360 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003361 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003362};
3363
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003364static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003365 .open = show_traces_open,
3366 .read = seq_read,
3367 .release = seq_release,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003368 .llseek = seq_lseek,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003369};
3370
Ingo Molnar36dfe922008-05-12 21:20:52 +02003371/*
Ingo Molnar36dfe922008-05-12 21:20:52 +02003372 * The tracer itself will not take this lock, but we still want
3373 * to provide a consistent cpumask to user-space:
3374 */
3375static DEFINE_MUTEX(tracing_cpumask_update_lock);
3376
3377/*
3378 * Temporary storage for the character representation of the
3379 * CPU bitmask (and one more byte for the newline):
3380 */
3381static char mask_str[NR_CPUS + 1];
3382
Ingo Molnarc7078de2008-05-12 21:20:52 +02003383static ssize_t
3384tracing_cpumask_read(struct file *filp, char __user *ubuf,
3385 size_t count, loff_t *ppos)
3386{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003387 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003388 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003389
3390 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003391
Tejun Heo1a402432015-02-13 14:37:39 -08003392 len = snprintf(mask_str, count, "%*pb\n",
3393 cpumask_pr_args(tr->tracing_cpumask));
3394 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003395 count = -EINVAL;
3396 goto out_err;
3397 }
Ingo Molnar36dfe922008-05-12 21:20:52 +02003398 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3399
3400out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003401 mutex_unlock(&tracing_cpumask_update_lock);
3402
3403 return count;
3404}
3405
3406static ssize_t
3407tracing_cpumask_write(struct file *filp, const char __user *ubuf,
3408 size_t count, loff_t *ppos)
3409{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003410 struct trace_array *tr = file_inode(filp)->i_private;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303411 cpumask_var_t tracing_cpumask_new;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003412 int err, cpu;
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303413
3414 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
3415 return -ENOMEM;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003416
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303417 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003418 if (err)
3419 goto err_unlock;
3420
Li Zefan215368e2009-06-15 10:56:42 +08003421 mutex_lock(&tracing_cpumask_update_lock);
3422
Steven Rostedta5e25882008-12-02 15:34:05 -05003423 local_irq_disable();
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003424 arch_spin_lock(&tr->max_lock);
Steven Rostedtab464282008-05-12 21:21:00 +02003425 for_each_tracing_cpu(cpu) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003426 /*
3427 * Increase/decrease the disabled counter if we are
3428 * about to flip a bit in the cpumask:
3429 */
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003430 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303431 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003432 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3433 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003434 }
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003435 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303436 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003437 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
3438 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003439 }
3440 }
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05003441 arch_spin_unlock(&tr->max_lock);
Steven Rostedta5e25882008-12-02 15:34:05 -05003442 local_irq_enable();
Ingo Molnar36dfe922008-05-12 21:20:52 +02003443
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003444 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003445
Ingo Molnarc7078de2008-05-12 21:20:52 +02003446 mutex_unlock(&tracing_cpumask_update_lock);
Rusty Russell9e01c1b2009-01-01 10:12:22 +10303447 free_cpumask_var(tracing_cpumask_new);
Ingo Molnarc7078de2008-05-12 21:20:52 +02003448
Ingo Molnarc7078de2008-05-12 21:20:52 +02003449 return count;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003450
3451err_unlock:
Li Zefan215368e2009-06-15 10:56:42 +08003452 free_cpumask_var(tracing_cpumask_new);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003453
3454 return err;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003455}
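/*
 * Usage note (illustrative): the mask is exchanged as a hex cpumask
 * string, e.g. in tracefs:
 *
 *	# echo 3 > tracing_cpumask	(trace CPUs 0 and 1 only)
 *	# cat tracing_cpumask
 *	3
 */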
3456
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003457static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003458 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003459 .read = tracing_cpumask_read,
3460 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003461 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003462 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003463};
3464
Li Zefanfdb372e2009-12-08 11:15:59 +08003465static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003466{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003467 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003468 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003469 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003470 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003471
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003472 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003473 tracer_flags = tr->current_trace->flags->val;
3474 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003475
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003476 for (i = 0; trace_options[i]; i++) {
3477 if (trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003478 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003479 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003480 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003481 }
3482
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003483 for (i = 0; trace_opts[i].name; i++) {
3484 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003485 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003486 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003487 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003488 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003489 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003490
Li Zefanfdb372e2009-12-08 11:15:59 +08003491 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003492}
3493
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003494static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003495 struct tracer_flags *tracer_flags,
3496 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003497{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003498 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003499 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003500
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003501 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003502 if (ret)
3503 return ret;
3504
3505 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003506 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003507 else
Zhaolei77708412009-08-07 18:53:21 +08003508 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003509 return 0;
3510}
3511
Li Zefan8d18eaa2009-12-08 11:17:06 +08003512/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003513static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003514{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003515 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003516 struct tracer_flags *tracer_flags = trace->flags;
3517 struct tracer_opt *opts = NULL;
3518 int i;
3519
3520 for (i = 0; tracer_flags->opts[i].name; i++) {
3521 opts = &tracer_flags->opts[i];
3522
3523 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003524 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003525 }
3526
3527 return -EINVAL;
3528}
3529
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003530/* Some tracers require overwrite to stay enabled */
3531int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3532{
3533 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3534 return -1;
3535
3536 return 0;
3537}
3538
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003539int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003540{
3541 /* do nothing if flag is already set */
3542 if (!!(trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003543 return 0;
3544
3545 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003546 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003547 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003548 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003549
3550 if (enabled)
3551 trace_flags |= mask;
3552 else
3553 trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003554
3555 if (mask == TRACE_ITER_RECORD_CMD)
3556 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003557
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003558 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003559 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003560#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003561 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003562#endif
3563 }
Steven Rostedt81698832012-10-11 10:15:05 -04003564
3565 if (mask == TRACE_ITER_PRINTK)
3566 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003567
3568 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003569}
3570
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003571static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003572{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003573 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003574 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003575 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003576 int i;
3577
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003578 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003579
Li Zefan8d18eaa2009-12-08 11:17:06 +08003580 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003581 neg = 1;
3582 cmp += 2;
3583 }
3584
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003585 mutex_lock(&trace_types_lock);
3586
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003587 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003588 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003589 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003590 break;
3591 }
3592 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003593
3594 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003595 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003596 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003597
3598 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003599
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003600 return ret;
3601}
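/*
 * Usage note (illustrative): this backs the trace_options file, e.g.:
 *
 *	# echo noprint-parent > trace_options	(clear a core flag)
 *	# echo func_stack_trace > trace_options	(set a tracer flag)
 *
 * A name matching neither the core flags nor the current tracer's
 * options makes set_tracer_option() return -EINVAL.
 */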
3602
3603static ssize_t
3604tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3605 size_t cnt, loff_t *ppos)
3606{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003607 struct seq_file *m = filp->private_data;
3608 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003609 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003610 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003611
3612 if (cnt >= sizeof(buf))
3613 return -EINVAL;
3614
3615 if (copy_from_user(&buf, ubuf, cnt))
3616 return -EFAULT;
3617
Steven Rostedta8dd2172013-01-09 20:54:17 -05003618 buf[cnt] = 0;
3619
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003620 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003621 if (ret < 0)
3622 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003623
Jiri Olsacf8517c2009-10-23 19:36:16 -04003624 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003625
3626 return cnt;
3627}
3628
Li Zefanfdb372e2009-12-08 11:15:59 +08003629static int tracing_trace_options_open(struct inode *inode, struct file *file)
3630{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003631 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003632 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003633
Li Zefanfdb372e2009-12-08 11:15:59 +08003634 if (tracing_disabled)
3635 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003636
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003637 if (trace_array_get(tr) < 0)
3638 return -ENODEV;
3639
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003640 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3641 if (ret < 0)
3642 trace_array_put(tr);
3643
3644 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003645}
3646
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003647static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003648 .open = tracing_trace_options_open,
3649 .read = seq_read,
3650 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003651 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003652 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003653};
3654
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003655static const char readme_msg[] =
3656 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003657 "# echo 0 > tracing_on : quick way to disable tracing\n"
3658 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3659 " Important files:\n"
3660 " trace\t\t\t- The static contents of the buffer\n"
3661 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3662 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3663 " current_tracer\t- function and latency tracers\n"
3664 " available_tracers\t- list of configured tracers for current_tracer\n"
3665 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3666 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3667 " trace_clock\t\t-change the clock used to order events\n"
3668 " local: Per cpu clock but may not be synced across CPUs\n"
3669 " global: Synced across CPUs but slows tracing down.\n"
3670 " counter: Not a clock, but just an increment\n"
3671 " uptime: Jiffy counter from time of boot\n"
3672 " perf: Same clock that perf events use\n"
3673#ifdef CONFIG_X86_64
3674 " x86-tsc: TSC cycle counter\n"
3675#endif
3676 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3677 " tracing_cpumask\t- Limit which CPUs to trace\n"
3678 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3679 "\t\t\t Remove sub-buffer with rmdir\n"
3680 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003681	"\t\t\t Disable an option by prefixing 'no' to the\n"
3682 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003683  " saved_cmdlines_size\t- echo the number of comm-pid pairs to keep in here\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003684#ifdef CONFIG_DYNAMIC_FTRACE
3685 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003686 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3687 "\t\t\t functions\n"
3688 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3689 "\t modules: Can select a group via module\n"
3690 "\t Format: :mod:<module-name>\n"
3691 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3692 "\t triggers: a command to perform when function is hit\n"
3693 "\t Format: <function>:<trigger>[:count]\n"
3694 "\t trigger: traceon, traceoff\n"
3695 "\t\t enable_event:<system>:<event>\n"
3696 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003697#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003698 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003699#endif
3700#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003701 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003702#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003703 "\t\t dump\n"
3704 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003705 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3706 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3707 "\t The first one will disable tracing every time do_fault is hit\n"
3708 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3709 "\t The first time do trap is hit and it disables tracing, the\n"
3710 "\t counter will decrement to 2. If tracing is already disabled,\n"
3711 "\t the counter will not decrement. It only decrements when the\n"
3712 "\t trigger did work\n"
3713 "\t To remove trigger without count:\n"
3714 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3715 "\t To remove trigger with a count:\n"
3716 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003717 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003718 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3719 "\t modules: Can select a group via module command :mod:\n"
3720 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003721#endif /* CONFIG_DYNAMIC_FTRACE */
3722#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003723 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3724 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003725#endif
3726#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3727 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003728 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003729 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3730#endif
3731#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003732 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3733 "\t\t\t snapshot buffer. Read the contents for more\n"
3734 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003735#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003736#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003737 " stack_trace\t\t- Shows the max stack trace when active\n"
3738 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003739 "\t\t\t Write into this file to reset the max size (trigger a\n"
3740 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003741#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003742 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3743 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003744#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003745#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003746 " events/\t\t- Directory containing all trace event subsystems:\n"
3747 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3748 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003749 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3750 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003751 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003752 " events/<system>/<event>/\t- Directory containing control files for\n"
3753 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003754 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3755 " filter\t\t- If set, only events passing filter are traced\n"
3756 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003757 "\t Format: <trigger>[:count][if <filter>]\n"
3758 "\t trigger: traceon, traceoff\n"
3759 "\t enable_event:<system>:<event>\n"
3760 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003761#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003762 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003763#endif
3764#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003765 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003766#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003767 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3768 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3769 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3770 "\t events/block/block_unplug/trigger\n"
3771 "\t The first disables tracing every time block_unplug is hit.\n"
3772 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3773 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3774 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3775 "\t Like function triggers, the counter is only decremented if it\n"
3776 "\t enabled or disabled tracing.\n"
3777 "\t To remove a trigger without a count:\n"
3778 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3779 "\t To remove a trigger with a count:\n"
3780 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3781 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003782;
3783
3784static ssize_t
3785tracing_readme_read(struct file *filp, char __user *ubuf,
3786 size_t cnt, loff_t *ppos)
3787{
3788 return simple_read_from_buffer(ubuf, cnt, ppos,
3789 readme_msg, strlen(readme_msg));
3790}
3791
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003792static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003793 .open = tracing_open_generic,
3794 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003795 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003796};
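/*
 * The readme_msg text above is exposed read-only as the "README" file
 * in the tracing directory; "cat README" from there dumps this
 * mini-HOWTO (the path depends on where the tracing filesystem is
 * mounted).
 */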
3797
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003798static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003799{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003800 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003801
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003802 if (*pos || m->count)
3803 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003804
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003805 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003806
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003807 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3808 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003809 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003810 continue;
3811
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003812 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003813 }
3814
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003815 return NULL;
3816}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003817
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003818static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3819{
3820 void *v;
3821 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003822
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003823 preempt_disable();
3824 arch_spin_lock(&trace_cmdline_lock);
3825
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003826 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003827 while (l <= *pos) {
3828 v = saved_cmdlines_next(m, v, &l);
3829 if (!v)
3830 return NULL;
3831 }
3832
3833 return v;
3834}
3835
3836static void saved_cmdlines_stop(struct seq_file *m, void *v)
3837{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003838 arch_spin_unlock(&trace_cmdline_lock);
3839 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003840}
3841
3842static int saved_cmdlines_show(struct seq_file *m, void *v)
3843{
3844 char buf[TASK_COMM_LEN];
3845 unsigned int *pid = v;
3846
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003847 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003848 seq_printf(m, "%d %s\n", *pid, buf);
3849 return 0;
3850}
3851
3852static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3853 .start = saved_cmdlines_start,
3854 .next = saved_cmdlines_next,
3855 .stop = saved_cmdlines_stop,
3856 .show = saved_cmdlines_show,
3857};
3858
3859static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3860{
3861 if (tracing_disabled)
3862 return -ENODEV;
3863
3864 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003865}
3866
3867static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003868 .open = tracing_saved_cmdlines_open,
3869 .read = seq_read,
3870 .llseek = seq_lseek,
3871 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003872};
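/*
 * Illustrative output of reading saved_cmdlines: one "<pid> <comm>"
 * pair per line, as formatted by saved_cmdlines_show() above. The
 * values here are made up:
 *
 *   # cat saved_cmdlines
 *   15 ksoftirqd/1
 *   1342 bash
 */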
3873
3874static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003875tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3876 size_t cnt, loff_t *ppos)
3877{
3878 char buf[64];
3879 int r;
3880
3881 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003882 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003883 arch_spin_unlock(&trace_cmdline_lock);
3884
3885 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3886}
3887
3888static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3889{
3890 kfree(s->saved_cmdlines);
3891 kfree(s->map_cmdline_to_pid);
3892 kfree(s);
3893}
3894
3895static int tracing_resize_saved_cmdlines(unsigned int val)
3896{
3897 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3898
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003899 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003900 if (!s)
3901 return -ENOMEM;
3902
3903 if (allocate_cmdlines_buffer(val, s) < 0) {
3904 kfree(s);
3905 return -ENOMEM;
3906 }
3907
3908 arch_spin_lock(&trace_cmdline_lock);
3909 savedcmd_temp = savedcmd;
3910 savedcmd = s;
3911 arch_spin_unlock(&trace_cmdline_lock);
3912 free_saved_cmdlines_buffer(savedcmd_temp);
3913
3914 return 0;
3915}
3916
3917static ssize_t
3918tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
3919 size_t cnt, loff_t *ppos)
3920{
3921 unsigned long val;
3922 int ret;
3923
3924 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3925 if (ret)
3926 return ret;
3927
3928	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
3929 if (!val || val > PID_MAX_DEFAULT)
3930 return -EINVAL;
3931
3932 ret = tracing_resize_saved_cmdlines((unsigned int)val);
3933 if (ret < 0)
3934 return ret;
3935
3936 *ppos += cnt;
3937
3938 return cnt;
3939}
3940
3941static const struct file_operations tracing_saved_cmdlines_size_fops = {
3942 .open = tracing_open_generic,
3943 .read = tracing_saved_cmdlines_size_read,
3944 .write = tracing_saved_cmdlines_size_write,
3945};
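/*
 * Usage sketch: the cmdline map can be resized at runtime, bounded to
 * 1..PID_MAX_DEFAULT by the write handler above:
 *
 *   echo 2048 > saved_cmdlines_size
 *   cat saved_cmdlines_size
 *
 * The old buffer is swapped out under trace_cmdline_lock and then
 * freed.
 */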
3946
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04003947#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3948static union trace_enum_map_item *
3949update_enum_map(union trace_enum_map_item *ptr)
3950{
3951 if (!ptr->map.enum_string) {
3952 if (ptr->tail.next) {
3953 ptr = ptr->tail.next;
3954 /* Set ptr to the next real item (skip head) */
3955 ptr++;
3956 } else
3957 return NULL;
3958 }
3959 return ptr;
3960}
3961
3962static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
3963{
3964 union trace_enum_map_item *ptr = v;
3965
3966 /*
3967 * Paranoid! If ptr points to end, we don't want to increment past it.
3968 * This really should never happen.
3969 */
3970 ptr = update_enum_map(ptr);
3971 if (WARN_ON_ONCE(!ptr))
3972 return NULL;
3973
3974 ptr++;
3975
3976 (*pos)++;
3977
3978 ptr = update_enum_map(ptr);
3979
3980 return ptr;
3981}
3982
3983static void *enum_map_start(struct seq_file *m, loff_t *pos)
3984{
3985 union trace_enum_map_item *v;
3986 loff_t l = 0;
3987
3988 mutex_lock(&trace_enum_mutex);
3989
3990 v = trace_enum_maps;
3991 if (v)
3992 v++;
3993
3994 while (v && l < *pos) {
3995 v = enum_map_next(m, v, &l);
3996 }
3997
3998 return v;
3999}
4000
4001static void enum_map_stop(struct seq_file *m, void *v)
4002{
4003 mutex_unlock(&trace_enum_mutex);
4004}
4005
4006static int enum_map_show(struct seq_file *m, void *v)
4007{
4008 union trace_enum_map_item *ptr = v;
4009
4010 seq_printf(m, "%s %ld (%s)\n",
4011 ptr->map.enum_string, ptr->map.enum_value,
4012 ptr->map.system);
4013
4014 return 0;
4015}
4016
4017static const struct seq_operations tracing_enum_map_seq_ops = {
4018 .start = enum_map_start,
4019 .next = enum_map_next,
4020 .stop = enum_map_stop,
4021 .show = enum_map_show,
4022};
4023
4024static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4025{
4026 if (tracing_disabled)
4027 return -ENODEV;
4028
4029 return seq_open(filp, &tracing_enum_map_seq_ops);
4030}
4031
4032static const struct file_operations tracing_enum_map_fops = {
4033 .open = tracing_enum_map_open,
4034 .read = seq_read,
4035 .llseek = seq_lseek,
4036 .release = seq_release,
4037};
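/*
 * Illustrative output of the enum_map file, one line per map entry in
 * the "<enum-string> <value> (<system>)" format used by
 * enum_map_show(); the entry shown is made up:
 *
 *   # cat enum_map
 *   HI_SOFTIRQ 0 (irq)
 */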
4038
4039static inline union trace_enum_map_item *
4040trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4041{
4042 /* Return tail of array given the head */
4043 return ptr + ptr->head.length + 1;
4044}
4045
4046static void
4047trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4048 int len)
4049{
4050 struct trace_enum_map **stop;
4051 struct trace_enum_map **map;
4052 union trace_enum_map_item *map_array;
4053 union trace_enum_map_item *ptr;
4054
4055 stop = start + len;
4056
4057 /*
4058 * The trace_enum_maps contains the map plus a head and tail item,
4059 * where the head holds the module and length of array, and the
4060 * tail holds a pointer to the next list.
4061 */
4062 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4063 if (!map_array) {
4064 pr_warning("Unable to allocate trace enum mapping\n");
4065 return;
4066 }
4067
4068 mutex_lock(&trace_enum_mutex);
4069
4070 if (!trace_enum_maps)
4071 trace_enum_maps = map_array;
4072 else {
4073 ptr = trace_enum_maps;
4074 for (;;) {
4075 ptr = trace_enum_jmp_to_tail(ptr);
4076 if (!ptr->tail.next)
4077 break;
4078 ptr = ptr->tail.next;
4079
4080 }
4081 ptr->tail.next = map_array;
4082 }
4083 map_array->head.mod = mod;
4084 map_array->head.length = len;
4085 map_array++;
4086
4087 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4088 map_array->map = **map;
4089 map_array++;
4090 }
4091 memset(map_array, 0, sizeof(*map_array));
4092
4093 mutex_unlock(&trace_enum_mutex);
4094}
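/*
 * Resulting layout of one block in the trace_enum_maps list (sketch):
 *
 *   [ head(mod,length) | map[0] | map[1] | ... | map[len-1] | tail(next) ]
 *
 * trace_enum_jmp_to_tail() skips the head plus head.length entries to
 * land on the tail; the memset above zeroes the tail so tail.next is
 * NULL until another block is chained on.
 */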
4095
4096static void trace_create_enum_file(struct dentry *d_tracer)
4097{
4098 trace_create_file("enum_map", 0444, d_tracer,
4099 NULL, &tracing_enum_map_fops);
4100}
4101
4102#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4103static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4104static inline void trace_insert_enum_map_file(struct module *mod,
4105 struct trace_enum_map **start, int len) { }
4106#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4107
4108static void trace_insert_enum_map(struct module *mod,
4109 struct trace_enum_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004110{
4111 struct trace_enum_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004112
4113 if (len <= 0)
4114 return;
4115
4116 map = start;
4117
4118 trace_event_enum_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004119
4120 trace_insert_enum_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004121}
4122
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004123static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004124tracing_set_trace_read(struct file *filp, char __user *ubuf,
4125 size_t cnt, loff_t *ppos)
4126{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004127 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004128 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004129 int r;
4130
4131 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004132 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004133 mutex_unlock(&trace_types_lock);
4134
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004135 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004136}
4137
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004138int tracer_init(struct tracer *t, struct trace_array *tr)
4139{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004140 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004141 return t->init(tr);
4142}
4143
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004144static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004145{
4146 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004147
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004148 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004149 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004150}
4151
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004152#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004153/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004154static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4155 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004156{
4157 int cpu, ret = 0;
4158
4159 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4160 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004161 ret = ring_buffer_resize(trace_buf->buffer,
4162 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004163 if (ret < 0)
4164 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004165 per_cpu_ptr(trace_buf->data, cpu)->entries =
4166 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004167 }
4168 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004169 ret = ring_buffer_resize(trace_buf->buffer,
4170 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004171 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004172 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4173 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004174 }
4175
4176 return ret;
4177}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004178#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004179
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004180static int __tracing_resize_ring_buffer(struct trace_array *tr,
4181 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004182{
4183 int ret;
4184
4185 /*
4186 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04004187 * we use the size that was given, and we can forget about
4188 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004189 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004190 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004191
Steven Rostedtb382ede62012-10-10 21:44:34 -04004192 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004193 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004194 return 0;
4195
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004196 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004197 if (ret < 0)
4198 return ret;
4199
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004200#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004201 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4202 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004203 goto out;
4204
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004205 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004206 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004207 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4208 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004209 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004210 /*
4211 * AARGH! We are left with different
4212 * size max buffer!!!!
4213 * The max buffer is our "snapshot" buffer.
4214 * When a tracer needs a snapshot (one of the
4215 * latency tracers), it swaps the max buffer
4216			 * with the saved snapshot. We succeeded in updating
4217			 * the size of the main buffer, but failed to
4218 * update the size of the max buffer. But when we tried
4219 * to reset the main buffer to the original size, we
4220 * failed there too. This is very unlikely to
4221 * happen, but if it does, warn and kill all
4222 * tracing.
4223 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004224 WARN_ON(1);
4225 tracing_disabled = 1;
4226 }
4227 return ret;
4228 }
4229
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004230 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004231 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004232 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004233 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004234
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004235 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004236#endif /* CONFIG_TRACER_MAX_TRACE */
4237
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004238 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004239 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004240 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004241 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004242
4243 return ret;
4244}
4245
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004246static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4247 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004248{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004249 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004250
4251 mutex_lock(&trace_types_lock);
4252
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004253 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4254 /* make sure, this cpu is enabled in the mask */
4255 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4256 ret = -EINVAL;
4257 goto out;
4258 }
4259 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004260
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004261 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004262 if (ret < 0)
4263 ret = -ENOMEM;
4264
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004265out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004266 mutex_unlock(&trace_types_lock);
4267
4268 return ret;
4269}
4270
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004271
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004272/**
4273 * tracing_update_buffers - used by tracing facility to expand ring buffers
4274 *
4275 * To save on memory when tracing is never used on a system with it
4276 * configured in, the ring buffers are set to a minimum size. But once
4277 * a user starts to use the tracing facility, they need to grow
4278 * to their default size.
4279 *
4280 * This function is to be called when a tracer is about to be used.
4281 */
4282int tracing_update_buffers(void)
4283{
4284 int ret = 0;
4285
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004286 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004287 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004288 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004289 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004290 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004291
4292 return ret;
4293}
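/*
 * Caller pattern (sketch): anything about to enable tracing does
 *
 *	ret = tracing_update_buffers();
 *	if (ret < 0)
 *		return ret;
 *
 * so the allocation cost is paid by the first real user rather than
 * at every boot.
 */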
4294
Steven Rostedt577b7852009-02-26 23:43:05 -05004295struct trace_option_dentry;
4296
4297static struct trace_option_dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004298create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004299
4300static void
4301destroy_trace_option_files(struct trace_option_dentry *topts);
4302
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004303/*
4304 * Used to clear out the tracer before deletion of an instance.
4305 * Must have trace_types_lock held.
4306 */
4307static void tracing_set_nop(struct trace_array *tr)
4308{
4309 if (tr->current_trace == &nop_trace)
4310 return;
4311
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004312 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004313
4314 if (tr->current_trace->reset)
4315 tr->current_trace->reset(tr);
4316
4317 tr->current_trace = &nop_trace;
4318}
4319
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004320static int tracing_set_tracer(struct trace_array *tr, const char *buf)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004321{
Steven Rostedt577b7852009-02-26 23:43:05 -05004322 static struct trace_option_dentry *topts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004323 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004324#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004325 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004326#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004327 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004328
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004329 mutex_lock(&trace_types_lock);
4330
Steven Rostedt73c51622009-03-11 13:42:01 -04004331 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004332 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004333 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004334 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004335 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004336 ret = 0;
4337 }
4338
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004339 for (t = trace_types; t; t = t->next) {
4340 if (strcmp(t->name, buf) == 0)
4341 break;
4342 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004343 if (!t) {
4344 ret = -EINVAL;
4345 goto out;
4346 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004347 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004348 goto out;
4349
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004350 /* Some tracers are only allowed for the top level buffer */
4351 if (!trace_ok_for_array(t, tr)) {
4352 ret = -EINVAL;
4353 goto out;
4354 }
4355
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004356 /* If trace pipe files are being read, we can't change the tracer */
4357 if (tr->current_trace->ref) {
4358 ret = -EBUSY;
4359 goto out;
4360 }
4361
Steven Rostedt9f029e82008-11-12 15:24:24 -05004362 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004363
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004364 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004365
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004366 if (tr->current_trace->reset)
4367 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004368
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004369 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004370 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004371
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004372#ifdef CONFIG_TRACER_MAX_TRACE
4373 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004374
4375 if (had_max_tr && !t->use_max_tr) {
4376 /*
4377 * We need to make sure that the update_max_tr sees that
4378 * current_trace changed to nop_trace to keep it from
4379 * swapping the buffers after we resize it.
4380 * The update_max_tr is called from interrupts disabled
4381 * so a synchronized_sched() is sufficient.
4382 */
4383 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004384 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004385 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004386#endif
Steven Rostedt (Red Hat)f1b21c92014-01-14 12:33:33 -05004387 /* Currently, only the top instance has options */
4388 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
4389 destroy_trace_option_files(topts);
4390 topts = create_trace_option_files(tr, t);
4391 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004392
4393#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004394 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004395 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004396 if (ret < 0)
4397 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004398 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004399#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004400
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004401 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004402 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004403 if (ret)
4404 goto out;
4405 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004406
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004407 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004408 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004409 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004410 out:
4411 mutex_unlock(&trace_types_lock);
4412
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004413 return ret;
4414}
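/*
 * Userspace view (illustrative): this is the worker behind writes to
 * the current_tracer file:
 *
 *   echo function > current_tracer
 *   echo nop > current_tracer
 *
 * available_tracers lists the names the trace_types loop above will
 * accept.
 */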
4415
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004416static ssize_t
4417tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4418 size_t cnt, loff_t *ppos)
4419{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004420 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004421 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004422 int i;
4423 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004424 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004425
Steven Rostedt60063a62008-10-28 10:44:24 -04004426 ret = cnt;
4427
Li Zefanee6c2c12009-09-18 14:06:47 +08004428 if (cnt > MAX_TRACER_SIZE)
4429 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004430
4431 if (copy_from_user(&buf, ubuf, cnt))
4432 return -EFAULT;
4433
4434 buf[cnt] = 0;
4435
4436 /* strip ending whitespace. */
4437 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4438 buf[i] = 0;
4439
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004440 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004441 if (err)
4442 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004443
Jiri Olsacf8517c2009-10-23 19:36:16 -04004444 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004445
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004446 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004447}
4448
4449static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004450tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4451 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004452{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004453 char buf[64];
4454 int r;
4455
Steven Rostedtcffae432008-05-12 21:21:00 +02004456 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004457 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004458 if (r > sizeof(buf))
4459 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004460 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004461}
4462
4463static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004464tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4465 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004466{
Hannes Eder5e398412009-02-10 19:44:34 +01004467 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004468 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004469
Peter Huewe22fe9b52011-06-07 21:58:27 +02004470 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4471 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004472 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004473
4474 *ptr = val * 1000;
4475
4476 return cnt;
4477}
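/*
 * Note on units: these helpers expose nanosecond-valued variables in
 * microseconds. Writing "100" stores 100 * 1000 = 100000 ns, and the
 * read side converts back with nsecs_to_usecs(), printing -1 for the
 * "unset" value of (unsigned long)-1.
 */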
4478
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004479static ssize_t
4480tracing_thresh_read(struct file *filp, char __user *ubuf,
4481 size_t cnt, loff_t *ppos)
4482{
4483 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4484}
4485
4486static ssize_t
4487tracing_thresh_write(struct file *filp, const char __user *ubuf,
4488 size_t cnt, loff_t *ppos)
4489{
4490 struct trace_array *tr = filp->private_data;
4491 int ret;
4492
4493 mutex_lock(&trace_types_lock);
4494 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4495 if (ret < 0)
4496 goto out;
4497
4498 if (tr->current_trace->update_thresh) {
4499 ret = tr->current_trace->update_thresh(tr);
4500 if (ret < 0)
4501 goto out;
4502 }
4503
4504 ret = cnt;
4505out:
4506 mutex_unlock(&trace_types_lock);
4507
4508 return ret;
4509}
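/*
 * Example (illustrative): set a 50 usec threshold for tracers that
 * implement the update_thresh() callback:
 *
 *   echo 50 > tracing_thresh
 */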
4510
4511static ssize_t
4512tracing_max_lat_read(struct file *filp, char __user *ubuf,
4513 size_t cnt, loff_t *ppos)
4514{
4515 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4516}
4517
4518static ssize_t
4519tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4520 size_t cnt, loff_t *ppos)
4521{
4522 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4523}
4524
Steven Rostedtb3806b42008-05-12 21:20:46 +02004525static int tracing_open_pipe(struct inode *inode, struct file *filp)
4526{
Oleg Nesterov15544202013-07-23 17:25:57 +02004527 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004528 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004529 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004530
4531 if (tracing_disabled)
4532 return -ENODEV;
4533
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004534 if (trace_array_get(tr) < 0)
4535 return -ENODEV;
4536
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004537 mutex_lock(&trace_types_lock);
4538
Steven Rostedtb3806b42008-05-12 21:20:46 +02004539 /* create a buffer to store the information to pass to userspace */
4540 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004541 if (!iter) {
4542 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004543 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004544 goto out;
4545 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004546
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004547 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004548 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004549
4550 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4551 ret = -ENOMEM;
4552 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304553 }
4554
Steven Rostedta3097202008-11-07 22:36:02 -05004555 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304556 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004557
Steven Rostedt112f38a72009-06-01 15:16:05 -04004558 if (trace_flags & TRACE_ITER_LATENCY_FMT)
4559 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4560
David Sharp8be07092012-11-13 12:18:22 -08004561 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004562 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004563 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4564
Oleg Nesterov15544202013-07-23 17:25:57 +02004565 iter->tr = tr;
4566 iter->trace_buffer = &tr->trace_buffer;
4567 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004568 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004569 filp->private_data = iter;
4570
Steven Rostedt107bad82008-05-12 21:21:01 +02004571 if (iter->trace->pipe_open)
4572 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004573
Arnd Bergmannb4447862010-07-07 23:40:11 +02004574 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004575
4576 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004577out:
4578 mutex_unlock(&trace_types_lock);
4579 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004580
4581fail:
4583 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004584 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004585 mutex_unlock(&trace_types_lock);
4586 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004587}
4588
4589static int tracing_release_pipe(struct inode *inode, struct file *file)
4590{
4591 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004592 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004593
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004594 mutex_lock(&trace_types_lock);
4595
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004596 tr->current_trace->ref--;
4597
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004598 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004599 iter->trace->pipe_close(iter);
4600
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004601 mutex_unlock(&trace_types_lock);
4602
Rusty Russell44623442009-01-01 10:12:23 +10304603 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004604 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004605 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004606
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004607 trace_array_put(tr);
4608
Steven Rostedtb3806b42008-05-12 21:20:46 +02004609 return 0;
4610}
4611
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004612static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004613trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004614{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004615 /* Iterators are static, they should be filled or empty */
4616 if (trace_buffer_iter(iter, iter->cpu_file))
4617 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004618
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004619 if (trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004620 /*
4621 * Always select as readable when in blocking mode
4622 */
4623 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004624 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004625 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004626 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004627}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004628
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004629static unsigned int
4630tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4631{
4632 struct trace_iterator *iter = filp->private_data;
4633
4634 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004635}
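/*
 * Userspace sketch: trace_pipe supports poll(2), so a consumer can
 * sleep until data arrives (fd assumed to be an open trace_pipe):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);
 *
 * With the "block" trace option set, poll always reports readable.
 */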
4636
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004637/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004638static int tracing_wait_pipe(struct file *filp)
4639{
4640 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004641 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004642
4643 while (trace_empty(iter)) {
4644
4645 if ((filp->f_flags & O_NONBLOCK)) {
4646 return -EAGAIN;
4647 }
4648
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004649 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004650		 * We return only once we have read something and tracing
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004651		 * has been disabled. We still block if tracing is disabled, but we have never
4652 * read anything. This allows a user to cat this file, and
4653 * then enable tracing. But after we have read something,
4654 * we give an EOF when tracing is again disabled.
4655 *
4656 * iter->pos will be 0 if we haven't read anything.
4657 */
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04004658 if (!tracing_is_on() && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004659 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004660
4661 mutex_unlock(&iter->mutex);
4662
Rabin Vincente30f53a2014-11-10 19:46:34 +01004663 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004664
4665 mutex_lock(&iter->mutex);
4666
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004667 if (ret)
4668 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004669 }
4670
4671 return 1;
4672}
4673
Steven Rostedtb3806b42008-05-12 21:20:46 +02004674/*
4675 * Consumer reader.
4676 */
4677static ssize_t
4678tracing_read_pipe(struct file *filp, char __user *ubuf,
4679 size_t cnt, loff_t *ppos)
4680{
4681 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004682 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004683
4684 /* return any leftover data */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004685 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4686 if (sret != -EBUSY)
4687 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004688
Steven Rostedtf9520752009-03-02 14:04:40 -05004689 trace_seq_init(&iter->seq);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004690
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004691 /*
4692 * Avoid more than one consumer on a single file descriptor
4693 * This is just a matter of traces coherency, the ring buffer itself
4694 * is protected.
4695 */
4696 mutex_lock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004697 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004698 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4699 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004700 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004701 }
4702
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004703waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004704 sret = tracing_wait_pipe(filp);
4705 if (sret <= 0)
4706 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004707
4708 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004709 if (trace_empty(iter)) {
4710 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004711 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004712 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004713
4714 if (cnt >= PAGE_SIZE)
4715 cnt = PAGE_SIZE - 1;
4716
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004717 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004718 memset(&iter->seq, 0,
4719 sizeof(struct trace_iterator) -
4720 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004721 cpumask_clear(iter->started);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004722 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004723
Lai Jiangshan4f535962009-05-18 19:35:34 +08004724 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004725 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004726 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004727 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004728 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004729
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004730 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004731 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004732 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004733 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004734 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004735 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004736 if (ret != TRACE_TYPE_NO_CONSUME)
4737 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004738
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004739 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02004740 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004741
4742 /*
4743 * Setting the full flag means we reached the trace_seq buffer
4744	 * size and should have left via the partial-output condition above.
4745 * One of the trace_seq_* functions is not used properly.
4746 */
4747 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4748 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004749 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004750 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004751 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004752
Steven Rostedtb3806b42008-05-12 21:20:46 +02004753 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004754 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004755 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05004756 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004757
4758 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004759	 * If there was nothing to send to the user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004760 * entries, go back to wait for more entries.
4761 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004762 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004763 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004764
Steven Rostedt107bad82008-05-12 21:21:01 +02004765out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004766 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02004767
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004768 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004769}
4770
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004771static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
4772 unsigned int idx)
4773{
4774 __free_page(spd->pages[idx]);
4775}
4776
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08004777static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004778 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004779 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05004780 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004781 .steal = generic_pipe_buf_steal,
4782 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004783};
4784
Steven Rostedt34cd4992009-02-09 12:06:29 -05004785static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004786tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004787{
4788 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004789 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004790 int ret;
4791
4792 /* Seq buffer is page-sized, exactly what we need. */
4793 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004794 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004795 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004796
4797 if (trace_seq_has_overflowed(&iter->seq)) {
4798 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004799 break;
4800 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004801
4802 /*
4803 * This should not be hit, because it should only
4804 * be set if the iter->seq overflowed. But check it
4805 * anyway to be safe.
4806 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05004807 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004808 iter->seq.seq.len = save_len;
4809 break;
4810 }
4811
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004812 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05004813 if (rem < count) {
4814 rem = 0;
4815 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004816 break;
4817 }
4818
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08004819 if (ret != TRACE_TYPE_NO_CONSUME)
4820 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05004821 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05004822 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05004823 rem = 0;
4824 iter->ent = NULL;
4825 break;
4826 }
4827 }
4828
4829 return rem;
4830}
4831
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004832static ssize_t tracing_splice_read_pipe(struct file *filp,
4833 loff_t *ppos,
4834 struct pipe_inode_info *pipe,
4835 size_t len,
4836 unsigned int flags)
4837{
Jens Axboe35f3d142010-05-20 10:43:18 +02004838 struct page *pages_def[PIPE_DEF_BUFFERS];
4839 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004840 struct trace_iterator *iter = filp->private_data;
4841 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02004842 .pages = pages_def,
4843 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004844 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02004845 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05004846 .flags = flags,
4847 .ops = &tracing_pipe_buf_ops,
4848 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004849 };
4850 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004851 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004852 unsigned int i;
4853
Jens Axboe35f3d142010-05-20 10:43:18 +02004854 if (splice_grow_spd(pipe, &spd))
4855 return -ENOMEM;
4856
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004857 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004858
4859 if (iter->trace->splice_read) {
4860 ret = iter->trace->splice_read(iter, filp,
4861 ppos, pipe, len, flags);
4862 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004863 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004864 }
4865
4866 ret = tracing_wait_pipe(filp);
4867 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05004868 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004869
Jason Wessel955b61e2010-08-05 09:22:23 -05004870 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004871 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05004872 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004873 }
4874
Lai Jiangshan4f535962009-05-18 19:35:34 +08004875 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004876 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004877
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004878 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04004879 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004880 spd.pages[i] = alloc_page(GFP_KERNEL);
4881 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05004882 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004883
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01004884 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004885
4886 /* Copy the data into the page, so we can start over. */
4887 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02004888 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004889 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004890 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02004891 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004892 break;
4893 }
Jens Axboe35f3d142010-05-20 10:43:18 +02004894 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004895 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004896
Steven Rostedtf9520752009-03-02 14:04:40 -05004897 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004898 }
4899
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004900 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004901 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004902 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004903
4904 spd.nr_pages = i;
4905
Jens Axboe35f3d142010-05-20 10:43:18 +02004906 ret = splice_to_pipe(pipe, &spd);
4907out:
Eric Dumazet047fe362012-06-12 15:24:40 +02004908 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02004909 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004910
Steven Rostedt34cd4992009-02-09 12:06:29 -05004911out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004912 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02004913 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02004914}
4915
Steven Rostedta98a3c32008-05-12 21:20:59 +02004916static ssize_t
4917tracing_entries_read(struct file *filp, char __user *ubuf,
4918 size_t cnt, loff_t *ppos)
4919{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004920 struct inode *inode = file_inode(filp);
4921 struct trace_array *tr = inode->i_private;
4922 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004923 char buf[64];
4924 int r = 0;
4925 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004926
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004927 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004928
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004929 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004930 int cpu, buf_size_same;
4931 unsigned long size;
4932
4933 size = 0;
4934 buf_size_same = 1;
4935 /* check if all cpu sizes are same */
4936 for_each_tracing_cpu(cpu) {
4937 /* fill in the size from first enabled cpu */
4938 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004939 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4940 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004941 buf_size_same = 0;
4942 break;
4943 }
4944 }
4945
4946 if (buf_size_same) {
4947 if (!ring_buffer_expanded)
4948 r = sprintf(buf, "%lu (expanded: %lu)\n",
4949 size >> 10,
4950 trace_buf_size >> 10);
4951 else
4952 r = sprintf(buf, "%lu\n", size >> 10);
4953 } else
4954 r = sprintf(buf, "X\n");
4955 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004956 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004957
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004958 mutex_unlock(&trace_types_lock);
4959
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004960 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4961 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004962}
4963
4964static ssize_t
4965tracing_entries_write(struct file *filp, const char __user *ubuf,
4966 size_t cnt, loff_t *ppos)
4967{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004968 struct inode *inode = file_inode(filp);
4969 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004970 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004971 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004972
Peter Huewe22fe9b52011-06-07 21:58:27 +02004973 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4974 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004975 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004976
4977 /* must have at least 1 entry */
4978 if (!val)
4979 return -EINVAL;
4980
Steven Rostedt1696b2b2008-11-13 00:09:35 -05004981 /* value is in KB */
4982 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004983 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004984 if (ret < 0)
4985 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004986
Jiri Olsacf8517c2009-10-23 19:36:16 -04004987 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004988
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004989 return cnt;
4990}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05004991
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004992static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07004993tracing_total_entries_read(struct file *filp, char __user *ubuf,
4994 size_t cnt, loff_t *ppos)
4995{
4996 struct trace_array *tr = filp->private_data;
4997 char buf[64];
4998 int r, cpu;
4999 unsigned long size = 0, expanded_size = 0;
5000
5001 mutex_lock(&trace_types_lock);
5002 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005003 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005004 if (!ring_buffer_expanded)
5005 expanded_size += trace_buf_size >> 10;
5006 }
5007 if (ring_buffer_expanded)
5008 r = sprintf(buf, "%lu\n", size);
5009 else
5010 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5011 mutex_unlock(&trace_types_lock);
5012
5013 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5014}
5015
5016static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005017tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5018 size_t cnt, loff_t *ppos)
5019{
5020 /*
5021 * There is no need to read what the user has written, this function
5022 * is just to make sure that there is no error when "echo" is used
5023 */
5024
5025 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005026
5027 return cnt;
5028}
5029
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005030static int
5031tracing_free_buffer_release(struct inode *inode, struct file *filp)
5032{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005033 struct trace_array *tr = inode->i_private;
5034
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005035 /* disable tracing ? */
5036 if (trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005037 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005038 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005039 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005040
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005041 trace_array_put(tr);
5042
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005043 return 0;
5044}
5045
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005046static ssize_t
5047tracing_mark_write(struct file *filp, const char __user *ubuf,
5048 size_t cnt, loff_t *fpos)
5049{
Steven Rostedtd696b582011-09-22 11:50:27 -04005050 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07005051 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005052 struct ring_buffer_event *event;
5053 struct ring_buffer *buffer;
5054 struct print_entry *entry;
5055 unsigned long irq_flags;
5056 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005057 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04005058 int nr_pages = 1;
5059 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005060 int offset;
5061 int size;
5062 int len;
5063 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005064 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005065
Steven Rostedtc76f0692008-11-07 22:36:02 -05005066 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005067 return -EINVAL;
5068
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005069 if (!(trace_flags & TRACE_ITER_MARKERS))
5070 return -EINVAL;
5071
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005072 if (cnt > TRACE_BUF_SIZE)
5073 cnt = TRACE_BUF_SIZE;
5074
Steven Rostedtd696b582011-09-22 11:50:27 -04005075 /*
5076 * Userspace is injecting traces into the kernel trace buffer.
5077 * We want to be as non intrusive as possible.
5078 * To do so, we do not want to allocate any special buffers
5079 * or take any locks, but instead write the userspace data
5080 * straight into the ring buffer.
5081 *
5082 * First we need to pin the userspace buffer into memory,
5083 * which, most likely it is, because it just referenced it.
5084 * But there's no guarantee that it is. By using get_user_pages_fast()
5085 * and kmap_atomic/kunmap_atomic() we can get access to the
5086 * pages directly. We then write the data directly into the
5087 * ring buffer.
5088 */
5089 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005090
Steven Rostedtd696b582011-09-22 11:50:27 -04005091 /* check if we cross pages */
5092 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5093 nr_pages = 2;
5094
5095 offset = addr & (PAGE_SIZE - 1);
5096 addr &= PAGE_MASK;
5097
5098 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5099 if (ret < nr_pages) {
5100 while (--ret >= 0)
5101 put_page(pages[ret]);
5102 written = -EFAULT;
5103 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005104 }
5105
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005106 for (i = 0; i < nr_pages; i++)
5107 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005108
5109 local_save_flags(irq_flags);
5110 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005111 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04005112 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5113 irq_flags, preempt_count());
5114 if (!event) {
5115 /* Ring buffer disabled, return as if not open for write */
5116 written = -EBADF;
5117 goto out_unlock;
5118 }
5119
5120 entry = ring_buffer_event_data(event);
5121 entry->ip = _THIS_IP_;
5122
5123 if (nr_pages == 2) {
5124 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005125 memcpy(&entry->buf, map_page[0] + offset, len);
5126 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005127 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005128 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005129
5130 if (entry->buf[cnt - 1] != '\n') {
5131 entry->buf[cnt] = '\n';
5132 entry->buf[cnt + 1] = '\0';
5133 } else
5134 entry->buf[cnt] = '\0';
5135
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005136 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005137
5138 written = cnt;
5139
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005140 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005141
Steven Rostedtd696b582011-09-22 11:50:27 -04005142 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08005143 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005144 kunmap_atomic(map_page[i]);
5145 put_page(pages[i]);
5146 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005147 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005148 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005149}
5150
Li Zefan13f16d22009-12-08 11:16:11 +08005151static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005152{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005153 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005154 int i;
5155
5156 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005157 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005158 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005159 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5160 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005161 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005162
Li Zefan13f16d22009-12-08 11:16:11 +08005163 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005164}
5165
Steven Rostedte1e232c2014-02-10 23:38:46 -05005166static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005167{
Zhaolei5079f322009-08-25 16:12:56 +08005168 int i;
5169
Zhaolei5079f322009-08-25 16:12:56 +08005170 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5171 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5172 break;
5173 }
5174 if (i == ARRAY_SIZE(trace_clocks))
5175 return -EINVAL;
5176
Zhaolei5079f322009-08-25 16:12:56 +08005177 mutex_lock(&trace_types_lock);
5178
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005179 tr->clock_id = i;
5180
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005181 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005182
David Sharp60303ed2012-10-11 16:27:52 -07005183 /*
5184 * New clock may not be consistent with the previous clock.
5185 * Reset the buffer so that it doesn't have incomparable timestamps.
5186 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005187 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005188
5189#ifdef CONFIG_TRACER_MAX_TRACE
5190 if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
5191 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005192 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005193#endif
David Sharp60303ed2012-10-11 16:27:52 -07005194
Zhaolei5079f322009-08-25 16:12:56 +08005195 mutex_unlock(&trace_types_lock);
5196
Steven Rostedte1e232c2014-02-10 23:38:46 -05005197 return 0;
5198}
5199
5200static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5201 size_t cnt, loff_t *fpos)
5202{
5203 struct seq_file *m = filp->private_data;
5204 struct trace_array *tr = m->private;
5205 char buf[64];
5206 const char *clockstr;
5207 int ret;
5208
5209 if (cnt >= sizeof(buf))
5210 return -EINVAL;
5211
5212 if (copy_from_user(&buf, ubuf, cnt))
5213 return -EFAULT;
5214
5215 buf[cnt] = 0;
5216
5217 clockstr = strstrip(buf);
5218
5219 ret = tracing_set_clock(tr, clockstr);
5220 if (ret)
5221 return ret;
5222
Zhaolei5079f322009-08-25 16:12:56 +08005223 *fpos += cnt;
5224
5225 return cnt;
5226}
5227
Li Zefan13f16d22009-12-08 11:16:11 +08005228static int tracing_clock_open(struct inode *inode, struct file *file)
5229{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005230 struct trace_array *tr = inode->i_private;
5231 int ret;
5232
Li Zefan13f16d22009-12-08 11:16:11 +08005233 if (tracing_disabled)
5234 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005235
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005236 if (trace_array_get(tr))
5237 return -ENODEV;
5238
5239 ret = single_open(file, tracing_clock_show, inode->i_private);
5240 if (ret < 0)
5241 trace_array_put(tr);
5242
5243 return ret;
Li Zefan13f16d22009-12-08 11:16:11 +08005244}
5245
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005246struct ftrace_buffer_info {
5247 struct trace_iterator iter;
5248 void *spare;
5249 unsigned int read;
5250};
5251
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005252#ifdef CONFIG_TRACER_SNAPSHOT
5253static int tracing_snapshot_open(struct inode *inode, struct file *file)
5254{
Oleg Nesterov6484c712013-07-23 17:26:10 +02005255 struct trace_array *tr = inode->i_private;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005256 struct trace_iterator *iter;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005257 struct seq_file *m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005258 int ret = 0;
5259
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005260 if (trace_array_get(tr) < 0)
5261 return -ENODEV;
5262
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005263 if (file->f_mode & FMODE_READ) {
Oleg Nesterov6484c712013-07-23 17:26:10 +02005264 iter = __tracing_open(inode, file, true);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005265 if (IS_ERR(iter))
5266 ret = PTR_ERR(iter);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005267 } else {
5268 /* Writes still need the seq_file to hold the private data */
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005269 ret = -ENOMEM;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005270 m = kzalloc(sizeof(*m), GFP_KERNEL);
5271 if (!m)
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005272 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005273 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5274 if (!iter) {
5275 kfree(m);
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005276 goto out;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005277 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005278 ret = 0;
5279
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005280 iter->tr = tr;
Oleg Nesterov6484c712013-07-23 17:26:10 +02005281 iter->trace_buffer = &tr->max_buffer;
5282 iter->cpu_file = tracing_get_cpu(inode);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005283 m->private = iter;
5284 file->private_data = m;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005285 }
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07005286out:
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005287 if (ret < 0)
5288 trace_array_put(tr);
5289
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005290 return ret;
5291}
5292
5293static ssize_t
5294tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
5295 loff_t *ppos)
5296{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005297 struct seq_file *m = filp->private_data;
5298 struct trace_iterator *iter = m->private;
5299 struct trace_array *tr = iter->tr;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005300 unsigned long val;
5301 int ret;
5302
5303 ret = tracing_update_buffers();
5304 if (ret < 0)
5305 return ret;
5306
5307 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5308 if (ret)
5309 return ret;
5310
5311 mutex_lock(&trace_types_lock);
5312
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005313 if (tr->current_trace->use_max_tr) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005314 ret = -EBUSY;
5315 goto out;
5316 }
5317
5318 switch (val) {
5319 case 0:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005320 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5321 ret = -EINVAL;
5322 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005323 }
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005324 if (tr->allocated_snapshot)
5325 free_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005326 break;
5327 case 1:
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005328/* Only allow per-cpu swap if the ring buffer supports it */
5329#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
5330 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
5331 ret = -EINVAL;
5332 break;
5333 }
5334#endif
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005335 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04005336 ret = alloc_snapshot(tr);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005337 if (ret < 0)
5338 break;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005339 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005340 local_irq_disable();
5341 /* Now, we're going to swap */
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005342 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005343 update_max_tr(tr, current, smp_processor_id());
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005344 else
Steven Rostedt (Red Hat)ce9bae52013-03-05 21:23:55 -05005345 update_max_tr_single(tr, current, iter->cpu_file);
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005346 local_irq_enable();
5347 break;
5348 default:
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05005349 if (tr->allocated_snapshot) {
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05005350 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
5351 tracing_reset_online_cpus(&tr->max_buffer);
5352 else
5353 tracing_reset(&tr->max_buffer, iter->cpu_file);
5354 }
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005355 break;
5356 }
5357
5358 if (ret >= 0) {
5359 *ppos += cnt;
5360 ret = cnt;
5361 }
5362out:
5363 mutex_unlock(&trace_types_lock);
5364 return ret;
5365}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005366
5367static int tracing_snapshot_release(struct inode *inode, struct file *file)
5368{
5369 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005370 int ret;
5371
5372 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005373
5374 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005375 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005376
5377 /* If write only, the seq_file is just a stub */
5378 if (m)
5379 kfree(m->private);
5380 kfree(m);
5381
5382 return 0;
5383}
5384
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005385static int tracing_buffers_open(struct inode *inode, struct file *filp);
5386static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5387 size_t count, loff_t *ppos);
5388static int tracing_buffers_release(struct inode *inode, struct file *file);
5389static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5390 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5391
5392static int snapshot_raw_open(struct inode *inode, struct file *filp)
5393{
5394 struct ftrace_buffer_info *info;
5395 int ret;
5396
5397 ret = tracing_buffers_open(inode, filp);
5398 if (ret < 0)
5399 return ret;
5400
5401 info = filp->private_data;
5402
5403 if (info->iter.trace->use_max_tr) {
5404 tracing_buffers_release(inode, filp);
5405 return -EBUSY;
5406 }
5407
5408 info->iter.snapshot = true;
5409 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5410
5411 return ret;
5412}
5413
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005414#endif /* CONFIG_TRACER_SNAPSHOT */
5415
5416
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005417static const struct file_operations tracing_thresh_fops = {
5418 .open = tracing_open_generic,
5419 .read = tracing_thresh_read,
5420 .write = tracing_thresh_write,
5421 .llseek = generic_file_llseek,
5422};
5423
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005424static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005425 .open = tracing_open_generic,
5426 .read = tracing_max_lat_read,
5427 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005428 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005429};
5430
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005431static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005432 .open = tracing_open_generic,
5433 .read = tracing_set_trace_read,
5434 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005435 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005436};
5437
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005438static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005439 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005440 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005441 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005442 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005443 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005444 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005445};
5446
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005447static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005448 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005449 .read = tracing_entries_read,
5450 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005451 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005452 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005453};
5454
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005455static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005456 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005457 .read = tracing_total_entries_read,
5458 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005459 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005460};
5461
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005462static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005463 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005464 .write = tracing_free_buffer_write,
5465 .release = tracing_free_buffer_release,
5466};
5467
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005468static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005469 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005470 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005471 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005472 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005473};
5474
Zhaolei5079f322009-08-25 16:12:56 +08005475static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005476 .open = tracing_clock_open,
5477 .read = seq_read,
5478 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005479 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005480 .write = tracing_clock_write,
5481};
5482
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005483#ifdef CONFIG_TRACER_SNAPSHOT
5484static const struct file_operations snapshot_fops = {
5485 .open = tracing_snapshot_open,
5486 .read = seq_read,
5487 .write = tracing_snapshot_write,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05005488 .llseek = tracing_lseek,
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005489 .release = tracing_snapshot_release,
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005490};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005491
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005492static const struct file_operations snapshot_raw_fops = {
5493 .open = snapshot_raw_open,
5494 .read = tracing_buffers_read,
5495 .release = tracing_buffers_release,
5496 .splice_read = tracing_buffers_splice_read,
5497 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005498};
5499
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005500#endif /* CONFIG_TRACER_SNAPSHOT */
5501
Steven Rostedt2cadf912008-12-01 22:20:19 -05005502static int tracing_buffers_open(struct inode *inode, struct file *filp)
5503{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005504 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005505 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005506 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005507
5508 if (tracing_disabled)
5509 return -ENODEV;
5510
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005511 if (trace_array_get(tr) < 0)
5512 return -ENODEV;
5513
Steven Rostedt2cadf912008-12-01 22:20:19 -05005514 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005515 if (!info) {
5516 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005517 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005518 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005519
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005520 mutex_lock(&trace_types_lock);
5521
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005522 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005523 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005524 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005525 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005526 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005527 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005528 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005529
5530 filp->private_data = info;
5531
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005532 tr->current_trace->ref++;
5533
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005534 mutex_unlock(&trace_types_lock);
5535
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005536 ret = nonseekable_open(inode, filp);
5537 if (ret < 0)
5538 trace_array_put(tr);
5539
5540 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005541}
5542
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005543static unsigned int
5544tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5545{
5546 struct ftrace_buffer_info *info = filp->private_data;
5547 struct trace_iterator *iter = &info->iter;
5548
5549 return trace_poll(iter, filp, poll_table);
5550}
5551
Steven Rostedt2cadf912008-12-01 22:20:19 -05005552static ssize_t
5553tracing_buffers_read(struct file *filp, char __user *ubuf,
5554 size_t count, loff_t *ppos)
5555{
5556 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005557 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005558 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005559 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005560
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005561 if (!count)
5562 return 0;
5563
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005564#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005565 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5566 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005567#endif
5568
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005569 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005570 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5571 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005572 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005573 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005574
Steven Rostedt2cadf912008-12-01 22:20:19 -05005575 /* Do we have previous read data to read? */
5576 if (info->read < PAGE_SIZE)
5577 goto read;
5578
Steven Rostedtb6273442013-02-28 13:44:11 -05005579 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005580 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005581 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005582 &info->spare,
5583 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005584 iter->cpu_file, 0);
5585 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005586
5587 if (ret < 0) {
5588 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005589 if ((filp->f_flags & O_NONBLOCK))
5590 return -EAGAIN;
5591
Rabin Vincente30f53a2014-11-10 19:46:34 +01005592 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005593 if (ret)
5594 return ret;
5595
Steven Rostedtb6273442013-02-28 13:44:11 -05005596 goto again;
5597 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005598 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005599 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005600
Steven Rostedt436fc282011-10-14 10:44:25 -04005601 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005602 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005603 size = PAGE_SIZE - info->read;
5604 if (size > count)
5605 size = count;
5606
5607 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005608 if (ret == size)
5609 return -EFAULT;
5610
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005611 size -= ret;
5612
Steven Rostedt2cadf912008-12-01 22:20:19 -05005613 *ppos += size;
5614 info->read += size;
5615
5616 return size;
5617}
5618
5619static int tracing_buffers_release(struct inode *inode, struct file *file)
5620{
5621 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005622 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005623
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005624 mutex_lock(&trace_types_lock);
5625
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005626 iter->tr->current_trace->ref--;
5627
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005628 __trace_array_put(iter->tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005629
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005630 if (info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005631 ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005632 kfree(info);
5633
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005634 mutex_unlock(&trace_types_lock);
5635
Steven Rostedt2cadf912008-12-01 22:20:19 -05005636 return 0;
5637}
5638
5639struct buffer_ref {
5640 struct ring_buffer *buffer;
5641 void *page;
5642 int ref;
5643};
5644
5645static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5646 struct pipe_buffer *buf)
5647{
5648 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5649
5650 if (--ref->ref)
5651 return;
5652
5653 ring_buffer_free_read_page(ref->buffer, ref->page);
5654 kfree(ref);
5655 buf->private = 0;
5656}
5657
Steven Rostedt2cadf912008-12-01 22:20:19 -05005658static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5659 struct pipe_buffer *buf)
5660{
5661 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5662
5663 ref->ref++;
5664}
5665
5666/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005667static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005668 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005669 .confirm = generic_pipe_buf_confirm,
5670 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005671 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005672 .get = buffer_pipe_buf_get,
5673};
5674
5675/*
5676 * Callback from splice_to_pipe(), if we need to release some pages
5677 * at the end of the spd in case we error'ed out in filling the pipe.
5678 */
5679static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5680{
5681 struct buffer_ref *ref =
5682 (struct buffer_ref *)spd->partial[i].private;
5683
5684 if (--ref->ref)
5685 return;
5686
5687 ring_buffer_free_read_page(ref->buffer, ref->page);
5688 kfree(ref);
5689 spd->partial[i].private = 0;
5690}
5691
5692static ssize_t
5693tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5694 struct pipe_inode_info *pipe, size_t len,
5695 unsigned int flags)
5696{
5697 struct ftrace_buffer_info *info = file->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005698 struct trace_iterator *iter = &info->iter;
Jens Axboe35f3d142010-05-20 10:43:18 +02005699 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5700 struct page *pages_def[PIPE_DEF_BUFFERS];
Steven Rostedt2cadf912008-12-01 22:20:19 -05005701 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005702 .pages = pages_def,
5703 .partial = partial_def,
Eric Dumazet047fe362012-06-12 15:24:40 +02005704 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005705 .flags = flags,
5706 .ops = &buffer_pipe_buf_ops,
5707 .spd_release = buffer_spd_release,
5708 };
5709 struct buffer_ref *ref;
Steven Rostedt93459c62009-04-29 00:23:13 -04005710 int entries, size, i;
Rabin Vincent07906da2014-11-06 22:26:07 +01005711 ssize_t ret = 0;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005712
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005713#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005714 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5715 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005716#endif
5717
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005718 if (splice_grow_spd(pipe, &spd))
5719 return -ENOMEM;
Jens Axboe35f3d142010-05-20 10:43:18 +02005720
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005721 if (*ppos & (PAGE_SIZE - 1))
5722 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005723
5724 if (len & (PAGE_SIZE - 1)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005725 if (len < PAGE_SIZE)
5726 return -EINVAL;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005727 len &= PAGE_MASK;
5728 }
5729
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005730 again:
5731 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005732 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt93459c62009-04-29 00:23:13 -04005733
Al Viroa786c062014-04-11 12:01:03 -04005734 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005735 struct page *page;
5736 int r;
5737
5738 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
Rabin Vincent07906da2014-11-06 22:26:07 +01005739 if (!ref) {
5740 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005741 break;
Rabin Vincent07906da2014-11-06 22:26:07 +01005742 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005743
Steven Rostedt7267fa62009-04-29 00:16:21 -04005744 ref->ref = 1;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005745 ref->buffer = iter->trace_buffer->buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005746 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005747 if (!ref->page) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005748 ret = -ENOMEM;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005749 kfree(ref);
5750 break;
5751 }
5752
5753 r = ring_buffer_read_page(ref->buffer, &ref->page,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005754 len, iter->cpu_file, 1);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005755 if (r < 0) {
Vaibhav Nagarnaik7ea59062011-05-03 17:56:42 -07005756 ring_buffer_free_read_page(ref->buffer, ref->page);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005757 kfree(ref);
5758 break;
5759 }
5760
5761 /*
5762 * zero out any left over data, this is going to
5763 * user land.
5764 */
5765 size = ring_buffer_page_len(ref->page);
5766 if (size < PAGE_SIZE)
5767 memset(ref->page + size, 0, PAGE_SIZE - size);
5768
5769 page = virt_to_page(ref->page);
5770
5771 spd.pages[i] = page;
5772 spd.partial[i].len = PAGE_SIZE;
5773 spd.partial[i].offset = 0;
5774 spd.partial[i].private = (unsigned long)ref;
5775 spd.nr_pages++;
Lai Jiangshan93cfb3c2009-04-02 15:17:08 +08005776 *ppos += PAGE_SIZE;
Steven Rostedt93459c62009-04-29 00:23:13 -04005777
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005778 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005779 }
5780
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005781 trace_access_unlock(iter->cpu_file);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005782 spd.nr_pages = i;
5783
5784 /* did we read anything? */
5785 if (!spd.nr_pages) {
Rabin Vincent07906da2014-11-06 22:26:07 +01005786 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005787 return ret;
Rabin Vincent07906da2014-11-06 22:26:07 +01005788
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005789 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
5790 return -EAGAIN;
5791
Rabin Vincente30f53a2014-11-10 19:46:34 +01005792 ret = wait_on_pipe(iter, true);
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04005793 if (ret)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005794 return ret;
Rabin Vincente30f53a2014-11-10 19:46:34 +01005795
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005796 goto again;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005797 }
5798
5799 ret = splice_to_pipe(pipe, &spd);
Eric Dumazet047fe362012-06-12 15:24:40 +02005800 splice_shrink_spd(&spd);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005801
Steven Rostedt2cadf912008-12-01 22:20:19 -05005802 return ret;
5803}
5804
5805static const struct file_operations tracing_buffers_fops = {
5806 .open = tracing_buffers_open,
5807 .read = tracing_buffers_read,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005808 .poll = tracing_buffers_poll,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005809 .release = tracing_buffers_release,
5810 .splice_read = tracing_buffers_splice_read,
5811 .llseek = no_llseek,
5812};
5813
Steven Rostedtc8d77182009-04-29 18:03:45 -04005814static ssize_t
5815tracing_stats_read(struct file *filp, char __user *ubuf,
5816 size_t count, loff_t *ppos)
5817{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005818 struct inode *inode = file_inode(filp);
5819 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005820 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005821 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005822 struct trace_seq *s;
5823 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005824 unsigned long long t;
5825 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005826
Li Zefane4f2d102009-06-15 10:57:28 +08005827 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005828 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005829 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005830
5831 trace_seq_init(s);
5832
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005833 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005834 trace_seq_printf(s, "entries: %ld\n", cnt);
5835
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005836 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005837 trace_seq_printf(s, "overrun: %ld\n", cnt);
5838
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005839 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005840 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5841
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005842 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005843 trace_seq_printf(s, "bytes: %ld\n", cnt);
5844
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005845 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005846 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005847 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005848 usec_rem = do_div(t, USEC_PER_SEC);
5849 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5850 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005851
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005852 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005853 usec_rem = do_div(t, USEC_PER_SEC);
5854 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5855 } else {
5856 /* counter or tsc mode for trace_clock */
5857 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005858 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005859
5860 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005861 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005862 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005863
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005864 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07005865 trace_seq_printf(s, "dropped events: %ld\n", cnt);
5866
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005867 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05005868 trace_seq_printf(s, "read events: %ld\n", cnt);
5869
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005870 count = simple_read_from_buffer(ubuf, count, ppos,
5871 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04005872
5873 kfree(s);
5874
5875 return count;
5876}
5877
5878static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005879 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005880 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005881 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005882 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04005883};
5884
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005885#ifdef CONFIG_DYNAMIC_FTRACE
5886
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005887int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005888{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005889 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005890}
5891
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005892static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005893tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005894 size_t cnt, loff_t *ppos)
5895{
Steven Rostedta26a2a22008-10-31 00:03:22 -04005896 static char ftrace_dyn_info_buffer[1024];
5897 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005898 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005899 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04005900 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005901 int r;
5902
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005903 mutex_lock(&dyn_info_mutex);
5904 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005905
Steven Rostedta26a2a22008-10-31 00:03:22 -04005906 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005907 buf[r++] = '\n';
5908
5909 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5910
5911 mutex_unlock(&dyn_info_mutex);
5912
5913 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005914}
5915
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005916static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005917 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04005918 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005919 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005920};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005921#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005922
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005923#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
5924static void
5925ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005926{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005927 tracing_snapshot();
5928}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005929
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005930static void
5931ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
5932{
5933 unsigned long *count = (long *)data;
5934
5935 if (!*count)
5936 return;
5937
5938 if (*count != -1)
5939 (*count)--;
5940
5941 tracing_snapshot();
5942}
5943
5944static int
5945ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
5946 struct ftrace_probe_ops *ops, void *data)
5947{
5948 long count = (long)data;
5949
5950 seq_printf(m, "%ps:", (void *)ip);
5951
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005952 seq_puts(m, "snapshot");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005953
5954 if (count == -1)
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01005955 seq_puts(m, ":unlimited\n");
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005956 else
5957 seq_printf(m, ":count=%ld\n", count);
5958
5959 return 0;
5960}
5961
5962static struct ftrace_probe_ops snapshot_probe_ops = {
5963 .func = ftrace_snapshot,
5964 .print = ftrace_snapshot_print,
5965};
5966
5967static struct ftrace_probe_ops snapshot_count_probe_ops = {
5968 .func = ftrace_count_snapshot,
5969 .print = ftrace_snapshot_print,
5970};
5971
5972static int
5973ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
5974 char *glob, char *cmd, char *param, int enable)
5975{
5976 struct ftrace_probe_ops *ops;
5977 void *count = (void *)-1;
5978 char *number;
5979 int ret;
5980
5981 /* hash funcs only work with set_ftrace_filter */
5982 if (!enable)
5983 return -EINVAL;
5984
5985 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
5986
5987 if (glob[0] == '!') {
5988 unregister_ftrace_function_probe_func(glob+1, ops);
5989 return 0;
5990 }
5991
5992 if (!param)
5993 goto out_reg;
5994
5995 number = strsep(&param, ":");
5996
5997 if (!strlen(number))
5998 goto out_reg;
5999
6000 /*
6001 * We use the callback data field (which is a pointer)
6002 * as our counter.
6003 */
6004 ret = kstrtoul(number, 0, (unsigned long *)&count);
6005 if (ret)
6006 return ret;
6007
6008 out_reg:
6009 ret = register_ftrace_function_probe(glob, ops, count);
6010
6011 if (ret >= 0)
6012 alloc_snapshot(&global_trace);
6013
6014 return ret < 0 ? ret : 0;
6015}
6016
6017static struct ftrace_func_command ftrace_snapshot_cmd = {
6018 .name = "snapshot",
6019 .func = ftrace_trace_snapshot_callback,
6020};
6021
Tom Zanussi38de93a2013-10-24 08:34:18 -05006022static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006023{
6024 return register_ftrace_command(&ftrace_snapshot_cmd);
6025}
6026#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05006027static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006028#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006029
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006030static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006031{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006032 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006033}
6034
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006035static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6036{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006037 struct dentry *d_tracer;
6038
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006039 if (tr->percpu_dir)
6040 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006041
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006042 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006043 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006044 return NULL;
6045
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006046 tr->percpu_dir = debugfs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006047
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006048 WARN_ONCE(!tr->percpu_dir,
6049 "Could not create debugfs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006050
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006051 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006052}
6053
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006054static struct dentry *
6055trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6056 void *data, long cpu, const struct file_operations *fops)
6057{
6058 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6059
6060 if (ret) /* See tracing_get_cpu() */
6061 ret->d_inode->i_cdev = (void *)(cpu + 1);
6062 return ret;
6063}
6064
static void
tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

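/*
 * Writing "0" or "1" to a per-tracer option file flips the matching
 * tracer_opt bit, e.g. (illustrative, with debugfs mounted at its
 * usual location):
 *   echo 1 > /sys/kernel/debug/tracing/options/<option>
 */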
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

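/*
 * Wrapper around debugfs_create_file() that warns when the file could
 * not be created; tracing continues without the control file in that
 * case rather than failing outright.
 */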
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = debugfs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create debugfs '%s' entry\n", name);

	return ret;
}

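/*
 * Lazily create the "options" directory for a trace array the first
 * time one of its option files is set up; subsequent callers get the
 * cached dentry.
 */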
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = debugfs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create debugfs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		debugfs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

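/*
 * The rb_simple_*() handlers back the "tracing_on" control file created
 * in init_tracer_debugfs() below; writing 0 or 1 stops or resumes
 * recording into the ring buffer, e.g. (illustrative):
 *   echo 0 > /sys/kernel/debug/tracing/tracing_on
 */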
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};

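/*
 * Each subdirectory made under "instances" becomes an independent
 * trace_array with its own buffers and control files, e.g.
 * (illustrative): mkdir /sys/kernel/debug/tracing/instances/foo.
 * mkdir/rmdir are wired up via instance_dir_inode_operations below.
 */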
struct dentry *trace_instance_dir;

static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static int new_instance_create(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = debugfs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		debugfs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_debugfs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_delete(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	debugfs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/*
	 * The inode mutex is locked, but debugfs_create_dir() will also
	 * take the mutex. As the instances directory cannot be destroyed
	 * or changed in any other way, it is safe to unlock it, and
	 * let the dentry try. If two users try to make the same dir at
	 * the same time, then new_instance_create() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = new_instance_create(dentry->d_iname);

	mutex_lock(&inode->i_mutex);

	return ret;
}

static int instance_rmdir(struct inode *inode, struct dentry *dentry)
{
	struct dentry *parent;
	int ret;

	/* Paranoid: Make sure the parent is the "instances" directory */
	parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	if (WARN_ON_ONCE(parent != trace_instance_dir))
		return -ENOENT;

	/* The caller did a dget() on dentry */
	mutex_unlock(&dentry->d_inode->i_mutex);

	/*
	 * The inode mutex is locked, but debugfs_remove_recursive() will
	 * also take the mutex. As the instances directory cannot be
	 * destroyed or changed in any other way, it is safe to unlock it,
	 * and let the dentry try. If two users try to remove the same dir
	 * at the same time, then instance_delete() will determine the
	 * winner.
	 */
	mutex_unlock(&inode->i_mutex);

	ret = instance_delete(dentry->d_iname);

	mutex_lock_nested(&inode->i_mutex, I_MUTEX_PARENT);
	mutex_lock(&dentry->d_inode->i_mutex);

	return ret;
}

static const struct inode_operations instance_dir_inode_operations = {
	.lookup		= simple_lookup,
	.mkdir		= instance_mkdir,
	.rmdir		= instance_rmdir,
};

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = debugfs_create_dir("instances", d_tracer);
	if (WARN_ON(!trace_instance_dir))
		return;

	/* Hijack the dir inode operations, to allow mkdir */
	trace_instance_dir->d_inode->i_op = &instance_dir_inode_operations;
}

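/*
 * Populate a trace array's directory with the standard set of control
 * files (current_tracer, trace, trace_pipe, tracing_on, etc.) plus the
 * per-CPU subdirectories; used for both the top level tracing directory
 * and for each instance.
 */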
static void
init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_debugfs_percpu(tr, cpu);
}

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	if (tr->dir)
		return tr->dir;

	if (WARN_ON(!debugfs_initialized()))
		return ERR_PTR(-ENODEV);

	tr->dir = debugfs_create_dir("tracing", NULL);

	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return tr->dir;
}

extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

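/*
 * Register the enum maps that the build placed between the two section
 * markers above, making the kernel's own enum names available for
 * resolving enum symbols in trace event output.
 */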
static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_debugfs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_debugfs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	return 0;
}

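/*
 * The panic and die notifiers below dump the ring buffer to the console
 * when the ftrace_dump_on_oops option is set, so the trace leading up
 * to a crash is not lost with the machine.
 */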
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

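/*
 * Prepare an iterator over the global trace buffer for in-kernel
 * consumers such as ftrace_dump() that cannot go through the normal
 * file interface.
 */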
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

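/*
 * Dump the ring buffer straight to the console with printk(); invoked
 * from the oops/panic notifiers above and from sysrq-z. The static
 * dump_running counter below serializes concurrent dumpers.
 */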
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.tr->trace_buffer.data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUs to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUs */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The name of the default bootup tracer lives in an init section.
	 * This function is called as a late initcall. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);