/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the entries
 * inserted during the selftest, although concurrent insertions into
 * the ring buffer, such as trace_printk(), could occur at the same
 * time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered the oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};
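
/*
 * Layout sketch (illustrative editorial note, not from the original
 * source): for a set of N saved maps, the array described by the
 * comment above looks like
 *
 *	item[0]		head  (head.length = N, head.mod = owning
 *			       module, or NULL if built in)
 *	item[1..N]	map   entries
 *	item[N+1]	tail  (tail.next points to the next saved
 *			       array, tail.end points to NULL)
 */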

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
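
/*
 * Worked example (editorial note, not in the original source): the
 * "+ 500" makes the integer division round to the nearest microsecond,
 * so ns2usecs(1499) == 1 while ns2usecs(1500) == (1500 + 500) / 1000 == 2.
 */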

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);
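
/*
 * Illustrative caller pattern (a sketch, not code from this file):
 * after reserving an event and filling in its fields, a caller commits
 * the event only when the filter has not already discarded it:
 *
 *	if (!filter_check_discard(file, entry, buffer, event))
 *		trace_buffer_unlock_commit(...);
 *
 * A return of 1 means ring_buffer_discard_commit() already consumed
 * the event.
 */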

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek()
 * etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in the ring buffer, and this page will be
 *      rewritten by the events producer.
 *   B) The page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different cpu
 * ring buffers concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
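
/*
 * Usage sketch (illustrative, not code from this file): a consuming
 * reader of a single cpu brackets its reads with the per-cpu lock,
 * while a reader of all buffers passes RING_BUFFER_ALL_CPUS and takes
 * all_cpu_access_lock for write, excluding every per-cpu reader:
 *
 *	trace_access_lock(cpu);
 *	... read or consume events of @cpu ...
 *	trace_access_unlock(cpu);
 */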

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

/* trace_flags holds trace_options default values */
unsigned long trace_flags =
	FUNCTION_DEFAULT_FLAGS | FUNCTION_GRAPH_DEFAULT_FLAGS |
	TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |
	TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS
	;

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer to
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
637void tracing_snapshot(void)
638{
639 struct trace_array *tr = &global_trace;
640 struct tracer *tracer = tr->current_trace;
641 unsigned long flags;
642
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500643 if (in_nmi()) {
644 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
645 internal_trace_puts("*** snapshot is being ignored ***\n");
646 return;
647 }
648
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500649 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500650 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
651 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500652 tracing_off();
653 return;
654 }
655
656 /* Note, snapshot can not be used when the tracer uses it */
657 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500658 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
659 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500660 return;
661 }
662
663 local_irq_save(flags);
664 update_max_tr(tr, current, smp_processor_id());
665 local_irq_restore(flags);
666}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500667EXPORT_SYMBOL_GPL(tracing_snapshot);
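
/*
 * Usage sketch (illustrative, not code from this file; assumes
 * CONFIG_TRACER_SNAPSHOT=y): allocate once from sleepable context,
 * then trigger snapshots wherever the condition of interest is
 * detected, and read the result from the tracefs "snapshot" file:
 *
 *	tracing_snapshot_alloc();	(early, from sleepable context)
 *	...
 *	if (suspicious_condition)
 *		tracing_snapshot();	(usable from atomic context)
 */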

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
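
/*
 * Example (editorial note, not in the original source): booting with
 * "trace_buf_size=5M" requests a five-megabyte per-cpu buffer;
 * memparse() accepts the usual K/M/G suffixes.
 */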

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};
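
/*
 * Example (editorial note, not in the original source): the active
 * clock is chosen at run time by name through the tracefs
 * "trace_clock" file, e.g.
 *
 *	echo mono > /sys/kernel/debug/tracing/trace_clock
 */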

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
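
/*
 * Call-pattern sketch (illustrative, not code from this file; the
 * PAGE_SIZE buffer size is an arbitrary example): a write() handler
 * that parses space-separated tokens typically pairs these helpers
 * around trace_get_user():
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, PAGE_SIZE))
 *		return -ENOMEM;
 *	read = trace_get_user(&parser, ubuf, cnt, ppos);
 *	... act on parser.buffer once a full token has been read ...
 *	trace_parser_put(&parser);
 */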

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}

/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1290
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001291void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001292{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001293 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001294
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001295 if (!buffer)
1296 return;
1297
Steven Rostedtf6339032009-09-04 12:35:16 -04001298 ring_buffer_record_disable(buffer);
1299
1300 /* Make sure all commits have finished */
1301 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001302 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001303
1304 ring_buffer_record_enable(buffer);
1305}
1306
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001307void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001308{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001309 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001310 int cpu;
1311
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001312 if (!buffer)
1313 return;
1314
Steven Rostedt621968c2009-09-04 12:02:35 -04001315 ring_buffer_record_disable(buffer);
1316
1317 /* Make sure all commits have finished */
1318 synchronize_sched();
1319
Alexander Z Lam94571582013-08-02 18:36:16 -07001320 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001321
1322 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001323 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001324
1325 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001326}
1327
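/*
 * tracing_reset_online_cpus() above is the reset behind truncating
 * the "trace" file, e.g.:
 *
 *	echo > /sys/kernel/debug/tracing/trace
 *
 * Recording is disabled and synchronize_sched() waits for in-flight
 * commits to drain before each per-cpu buffer is reset.
 */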
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001328/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001329void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001330{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001331 struct trace_array *tr;
1332
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001333 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001334 tracing_reset_online_cpus(&tr->trace_buffer);
1335#ifdef CONFIG_TRACER_MAX_TRACE
1336 tracing_reset_online_cpus(&tr->max_buffer);
1337#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001338 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001339}
1340
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001341#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001342#define NO_CMDLINE_MAP UINT_MAX
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001343static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001344struct saved_cmdlines_buffer {
1345 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1346 unsigned *map_cmdline_to_pid;
1347 unsigned cmdline_num;
1348 int cmdline_idx;
1349 char *saved_cmdlines;
1350};
1351static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001352
Steven Rostedt25b0b442008-05-12 21:21:00 +02001353/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001354static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001355
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001356static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001357{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001358 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1359}
1360
1361static inline void set_cmdline(int idx, const char *cmdline)
1362{
1363 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1364}
1365
1366static int allocate_cmdlines_buffer(unsigned int val,
1367 struct saved_cmdlines_buffer *s)
1368{
1369 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1370 GFP_KERNEL);
1371 if (!s->map_cmdline_to_pid)
1372 return -ENOMEM;
1373
1374 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1375 if (!s->saved_cmdlines) {
1376 kfree(s->map_cmdline_to_pid);
1377 return -ENOMEM;
1378 }
1379
1380 s->cmdline_idx = 0;
1381 s->cmdline_num = val;
1382 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1383 sizeof(s->map_pid_to_cmdline));
1384 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1385 val * sizeof(*s->map_cmdline_to_pid));
1386
1387 return 0;
1388}
1389
1390static int trace_create_savedcmd(void)
1391{
1392 int ret;
1393
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001394 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001395 if (!savedcmd)
1396 return -ENOMEM;
1397
1398 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1399 if (ret < 0) {
1400 kfree(savedcmd);
1401 savedcmd = NULL;
1402 return -ENOMEM;
1403 }
1404
1405 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001406}
1407
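/*
 * Rough footprint of the default cmdline cache (assuming the usual
 * TASK_COMM_LEN of 16 and PID_MAX_DEFAULT of 32768):
 *
 *	saved_cmdlines:     128 * 16 bytes           =   2 KB
 *	map_cmdline_to_pid: 128 * sizeof(unsigned)   = 512 bytes
 *	map_pid_to_cmdline: 32769 * sizeof(unsigned) ~ 128 KB
 *
 * The pid->cmdline map dominates and sits inside the struct itself;
 * the other two arrays come from allocate_cmdlines_buffer().
 */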
Carsten Emdeb5130b12009-09-13 01:43:07 +02001408int is_tracing_stopped(void)
1409{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001410 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001411}
1412
Steven Rostedt0f048702008-11-05 16:05:44 -05001413/**
1414 * tracing_start - quick start of the tracer
1415 *
1416 * If tracing is enabled but was stopped by tracing_stop,
1417 * this will start the tracer back up.
1418 */
1419void tracing_start(void)
1420{
1421 struct ring_buffer *buffer;
1422 unsigned long flags;
1423
1424 if (tracing_disabled)
1425 return;
1426
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001427 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1428 if (--global_trace.stop_count) {
1429 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001430 /* Someone screwed up their debugging */
1431 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001432 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001433 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001434 goto out;
1435 }
1436
Steven Rostedta2f80712010-03-12 19:56:00 -05001437 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001438 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001439
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001440 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001441 if (buffer)
1442 ring_buffer_record_enable(buffer);
1443
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001444#ifdef CONFIG_TRACER_MAX_TRACE
1445 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001446 if (buffer)
1447 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001448#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001449
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001450 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001451
Steven Rostedt0f048702008-11-05 16:05:44 -05001452 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001453 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1454}
1455
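/*
 * stop_count makes tracing_stop()/tracing_start() nest, so paired
 * callers compose. An illustrative (hypothetical) use that freezes
 * the buffers around inspecting some state:
 *
 *	tracing_stop();
 *	inspect_some_state();	(hypothetical helper)
 *	tracing_start();	(re-enables at the outermost pair only)
 */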
1456static void tracing_start_tr(struct trace_array *tr)
1457{
1458 struct ring_buffer *buffer;
1459 unsigned long flags;
1460
1461 if (tracing_disabled)
1462 return;
1463
1464 /* If global, we need to also start the max tracer */
1465 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1466 return tracing_start();
1467
1468 raw_spin_lock_irqsave(&tr->start_lock, flags);
1469
1470 if (--tr->stop_count) {
1471 if (tr->stop_count < 0) {
1472 /* Someone screwed up their debugging */
1473 WARN_ON_ONCE(1);
1474 tr->stop_count = 0;
1475 }
1476 goto out;
1477 }
1478
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001479 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001480 if (buffer)
1481 ring_buffer_record_enable(buffer);
1482
1483 out:
1484 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001485}
1486
1487/**
1488 * tracing_stop - quick stop of the tracer
1489 *
1490 * Lightweight way to stop tracing. Use in conjunction with
1491 * tracing_start.
1492 */
1493void tracing_stop(void)
1494{
1495 struct ring_buffer *buffer;
1496 unsigned long flags;
1497
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001498 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1499 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001500 goto out;
1501
Steven Rostedta2f80712010-03-12 19:56:00 -05001502 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001503 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001504
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001505 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001506 if (buffer)
1507 ring_buffer_record_disable(buffer);
1508
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001509#ifdef CONFIG_TRACER_MAX_TRACE
1510 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001511 if (buffer)
1512 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001513#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001514
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001515 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001516
Steven Rostedt0f048702008-11-05 16:05:44 -05001517 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001518 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1519}
1520
1521static void tracing_stop_tr(struct trace_array *tr)
1522{
1523 struct ring_buffer *buffer;
1524 unsigned long flags;
1525
1526 /* If global, we need to also stop the max tracer */
1527 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1528 return tracing_stop();
1529
1530 raw_spin_lock_irqsave(&tr->start_lock, flags);
1531 if (tr->stop_count++)
1532 goto out;
1533
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001534 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001535 if (buffer)
1536 ring_buffer_record_disable(buffer);
1537
1538 out:
1539 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001540}
1541
Ingo Molnare309b412008-05-12 21:20:51 +02001542void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001543
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001544static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001545{
Carsten Emdea635cf02009-03-18 09:00:41 +01001546 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001547
1548 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001549 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001550
1551 /*
1552 * It's not the end of the world if we don't get
1553 * the lock, but we also don't want to spin
1554 * nor do we want to disable interrupts,
1555 * so if we miss here, then better luck next time.
1556 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001557 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001558 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001559
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001560 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001561 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001562 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001563
Carsten Emdea635cf02009-03-18 09:00:41 +01001564 /*
1565 * Check whether the cmdline buffer at idx has a pid
1566 * mapped. We are going to overwrite that entry so we
1567 * need to clear the map_pid_to_cmdline. Otherwise we
1568 * would read the new comm for the old pid.
1569 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001570 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001571 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001572 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001573
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001574 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1575 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001577 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001578 }
1579
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001580 set_cmdline(idx, tsk->comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001581
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001582 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001583
1584 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001585}
1586
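/*
 * A worked example of the eviction handled above (slot numbers are
 * hypothetical): if slot 5 holds pid 100 and trace_save_cmdline()
 * now stores pid 200 into slot 5, map_pid_to_cmdline[100] must first
 * be reset to NO_CMDLINE_MAP; otherwise a later lookup of pid 100
 * would hand back pid 200's comm.
 */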
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001587static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001588{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001589 unsigned map;
1590
Steven Rostedt4ca53082009-03-16 19:20:15 -04001591 if (!pid) {
1592 strcpy(comm, "<idle>");
1593 return;
1594 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001595
Steven Rostedt74bf4072010-01-25 15:11:53 -05001596 if (WARN_ON_ONCE(pid < 0)) {
1597 strcpy(comm, "<XXX>");
1598 return;
1599 }
1600
Steven Rostedt4ca53082009-03-16 19:20:15 -04001601 if (pid > PID_MAX_DEFAULT) {
1602 strcpy(comm, "<...>");
1603 return;
1604 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001605
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001606 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001607 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001608 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001609 else
1610 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001611}
1612
1613void trace_find_cmdline(int pid, char comm[])
1614{
1615 preempt_disable();
1616 arch_spin_lock(&trace_cmdline_lock);
1617
1618 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001619
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001620 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001621 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001622}
1623
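/*
 * Typical lookup, as used by the trace output code (sketch):
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	trace_find_cmdline(entry->pid, comm);
 *
 * comm then holds the cached task name, or "<...>" if the pid was
 * never recorded (or its slot has since been recycled).
 */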
Ingo Molnare309b412008-05-12 21:20:51 +02001624void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001625{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001626 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001627 return;
1628
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001629 if (!__this_cpu_read(trace_cmdline_save))
1630 return;
1631
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001632 if (trace_save_cmdline(tsk))
1633 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001634}
1635
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001636void
Steven Rostedt38697052008-10-01 13:14:09 -04001637tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1638 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001639{
1640 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001641
Steven Rostedt777e2082008-09-29 23:02:42 -04001642 entry->preempt_count = pc & 0xff;
1643 entry->pid = (tsk) ? tsk->pid : 0;
1644 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001645#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001646 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001647#else
1648 TRACE_FLAG_IRQS_NOSUPPORT |
1649#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001650 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1651 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001652 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1653 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001654}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001655EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001656
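/*
 * The byte packed above is what the latency format decodes when
 * printing an event. A sketch of reading it back (the letters refer
 * to the usual one-character output columns):
 *
 *	if (entry->flags & TRACE_FLAG_IRQS_OFF)
 *		...			('d': irqs disabled)
 *	if (entry->flags & TRACE_FLAG_HARDIRQ)
 *		...			('h': in hard irq)
 *	if (entry->flags & TRACE_FLAG_SOFTIRQ)
 *		...			('s': in soft irq)
 *
 * entry->preempt_count carries the low byte of preempt_count() at
 * the time of the event.
 */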
Steven Rostedte77405a2009-09-02 14:17:06 -04001657struct ring_buffer_event *
1658trace_buffer_lock_reserve(struct ring_buffer *buffer,
1659 int type,
1660 unsigned long len,
1661 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001662{
1663 struct ring_buffer_event *event;
1664
Steven Rostedte77405a2009-09-02 14:17:06 -04001665 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001666 if (event != NULL) {
1667 struct trace_entry *ent = ring_buffer_event_data(event);
1668
1669 tracing_generic_entry_update(ent, flags, pc);
1670 ent->type = type;
1671 }
1672
1673 return event;
1674}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001675
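/*
 * Every writer in this file follows the same reserve/fill/commit
 * pattern (sketch, mirroring trace_function() below):
 *
 *	event = trace_buffer_lock_reserve(buffer, TRACE_FN,
 *					  sizeof(*entry), flags, pc);
 *	if (!event)
 *		return;		(buffer full, disabled, or mid-resize)
 *	entry = ring_buffer_event_data(event);
 *	entry->ip = ip;		(fill in the type-specific fields)
 *	__buffer_unlock_commit(buffer, event);
 *
 * Nothing becomes visible to readers until the commit.
 */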
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001676void
1677__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1678{
1679 __this_cpu_write(trace_cmdline_save, true);
1680 ring_buffer_unlock_commit(buffer, event);
1681}
1682
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04001683void trace_buffer_unlock_commit(struct trace_array *tr,
1684 struct ring_buffer *buffer,
1685 struct ring_buffer_event *event,
1686 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001687{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001688 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001689
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001690 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
Steven Rostedte77405a2009-09-02 14:17:06 -04001691 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001692}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001693EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001694
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001695static struct ring_buffer *temp_buffer;
1696
Steven Rostedtef5580d2009-02-27 19:38:04 -05001697struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001698trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001699 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001700 int type, unsigned long len,
1701 unsigned long flags, int pc)
1702{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001703 struct ring_buffer_event *entry;
1704
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001705 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001706 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001707 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001708 /*
1709 * If tracing is off, but we have triggers enabled
1710 * we still need to look at the event data. Use the temp_buffer
1711	 * to store the trace event for the trigger to use. It's recursion
1712 * safe and will not be recorded anywhere.
1713 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04001714 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001715 *current_rb = temp_buffer;
1716 entry = trace_buffer_lock_reserve(*current_rb,
1717 type, len, flags, pc);
1718 }
1719 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001720}
1721EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1722
1723struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001724trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1725 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001726 unsigned long flags, int pc)
1727{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001728 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001729 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001730 type, len, flags, pc);
1731}
Steven Rostedt94487d62009-05-05 19:22:53 -04001732EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001733
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04001734void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1735 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001736 struct ring_buffer_event *event,
1737 unsigned long flags, int pc,
1738 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001739{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001740 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001741
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001742 ftrace_trace_stack(buffer, flags, 6, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001743 ftrace_trace_userstack(buffer, flags, pc);
1744}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001745EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001746
Steven Rostedte77405a2009-09-02 14:17:06 -04001747void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1748 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001749{
Steven Rostedte77405a2009-09-02 14:17:06 -04001750 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001751}
Steven Rostedt12acd472009-04-17 16:01:56 -04001752EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001753
Ingo Molnare309b412008-05-12 21:20:51 +02001754void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001755trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001756 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1757 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001758{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001759 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001760 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001761 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001762 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001763
Steven Rostedtd7690412008-10-01 00:29:53 -04001764 /* If we are reading the ring buffer, don't trace */
Rusty Russelldd17c8f2009-10-29 22:34:15 +09001765 if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
Steven Rostedtd7690412008-10-01 00:29:53 -04001766 return;
1767
Steven Rostedte77405a2009-09-02 14:17:06 -04001768 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001769 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001770 if (!event)
1771 return;
1772 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001773 entry->ip = ip;
1774 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001775
Tom Zanussif306cc82013-10-24 08:34:17 -05001776 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001777 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001778}
1779
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001780#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001781
1782#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1783struct ftrace_stack {
1784 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1785};
1786
1787static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1788static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1789
Steven Rostedte77405a2009-09-02 14:17:06 -04001790static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001791 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001792 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001793{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001794 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001795 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001796 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001797 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001798 int use_stack;
1799 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001800
1801 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001802 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001803
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001804 /*
1805	 * Since events can happen in NMIs, there's no safe way to
1806	 * use the per-cpu ftrace_stacks. We reserve it and if an interrupt
1807	 * or NMI comes in, it will just have to use the default
1808	 * FTRACE_STACK_ENTRIES.
1809 */
1810 preempt_disable_notrace();
1811
Shan Wei82146522012-11-19 13:21:01 +08001812 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001813 /*
1814 * We don't need any atomic variables, just a barrier.
1815 * If an interrupt comes in, we don't care, because it would
1816 * have exited and put the counter back to what we want.
1817 * We just need a barrier to keep gcc from moving things
1818 * around.
1819 */
1820 barrier();
1821 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001822 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001823 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1824
1825 if (regs)
1826 save_stack_trace_regs(regs, &trace);
1827 else
1828 save_stack_trace(&trace);
1829
1830 if (trace.nr_entries > size)
1831 size = trace.nr_entries;
1832 } else
1833 /* From now on, use_stack is a boolean */
1834 use_stack = 0;
1835
1836 size *= sizeof(unsigned long);
1837
1838 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1839 sizeof(*entry) + size, flags, pc);
1840 if (!event)
1841 goto out;
1842 entry = ring_buffer_event_data(event);
1843
1844 memset(&entry->caller, 0, size);
1845
1846 if (use_stack)
1847 memcpy(&entry->caller, trace.entries,
1848 trace.nr_entries * sizeof(unsigned long));
1849 else {
1850 trace.max_entries = FTRACE_STACK_ENTRIES;
1851 trace.entries = entry->caller;
1852 if (regs)
1853 save_stack_trace_regs(regs, &trace);
1854 else
1855 save_stack_trace(&trace);
1856 }
1857
1858 entry->size = trace.nr_entries;
1859
Tom Zanussif306cc82013-10-24 08:34:17 -05001860 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001861 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001862
1863 out:
1864 /* Again, don't let gcc optimize things here */
1865 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001866 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001867 preempt_enable_notrace();
1868
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001869}
1870
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001871static inline void ftrace_trace_stack(struct ring_buffer *buffer,
1872 unsigned long flags,
1873 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05001874{
1875 if (!(trace_flags & TRACE_ITER_STACKTRACE))
1876 return;
1877
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001878 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05001879}
1880
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001881void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1882 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001883{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001884 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001885}
1886
Steven Rostedt03889382009-12-11 09:48:22 -05001887/**
1888 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001889 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001890 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001891void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001892{
1893 unsigned long flags;
1894
1895 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001896 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001897
1898 local_save_flags(flags);
1899
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001900 /*
1901	 * Skip 3 more frames; that seems to land us at the caller of
1902	 * this function.
1903 */
1904 skip += 3;
1905 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1906 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001907}
1908
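/*
 * Example: record the current kernel stack into the trace buffer
 * from a suspect code path:
 *
 *	trace_dump_stack(0);
 *
 * Passing 0 starts the trace at the call site; the result appears as
 * a stack entry in the "trace" file.
 */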
Steven Rostedt91e86e52010-11-10 12:56:12 +01001909static DEFINE_PER_CPU(int, user_stack_count);
1910
Steven Rostedte77405a2009-09-02 14:17:06 -04001911void
1912ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001913{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001914 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001915 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001916 struct userstack_entry *entry;
1917 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001918
1919 if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
1920 return;
1921
Steven Rostedtb6345872010-03-12 20:03:30 -05001922 /*
1923	 * NMIs cannot handle page faults, even with fixups.
1924	 * Saving the user stack can (and often does) fault.
1925 */
1926 if (unlikely(in_nmi()))
1927 return;
1928
Steven Rostedt91e86e52010-11-10 12:56:12 +01001929 /*
1930 * prevent recursion, since the user stack tracing may
1931 * trigger other kernel events.
1932 */
1933 preempt_disable();
1934 if (__this_cpu_read(user_stack_count))
1935 goto out;
1936
1937 __this_cpu_inc(user_stack_count);
1938
Steven Rostedte77405a2009-09-02 14:17:06 -04001939 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001940 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001941 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001942 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001943 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001944
Steven Rostedt48659d32009-09-11 11:36:23 -04001945 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001946 memset(&entry->caller, 0, sizeof(entry->caller));
1947
1948 trace.nr_entries = 0;
1949 trace.max_entries = FTRACE_STACK_ENTRIES;
1950 trace.skip = 0;
1951 trace.entries = entry->caller;
1952
1953 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001954 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001955 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001956
Li Zefan1dbd1952010-12-09 15:47:56 +08001957 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001958 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001959 out:
1960 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001961}
1962
Hannes Eder4fd27352009-02-10 19:44:12 +01001963#ifdef UNUSED
1964static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001965{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001966 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001967}
Hannes Eder4fd27352009-02-10 19:44:12 +01001968#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001969
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001970#endif /* CONFIG_STACKTRACE */
1971
Steven Rostedt07d777f2011-09-22 14:01:55 -04001972/* created for use with alloc_percpu */
1973struct trace_buffer_struct {
1974 char buffer[TRACE_BUF_SIZE];
1975};
1976
1977static struct trace_buffer_struct *trace_percpu_buffer;
1978static struct trace_buffer_struct *trace_percpu_sirq_buffer;
1979static struct trace_buffer_struct *trace_percpu_irq_buffer;
1980static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1981
1982/*
1983 * The buffer used is dependent on the context. There is a per cpu
1984 * buffer for normal context, softirq context, hard irq context and
1985 * for NMI context. This allows for lockless recording.
1986 *
1987 * Note, if the buffers failed to be allocated, then this returns NULL
1988 */
1989static char *get_trace_buf(void)
1990{
1991 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04001992
1993 /*
1994 * If we have allocated per cpu buffers, then we do not
1995 * need to do any locking.
1996 */
1997 if (in_nmi())
1998 percpu_buffer = trace_percpu_nmi_buffer;
1999 else if (in_irq())
2000 percpu_buffer = trace_percpu_irq_buffer;
2001 else if (in_softirq())
2002 percpu_buffer = trace_percpu_sirq_buffer;
2003 else
2004 percpu_buffer = trace_percpu_buffer;
2005
2006 if (!percpu_buffer)
2007 return NULL;
2008
Shan Weid8a03492012-11-13 09:53:04 +08002009 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002010}
2011
2012static int alloc_percpu_trace_buffer(void)
2013{
2014 struct trace_buffer_struct *buffers;
2015 struct trace_buffer_struct *sirq_buffers;
2016 struct trace_buffer_struct *irq_buffers;
2017 struct trace_buffer_struct *nmi_buffers;
2018
2019 buffers = alloc_percpu(struct trace_buffer_struct);
2020 if (!buffers)
2021 goto err_warn;
2022
2023 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2024 if (!sirq_buffers)
2025 goto err_sirq;
2026
2027 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2028 if (!irq_buffers)
2029 goto err_irq;
2030
2031 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2032 if (!nmi_buffers)
2033 goto err_nmi;
2034
2035 trace_percpu_buffer = buffers;
2036 trace_percpu_sirq_buffer = sirq_buffers;
2037 trace_percpu_irq_buffer = irq_buffers;
2038 trace_percpu_nmi_buffer = nmi_buffers;
2039
2040 return 0;
2041
2042 err_nmi:
2043 free_percpu(irq_buffers);
2044 err_irq:
2045 free_percpu(sirq_buffers);
2046 err_sirq:
2047 free_percpu(buffers);
2048 err_warn:
2049 WARN(1, "Could not allocate percpu trace_printk buffer");
2050 return -ENOMEM;
2051}
2052
Steven Rostedt81698832012-10-11 10:15:05 -04002053static int buffers_allocated;
2054
Steven Rostedt07d777f2011-09-22 14:01:55 -04002055void trace_printk_init_buffers(void)
2056{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002057 if (buffers_allocated)
2058 return;
2059
2060 if (alloc_percpu_trace_buffer())
2061 return;
2062
Steven Rostedt2184db42014-05-28 13:14:40 -04002063 /* trace_printk() is for debug use only. Don't use it in production. */
2064
Borislav Petkov69a1c992015-01-27 17:17:20 +01002065 pr_warning("\n");
2066 pr_warning("**********************************************************\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002067 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2068 pr_warning("** **\n");
2069 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2070 pr_warning("** **\n");
2071 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
Frans Klavereff264e2014-11-07 15:53:44 +01002072 pr_warning("** unsafe for production use. **\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002073 pr_warning("** **\n");
2074 pr_warning("** If you see this message and you are not debugging **\n");
2075 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2076 pr_warning("** **\n");
2077 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2078 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002079
Steven Rostedtb382ede62012-10-10 21:44:34 -04002080	/* Expand the buffers to their configured size */
2081 tracing_update_buffers();
2082
Steven Rostedt07d777f2011-09-22 14:01:55 -04002083 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002084
2085 /*
2086 * trace_printk_init_buffers() can be called by modules.
2087 * If that happens, then we need to start cmdline recording
2088 * directly here. If the global_trace.buffer is already
2089 * allocated here, then this was called by module code.
2090 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002091 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002092 tracing_start_cmdline_record();
2093}
2094
2095void trace_printk_start_comm(void)
2096{
2097 /* Start tracing comms if trace printk is set */
2098 if (!buffers_allocated)
2099 return;
2100 tracing_start_cmdline_record();
2101}
2102
2103static void trace_printk_start_stop_comm(int enabled)
2104{
2105 if (!buffers_allocated)
2106 return;
2107
2108 if (enabled)
2109 tracing_start_cmdline_record();
2110 else
2111 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002112}
2113
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002114/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002115 * trace_vbprintk - write a binary message to the tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002116 *
2117 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002118int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002119{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002120 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002121 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002122 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002123 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002124 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002125 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002126 char *tbuffer;
2127 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002128
2129 if (unlikely(tracing_selftest_running || tracing_disabled))
2130 return 0;
2131
2132 /* Don't pollute graph traces with trace_vprintk internals */
2133 pause_graph_tracing();
2134
2135 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002136 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002137
Steven Rostedt07d777f2011-09-22 14:01:55 -04002138 tbuffer = get_trace_buf();
2139 if (!tbuffer) {
2140 len = 0;
2141 goto out;
2142 }
2143
2144 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2145
2146 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002147 goto out;
2148
Steven Rostedt07d777f2011-09-22 14:01:55 -04002149 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002150 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002151 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002152 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2153 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002154 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002155 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002156 entry = ring_buffer_event_data(event);
2157 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002158 entry->fmt = fmt;
2159
Steven Rostedt07d777f2011-09-22 14:01:55 -04002160 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002161 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002162 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002163 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002164 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002165
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002166out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002167 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002168 unpause_graph_tracing();
2169
2170 return len;
2171}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002172EXPORT_SYMBOL_GPL(trace_vbprintk);
2173
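/*
 * trace_vbprintk() is the path taken by trace_printk() when the
 * format contains conversions, e.g.:
 *
 *	trace_printk("ip=%lx count=%d\n", ip, count);
 *
 * Only the binary arguments plus a pointer to the format string are
 * stored in the ring buffer; the format is expanded when the buffer
 * is read, which keeps the fast path cheap.
 */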
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002174static int
2175__trace_array_vprintk(struct ring_buffer *buffer,
2176 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002177{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002178 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002179 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002180 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002181 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002182 unsigned long flags;
2183 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002184
2185 if (tracing_disabled || tracing_selftest_running)
2186 return 0;
2187
Steven Rostedt07d777f2011-09-22 14:01:55 -04002188 /* Don't pollute graph traces with trace_vprintk internals */
2189 pause_graph_tracing();
2190
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002191 pc = preempt_count();
2192 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002193
Steven Rostedt07d777f2011-09-22 14:01:55 -04002194
2195 tbuffer = get_trace_buf();
2196 if (!tbuffer) {
2197 len = 0;
2198 goto out;
2199 }
2200
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002201 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002202
Steven Rostedt07d777f2011-09-22 14:01:55 -04002203 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002204 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002205 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002206 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002207 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002208 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002209 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002210 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002211
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002212 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002213 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002214 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04002215 ftrace_trace_stack(buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002216 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002217 out:
2218 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002219 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002220
2221 return len;
2222}
Steven Rostedt659372d2009-09-03 19:11:07 -04002223
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002224int trace_array_vprintk(struct trace_array *tr,
2225 unsigned long ip, const char *fmt, va_list args)
2226{
2227 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2228}
2229
2230int trace_array_printk(struct trace_array *tr,
2231 unsigned long ip, const char *fmt, ...)
2232{
2233 int ret;
2234 va_list ap;
2235
2236 if (!(trace_flags & TRACE_ITER_PRINTK))
2237 return 0;
2238
2239 va_start(ap, fmt);
2240 ret = trace_array_vprintk(tr, ip, fmt, ap);
2241 va_end(ap);
2242 return ret;
2243}
2244
2245int trace_array_printk_buf(struct ring_buffer *buffer,
2246 unsigned long ip, const char *fmt, ...)
2247{
2248 int ret;
2249 va_list ap;
2250
2251 if (!(trace_flags & TRACE_ITER_PRINTK))
2252 return 0;
2253
2254 va_start(ap, fmt);
2255 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2256 va_end(ap);
2257 return ret;
2258}
2259
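/*
 * Sketch of printing into a specific trace instance instead of the
 * global buffer (tr would be that instance's trace_array; the
 * message is hypothetical):
 *
 *	trace_array_printk(tr, _THIS_IP_, "reset on cpu %d\n", cpu);
 */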
Steven Rostedt659372d2009-09-03 19:11:07 -04002260int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2261{
Steven Rostedta813a152009-10-09 01:41:35 -04002262 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002263}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002264EXPORT_SYMBOL_GPL(trace_vprintk);
2265
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002266static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002267{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002268 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2269
Steven Rostedt5a90f572008-09-03 17:42:51 -04002270 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002271 if (buf_iter)
2272 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002273}
2274
Ingo Molnare309b412008-05-12 21:20:51 +02002275static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002276peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2277 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002278{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002279 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002280 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002281
Steven Rostedtd7690412008-10-01 00:29:53 -04002282 if (buf_iter)
2283 event = ring_buffer_iter_peek(buf_iter, ts);
2284 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002285 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002286 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002287
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002288 if (event) {
2289 iter->ent_size = ring_buffer_event_length(event);
2290 return ring_buffer_event_data(event);
2291 }
2292 iter->ent_size = 0;
2293 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002294}
Steven Rostedtd7690412008-10-01 00:29:53 -04002295
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002296static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002297__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2298 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002299{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002300 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002301 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002302 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002303 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002304 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002305 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002306 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002307 int cpu;
2308
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002309 /*
2310	 * If we are in a per_cpu trace file, don't bother iterating over
2311	 * all the CPUs; peek at that CPU directly.
2312 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002313 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002314 if (ring_buffer_empty_cpu(buffer, cpu_file))
2315 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002316 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002317 if (ent_cpu)
2318 *ent_cpu = cpu_file;
2319
2320 return ent;
2321 }
2322
Steven Rostedtab464282008-05-12 21:21:00 +02002323 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002324
2325 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002326 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002327
Steven Rostedtbc21b472010-03-31 19:49:26 -04002328 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002329
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002330 /*
2331 * Pick the entry with the smallest timestamp:
2332 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002333 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002334 next = ent;
2335 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002336 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002337 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002338 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002339 }
2340 }
2341
Steven Rostedt12b5da32012-03-27 10:43:28 -04002342 iter->ent_size = next_size;
2343
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002344 if (ent_cpu)
2345 *ent_cpu = next_cpu;
2346
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002347 if (ent_ts)
2348 *ent_ts = next_ts;
2349
Steven Rostedtbc21b472010-03-31 19:49:26 -04002350 if (missing_events)
2351 *missing_events = next_lost;
2352
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002353 return next;
2354}
2355
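/*
 * __find_next_entry() above is an N-way merge: each per-cpu buffer
 * is already ordered by time stamp, so the globally next event is
 * the per-cpu head with the smallest one. With hypothetical heads
 *
 *	cpu0: ts=105	cpu1: ts=101	cpu2: ts=203
 *
 * cpu1's event is returned, and only cpu1's iterator advances on the
 * next trace_find_next_entry_inc().
 */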
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002356/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002357struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2358 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002359{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002360 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002361}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002362
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002363/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002364void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002365{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002366 iter->ent = __find_next_entry(iter, &iter->cpu,
2367 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002368
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002369 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002370 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002371
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002372 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002373}
2374
Ingo Molnare309b412008-05-12 21:20:51 +02002375static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002376{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002377 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002378 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002379}
2380
Ingo Molnare309b412008-05-12 21:20:51 +02002381static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002382{
2383 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002384 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002385 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002386
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002387 WARN_ON_ONCE(iter->leftover);
2388
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002389 (*pos)++;
2390
2391 /* can't go backwards */
2392 if (iter->idx > i)
2393 return NULL;
2394
2395 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002396 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002397 else
2398 ent = iter;
2399
2400 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002401 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002402
2403 iter->pos = *pos;
2404
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002405 return ent;
2406}
2407
Jason Wessel955b61e2010-08-05 09:22:23 -05002408void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002409{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002410 struct ring_buffer_event *event;
2411 struct ring_buffer_iter *buf_iter;
2412 unsigned long entries = 0;
2413 u64 ts;
2414
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002415 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002416
Steven Rostedt6d158a82012-06-27 20:46:14 -04002417 buf_iter = trace_buffer_iter(iter, cpu);
2418 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002419 return;
2420
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002421 ring_buffer_iter_reset(buf_iter);
2422
2423 /*
2424	 * With the max latency tracers, a reset may never have taken
2425	 * place on a cpu. This is evident when the timestamp is
2426	 * before the start of the buffer.
2427 */
2428 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002429 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002430 break;
2431 entries++;
2432 ring_buffer_read(buf_iter, NULL);
2433 }
2434
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002435 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002436}
2437
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002438/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002439 * The current tracer is copied to avoid taking a global lock
2440 * all around.
2441 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002442static void *s_start(struct seq_file *m, loff_t *pos)
2443{
2444 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002445 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002446 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002447 void *p = NULL;
2448 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002449 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002450
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002451 /*
2452 * copy the tracer to avoid using a global lock all around.
2453	 * iter->trace is a copy of current_trace, so the pointer to the
2454	 * name may be compared instead of using strcmp(), as iter->trace->name
2455 * will point to the same string as current_trace->name.
2456 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}

static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
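
/*
 * How the pieces above fit together: a read of the "trace" file drives
 * the seq_file state machine, roughly (a sketch only; the real loop in
 * fs/seq_file.c also handles buffer overflow and partial reads):
 *
 *	p = s_start(m, &pos);		// take locks, position iterator
 *	while (p) {
 *		s_show(m, p);		// format one entry (see s_show())
 *		p = s_next(m, p, &pos);	// advance to the next entry
 *	}
 *	s_stop(m, p);			// drop locks
 */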

static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}
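
/*
 * Worked example (made-up numbers): a cpu whose buffer holds 1000
 * readable events and has overwritten 500 older ones contributes
 * entries += 1000 and total += 1500, which the "trace" header reports
 * as "entries-in-buffer/entries-written: 1000/1500". When
 * skipped_entries is set, the skipped events are subtracted instead
 * and total == entries for that cpu.
 */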

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}
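
/*
 * A line of default output lines up under the irq-info header above,
 * e.g. (values made up for illustration):
 *
 *	          <idle>-0     [002] d.h.  2345.114427: do_timer <-tick_sched_do_timer
 *
 * TASK-PID, CPU#, the four flag columns, TIMESTAMP and FUNCTION map
 * one-to-one onto the columns printed by this helper.
 */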

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;

	if (!(trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
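
/*
 * Which of the formats above gets used is user selectable via the
 * core trace options, e.g.:
 *
 *	# echo hex > trace_options	selects print_hex_fmt()
 *	# echo raw > trace_options	selects print_raw_fmt()
 *	# echo bin > trace_options	selects print_bin_fmt()
 *
 * With none of bin/hex/raw set, output falls through to the default
 * print_trace_fmt().
 */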

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002914}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002915
2916static void show_snapshot_percpu_help(struct seq_file *m)
2917{
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01002918 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002919#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002920 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
2921 "# Takes a snapshot of the main buffer for this cpu.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002922#else
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002923 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
2924 "# Must use main snapshot file to allocate.\n");
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002925#endif
Rasmus Villemoesd79ac282014-11-08 21:42:11 +01002926 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2'; works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
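
/*
 * A typical snapshot session, mirroring the help text above:
 *
 *	# echo 1 > snapshot		allocate and take a snapshot
 *	# cat snapshot			read the frozen copy
 *	# echo 0 > snapshot		free the snapshot buffer
 */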

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(); trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
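
/*
 * The matching encode presumably happens when the per-cpu files are
 * created (see trace_create_cpu_file()), along the lines of:
 *
 *	inode->i_cdev = (void *)(long)(cpu + 1);
 *
 * so a NULL i_cdev (the top-level files) decodes to
 * RING_BUFFER_ALL_CPUS and cpu N is recovered by subtracting 1.
 */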

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return (tracing_disabled) ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}
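
/*
 * E.g. when iterating tracers for an instance ("instances/foo"),
 * get_tracer_for_array() skips every tracer that did not set
 * ->allow_instances, so an instance's "available_tracers" is
 * typically a subset of the top-level list.
 */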

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}
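
/*
 * These seq ops back the "available_tracers" file, e.g.:
 *
 *	# cat available_tracers
 *	blk function_graph wakeup function nop
 *
 * (one space-separated line; the exact set depends on the kernel
 * configuration).
 */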

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
		    cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
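
/*
 * The mask is read and written as hex, e.g. on a 4-CPU machine:
 *
 *	# echo 3 > tracing_cpumask	trace only CPUs 0 and 1
 *	# cat tracing_cpumask
 *	3
 *
 * The loop above disables recording on CPUs leaving the mask and
 * re-enables it on CPUs entering it, so tracing never has to be
 * restarted as a whole.
 */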

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		trace_flags |= mask;
	else
		trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK)
		trace_printk_start_stop_comm(enabled);

	return 0;
}

static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;

	cmp = strstrip(option);

	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	return ret;
}
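
/*
 * E.g. writing to the "trace_options" file:
 *
 *	# echo sym-offset > trace_options	set a core option
 *	# echo noprint-parent > trace_options	clear a core option
 *
 * Names that match no core option fall through to the current
 * tracer's private flags via set_tracer_option() above.
 */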
3592
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

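/*
 * Example use of the trace_options file from the shell:
 *   echo print-parent > trace_options     # set a flag
 *   echo noprint-parent > trace_options   # clear it again
 */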
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};

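/*
 * Text of the tracefs "README" file, served read-only through
 * tracing_readme_fops below.
 */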
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t- change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file are written into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo the number of comm-pid entries to store\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t  functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t\t      dump\n"
	"\t\t      cpudump\n"
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t     The first time do_trap is hit and it disables tracing, the\n"
	"\t     counter will decrement to 2. If tracing is already disabled,\n"
	"\t     the counter will not decrement. It only decrements when the\n"
	"\t     trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger>' > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0' > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and the 'nr_rq' event field has a value greater than 1.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger>' > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0' > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
;

static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
					readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};

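/*
 * The saved_cmdlines file walks savedcmd->map_cmdline_to_pid[] with a
 * seq_file iterator; slots that never recorded a command (NO_CMDLINE_MAP)
 * are skipped so only valid pid/comm pairs are shown.
 */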
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}

static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};

static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

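/*
 * saved_cmdlines_size: reading reports how many comm-pid slots are
 * allocated; writing a new count (e.g. "echo 1024 > saved_cmdlines_size")
 * resizes the buffer via tracing_resize_saved_cmdlines().
 */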
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}

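/*
 * Resize by allocating a new buffer first and swapping it in under
 * trace_cmdline_lock, so readers never see a half-initialized map;
 * the old buffer is freed only after the swap.
 */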
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static union trace_enum_map_item *
update_enum_map(union trace_enum_map_item *ptr)
{
	if (!ptr->map.enum_string) {
		if (ptr->tail.next) {
			ptr = ptr->tail.next;
			/* Set ptr to the next real item (skip head) */
			ptr++;
		} else
			return NULL;
	}
	return ptr;
}

static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_enum_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_enum_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	ptr = update_enum_map(ptr);

	return ptr;
}

static void *enum_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_enum_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_enum_mutex);

	v = trace_enum_maps;
	if (v)
		v++;

	while (v && l < *pos)
		v = enum_map_next(m, v, &l);

	return v;
}

static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}

static int enum_map_show(struct seq_file *m, void *v)
{
	union trace_enum_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.enum_string, ptr->map.enum_value,
		   ptr->map.system);

	return 0;
}

static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};

static int tracing_enum_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_enum_map_seq_ops);
}

static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

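/*
 * A trace_enum_map_item array is laid out as:
 *
 *   [ head: mod, length ][ map 0 ] ... [ map len-1 ][ tail: next ]
 *
 * The head and tail entries let per-module arrays be chained into one
 * logical list that the seq_file iterator above walks.
 */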
static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}

static void
trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
			   int len)
{
	struct trace_enum_map **stop;
	struct trace_enum_map **map;
	union trace_enum_map_item *map_array;
	union trace_enum_map_item *ptr;

	stop = start + len;

	/*
	 * The trace_enum_maps contains the map plus a head and tail item,
	 * where the head holds the module and length of array, and the
	 * tail holds a pointer to the next list.
	 */
	map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
	if (!map_array) {
		pr_warning("Unable to allocate trace enum mapping\n");
		return;
	}

	mutex_lock(&trace_enum_mutex);

	if (!trace_enum_maps)
		trace_enum_maps = map_array;
	else {
		ptr = trace_enum_maps;
		for (;;) {
			ptr = trace_enum_jmp_to_tail(ptr);
			if (!ptr->tail.next)
				break;
			ptr = ptr->tail.next;
		}
		ptr->tail.next = map_array;
	}
	map_array->head.mod = mod;
	map_array->head.length = len;
	map_array++;

	for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
		map_array->map = **map;
		map_array++;
	}
	memset(map_array, 0, sizeof(*map_array));

	mutex_unlock(&trace_enum_mutex);
}

static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}

#else /* CONFIG_TRACE_ENUM_MAP_FILE */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			 struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */

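/*
 * Called for the core kernel and for each module: first let the event
 * subsystem rewrite enum symbols in event print formats, then (if
 * configured) record the maps for the enum_map file.
 */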
static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	struct trace_enum_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_enum_update(map, len);

	trace_insert_enum_map_file(mod, start, len);
}

static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}

#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's entries to match those of @size_buf */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
			 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snapshot. We succeeded in
			 * updating the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}

static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save memory when tracing is never used on a system that has it
 * configured in, the ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}

struct trace_option_dentry;

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer);

static void
destroy_trace_option_files(struct trace_option_dentry *topts);

/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}

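/*
 * Recreate the per-tracer files under options/ for the new tracer.
 * Only the top-level instance has these files, and only once its
 * directory exists.
 */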
static void update_tracer_options(struct trace_array *tr, struct tracer *t)
{
	static struct trace_option_dentry *topts;

	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	/* Currently, only the top instance has options */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL))
		return;

	destroy_trace_option_files(topts);
	topts = create_trace_option_files(tr, t);
}

static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called with interrupts disabled,
		 * so a synchronize_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif
	update_tracer_options(tr, t);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

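/*
 * Writes to current_tracer (e.g. "echo function_graph > current_tracer")
 * land here and are passed on to tracing_set_tracer().
 */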
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	*ppos += ret;

	return ret;
}

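/*
 * The tracing_nsecs_* helpers below keep their values in nanoseconds
 * internally but expose them in microseconds to user space; they back
 * both tracing_thresh and the max latency files.
 */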
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	*ptr = val * 1000;

	return cnt;
}

static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}

static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

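/*
 * Opening trace_pipe gives the reader its own trace_iterator and pins
 * the current tracer (ref++) until tracing_release_pipe() drops it.
 */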
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	/* iter->trace points at the live tracer; only free what we allocated */
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}

static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}

static unsigned int
tracing_poll_pipe(struct file *filp, poll_table *poll_table)
{
	struct trace_iterator *iter = filp->private_data;

	return trace_poll(iter, filp, poll_table);
}

/* Must be called with iter->mutex held. */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

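/*
 * Fill the page-sized iter->seq with formatted trace entries for one
 * splice page, stopping once the seq buffer fills up or the remaining
 * byte budget 'rem' is exhausted; returns the bytes still wanted.
 */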
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

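/*
 * splice() support for trace_pipe: each page handed to the pipe is
 * filled via tracing_fill_pipe_page() and copied out of iter->seq,
 * avoiding an extra copy through user space.
 */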
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

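/*
 * Read side of buffer_size_kb. The per-cpu file prints that CPU's
 * ring-buffer size in KB; the top-level file prints the common size,
 * "X" when the per-cpu sizes differ, and appends "(expanded: N)"
 * while the boot-time minimal buffer has not yet been expanded.
 */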
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu sizes are same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

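/*
 * Hypothetical shell session for the write handler above (assuming
 * tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 4096 > buffer_size_kb	(resize each CPU buffer to 4 MB)
 *	# cat buffer_size_kb
 *	4096
 */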
static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
			   size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function exists only so that "echo" onto the file does not
	 * return an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing? */
	if (trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
		   size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which most likely it already is, because the user just
	 * referenced it. But there's no guarantee that it is. By
	 * using get_user_pages_fast() and kmap_atomic()/kunmap_atomic()
	 * we can get access to the pages directly. We then write the
	 * data directly into the ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = nr_pages - 1; i >= 0; i--) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}

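/*
 * Sketch of the intended use from user space (assuming this handler
 * backs the "trace_marker" file in tracefs):
 *
 *	echo "hit the slow path" > /sys/kernel/tracing/trace_marker
 *
 * The string appears in the trace as a TRACE_PRINT entry, interleaved
 * with the kernel events.
 */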
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

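/*
 * Switch the trace clock to the trace_clocks[] entry whose name
 * matches @clockstr. The buffers are reset afterwards, since
 * timestamps taken with the old clock are not comparable with
 * the new one.
 */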
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

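/*
 * Hypothetical session (clock names come from trace_clocks[], e.g.
 * "local", "global" and "counter"; the bracketed one is current):
 *
 *	# cat trace_clock
 *	[local] global counter ...
 *	# echo global > trace_clock
 */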
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

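/*
 * Summary of the write commands handled by the switch above:
 *
 *	echo 0 > snapshot	free the snapshot buffer (all-cpus
 *				file only)
 *	echo 1 > snapshot	allocate the buffer if needed, then
 *				swap it with the live buffer
 *	echo 2 > snapshot	any other value: clear the snapshot
 *				buffer without freeing it
 */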
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */

static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

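/*
 * A buffer_ref wraps one ring-buffer page handed out to a pipe.
 * Every pipe_buffer referencing the page holds a count on ->ref;
 * the page goes back to the ring buffer when the last reference
 * is dropped.
 */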
struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(), if we need to release some pages
 * at the end of the spd in case we error'ed out in filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

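/*
 * Zero-copy splice of raw ring-buffer pages into a pipe: each page
 * is wrapped in a buffer_ref and handed to splice_to_pipe() without
 * copying the data. Both *ppos and len must be page aligned.
 */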
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			return ret;

		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			return -EAGAIN;

		ret = wait_on_pipe(iter, true);
		if (ret)
			return ret;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
				 t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				 ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}

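/*
 * Sample per_cpu/cpuN/stats output produced by the function above
 * (the values are purely illustrative):
 *
 *	entries: 129
 *	overrun: 0
 *	commit overrun: 0
 *	bytes: 6724
 *	oldest event ts:  2413.237126
 *	now ts:  2427.890399
 *	dropped events: 0
 *	read events: 129
 */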
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

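/*
 * The callback above implements the "snapshot" command parsed out of
 * set_ftrace_filter. Hypothetical examples of the accepted syntax
 * ("do_fault" is just an example function name):
 *
 *	echo 'do_fault:snapshot' > set_ftrace_filter	(every hit)
 *	echo 'do_fault:snapshot:3' > set_ftrace_filter	(first 3 hits)
 *	echo '!do_fault:snapshot' > set_ftrace_filter	(remove probe)
 */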
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

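/*
 * Like trace_create_file(), but also stashes cpu + 1 in the inode's
 * i_cdev so tracing_get_cpu() can recover which CPU a per-cpu file
 * belongs to (the +1 keeps CPU 0 distinct from an unset pointer).
 */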
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}

Steven Rostedt2b6080f2012-05-11 13:29:49 -04006074static void
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006075tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006076{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006077 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006078 struct dentry *d_cpu;
Steven Rostedtdd49a382010-10-20 21:51:26 -04006079 char cpu_dir[30]; /* 30 characters should be more than enough */
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006080
Namhyung Kim0a3d7ce2012-04-23 10:11:57 +09006081 if (!d_percpu)
6082 return;
6083
Steven Rostedtdd49a382010-10-20 21:51:26 -04006084 snprintf(cpu_dir, 30, "cpu%ld", cpu);
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006085 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006086 if (!d_cpu) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006087 pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006088 return;
6089 }
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006090
Frederic Weisbecker8656e7a2009-02-26 00:41:38 +01006091 /* per cpu trace_pipe */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006092 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
Oleg Nesterov15544202013-07-23 17:25:57 +02006093 tr, cpu, &tracing_pipe_fops);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006094
6095 /* per cpu trace */
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006096 trace_create_cpu_file("trace", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006097 tr, cpu, &tracing_fops);
Steven Rostedt7f96f932009-03-13 00:37:42 -04006098
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006099 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006100 tr, cpu, &tracing_buffers_fops);
Steven Rostedtc8d77182009-04-29 18:03:45 -04006101
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006102 trace_create_cpu_file("stats", 0444, d_cpu,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006103 tr, cpu, &tracing_stats_fops);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08006104
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006105 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02006106 tr, cpu, &tracing_entries_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006107
6108#ifdef CONFIG_TRACER_SNAPSHOT
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006109 trace_create_cpu_file("snapshot", 0644, d_cpu,
Oleg Nesterov6484c712013-07-23 17:26:10 +02006110 tr, cpu, &snapshot_fops);
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05006111
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006112 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02006113 tr, cpu, &snapshot_raw_fops);
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05006114#endif
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006115}
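
/*
 * Sketch of the resulting per-CPU layout (the mount point
 * /sys/kernel/tracing is an assumption; the file names come from the
 * calls above):
 *
 *   per_cpu/cpu0/trace_pipe        consuming reader for CPU 0
 *   per_cpu/cpu0/trace             non-consuming view of CPU 0's buffer
 *   per_cpu/cpu0/trace_pipe_raw    binary ring-buffer pages
 *   per_cpu/cpu0/stats             per-CPU entry/overrun counters
 *   per_cpu/cpu0/buffer_size_kb    size of CPU 0's buffer
 *   per_cpu/cpu0/snapshot[_raw]    only with CONFIG_TRACER_SNAPSHOT
 */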

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

struct trace_option_dentry {
	struct tracer_opt		*opt;
	struct tracer_flags		*flags;
	struct trace_array		*tr;
	struct dentry			*entry;
};

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
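
/*
 * Usage sketch for the per-tracer option files served by these handlers
 * (the mount path is an assumption; "<opt>" stands for any name in the
 * current tracer's flags->opts table):
 *
 *   # echo 1 > /sys/kernel/tracing/options/<opt>
 *   # cat /sys/kernel/tracing/options/<opt>        reads back "1"
 *
 * Values other than 0 and 1 are rejected with -EINVAL, and the tracer
 * is only notified (via __set_tracer_option()) when the bit actually
 * changes.
 */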

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	long index = (long)filp->private_data;
	char *buf;

	if (trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_array *tr = &global_trace;
	long index = (long)filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create tracefs '%s' entry\n", name);

	return ret;
}
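
/*
 * Minimal usage sketch for trace_create_file(); the caller, file name,
 * data pointer and fops below are hypothetical, not taken from this
 * file:
 *
 *	struct dentry *d;
 *
 *	d = trace_create_file("example_stat", 0444, d_tracer,
 *			      my_private_data, &example_stat_fops);
 *	if (!d)
 *		return;		(failure was already logged above)
 *
 * Unlike raw tracefs_create_file(), a NULL return here has already
 * produced a warning, so callers typically just bail out quietly.
 */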

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static struct trace_option_dentry *
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;

	if (!tracer)
		return NULL;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return NULL;

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return NULL;

	for (cnt = 0; opts[cnt].name; cnt++)
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);

	return topts;
}

static void
destroy_trace_option_files(struct trace_option_dentry *topts)
{
	int cnt;

	if (!topts)
		return;

	for (cnt = 0; topts[cnt].opt; cnt++)
		tracefs_remove(topts[cnt].entry);

	kfree(topts);
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options, (void *)index,
				 &trace_options_core_fops);
}

static __init void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++)
		create_trace_option_core_file(tr, trace_options[i], i);
}

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
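
/*
 * Usage sketch for the "tracing_on" file backed by these handlers
 * (the mount point is an assumption):
 *
 *   # echo 0 > /sys/kernel/tracing/tracing_on    stop recording
 *   # echo 1 > /sys/kernel/tracing/tracing_on    resume recording
 *
 * This only gates writes to the ring buffer; the current tracer stays
 * loaded, and its start()/stop() callbacks above give it a chance to
 * react to the switch.
 */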

static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
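
/*
 * A tentative note on the sizing above: unless allocate_snapshot was
 * set from the kernel command line, the max/snapshot buffer is created
 * with a token size of 1 and is only expanded to a useful size when a
 * snapshot is actually requested.
 */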

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
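
/*
 * Usage sketch for instances (the mount point is an assumption; the
 * semantics follow from the two functions above):
 *
 *   # mkdir /sys/kernel/tracing/instances/foo     -> instance_mkdir("foo")
 *   # rmdir /sys/kernel/tracing/instances/foo     -> instance_rmdir("foo")
 *
 * rmdir returns -EBUSY while the instance or its current tracer is
 * still referenced, and -ENODEV if no instance of that name exists.
 */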

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);
}

static struct vfsmount *trace_automount(void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
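
/*
 * A hedged note on the mntget() above: a d_automount callback is
 * expected to hand back a pinned vfsmount for the VFS to graft in, so
 * an extra reference is taken before returning the mount, keeping
 * tracefs alive while it is reachable through debugfs/tracing.
 */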

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!debugfs_initialized()))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}
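
/*
 * Hedged summary of the convention used throughout this file: a NULL
 * return from tracing_init_dentry() means "create files at the root of
 * the tracefs mount" (tracefs treats a NULL parent as its root), while
 * an ERR_PTR return means the tracing directory could not be set up
 * and callers should skip file creation entirely.
 */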

extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	create_trace_options_dir(&global_trace);

	/* If the tracer was started via cmdline, create options for it here */
	if (global_trace.current_trace != &nop_trace)
		update_tracer_options(&global_trace, global_trace.current_trace);

	return 0;
}
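
/*
 * Sketch of the extra top-level files created above, beyond those from
 * init_tracer_tracefs() (the mount point is an assumption; the names
 * come from the calls above):
 *
 *   tracing_thresh            latency threshold for the latency tracers
 *   README                    quick reference for the tracing files
 *   saved_cmdlines[_size]     recorded pid -> comm mappings for output
 *   dyn_ftrace_total_info     only with CONFIG_DYNAMIC_FTRACE
 *   instances/, options/      sub-directories wired up above
 */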

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
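
/*
 * Usage sketch (hedged): ftrace_dump() is normally reached through the
 * panic/die notifiers above when ftrace_dump_on_oops is set, or via
 * sysrq-z, but being exported it can also be called directly from
 * debugging code:
 *
 *	ftrace_dump(DUMP_ALL);		dump every CPU's buffer
 *	ftrace_dump(DUMP_ORIG);		dump only the calling CPU
 */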

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	while (trace_boot_options) {
		char *option;

		option = strsep(&trace_boot_options, ",");
		trace_set_options(&global_trace, option);
	}

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);