blob: de95fcfc6865be623d9dfc4ac7b963bcf640289a [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050023#include <linux/tracefs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020024#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020025#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050028#include <linux/kprobes.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020029#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050032#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040033#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010034#include <linux/string.h>
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -050035#include <linux/mount.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080036#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020038#include <linux/ctype.h>
39#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020040#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050041#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020042#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060043#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020044
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050046#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020047
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010048/*
Steven Rostedt73c51622009-03-11 13:42:01 -040049 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050052bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040053
54/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010055 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010056 * A selftest will lurk into the ring-buffer to count the
57 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
Frederic Weisbeckerff325042008-12-04 23:47:35 +010059 * at the same time, giving false positive or negative results.
60 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010061static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010062
Steven Rostedtb2821ae2009-02-02 21:38:32 -050063/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
Li Zefan020e5f82009-07-01 10:47:05 +080066bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050067
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050068/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010072/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
77static struct tracer_flags dummy_tracer_flags = {
78 .val = 0,
79 .opts = dummy_tracer_opt
80};
81
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050082static int
83dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010084{
85 return 0;
86}
Steven Rostedt0f048702008-11-05 16:05:44 -050087
88/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040089 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
91 * occurred.
92 */
93static DEFINE_PER_CPU(bool, trace_cmdline_save);
94
95/*
Steven Rostedt0f048702008-11-05 16:05:44 -050096 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
99 * this back to zero.
100 */
Hannes Eder4fd27352009-02-10 19:44:12 +0100101static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -0500102
Jason Wessel955b61e2010-08-05 09:22:23 -0500103cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200104
Steven Rostedt944ac422008-10-23 19:26:08 -0400105/*
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 *
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputing it to a
112 * serial console.
113 *
114 * It is default off, but you can enable it with either specifying
115 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400119 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200120
121enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400122
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400123/* When set, tracing will stop when a WARN*() is hit */
124int __disable_trace_on_warning;
125
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module *mod;		/* owning module, NULL when built in */
	unsigned long length;		/* number of saved maps in this array */
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item *next;	/* next array of saved enum_map items */
	const char *end; /* points to NULL */
};

/* NOTE(review): presumably serializes updates to trace_enum_maps — confirm at call sites. */
static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map map;
	struct trace_enum_map_head head;
	struct trace_enum_map_tail tail;
};

/* Head of the chained arrays described above. */
static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
161
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500162static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500163
Li Zefanee6c2c12009-09-18 14:06:47 +0800164#define MAX_TRACER_SIZE 100
165static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500166static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100167
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500168static bool allocate_snapshot;
169
/*
 * "ftrace=<tracer>" boot command line parameter.
 *
 * Save the requested tracer name so it can be enabled once tracing is
 * initialized, and request the ring buffer be expanded from its
 * minimal boot-time size.
 */
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;	/* argument consumed */
}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200178__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100179
Steven Rostedt944ac422008-10-23 19:26:08 -0400180static int __init set_ftrace_dump_on_oops(char *str)
181{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200182 if (*str++ != '=' || !*str) {
183 ftrace_dump_on_oops = DUMP_ALL;
184 return 1;
185 }
186
187 if (!strcmp("orig_cpu", str)) {
188 ftrace_dump_on_oops = DUMP_ORIG;
189 return 1;
190 }
191
192 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400193}
194__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200195
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400196static int __init stop_trace_on_warning(char *str)
197{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200198 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
199 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400200 return 1;
201}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200202__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400203
/*
 * "alloc_snapshot" boot command line parameter: allocate the snapshot
 * (max) buffer at boot so snapshots can be taken early.
 */
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400211__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500212
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400213
214static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400215
216static int __init set_trace_boot_options(char *str)
217{
Chen Gang67012ab2013-04-08 12:06:44 +0800218 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400219 return 0;
220}
221__setup("trace_options=", set_trace_boot_options);
222
Steven Rostedte1e232c2014-02-10 23:38:46 -0500223static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
224static char *trace_boot_clock __initdata;
225
226static int __init set_trace_boot_clock(char *str)
227{
228 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
229 trace_boot_clock = trace_boot_clock_buf;
230 return 0;
231}
232__setup("trace_clock=", set_trace_boot_clock);
233
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500234static int __init set_tracepoint_printk(char *str)
235{
236 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
237 tracepoint_printk = 1;
238 return 1;
239}
240__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400241
Lai Jiangshancf8e3472009-03-30 13:48:00 +0800242unsigned long long ns2usecs(cycle_t nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200243{
244 nsec += 500;
245 do_div(nsec, 1000);
246 return nsec;
247}
248
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400249/* trace_flags holds trace_options default values */
250#define TRACE_DEFAULT_FLAGS \
251 (FUNCTION_DEFAULT_FLAGS | \
252 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
253 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
254 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
255 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
256
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400257/* trace_options that are only supported by global_trace */
258#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
259 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
260
261
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200262/*
263 * The global_trace is the descriptor that holds the tracing
264 * buffers for the live tracing. For each CPU, it contains
265 * a link list of pages that will store trace entries. The
266 * page descriptor of the pages in the memory is used to hold
267 * the link list by linking the lru item in the page descriptor
268 * to each of the pages in the buffer per CPU.
269 *
270 * For each active CPU there is a data field that holds the
271 * pages for the buffer for that CPU. Each CPU has the same number
272 * of pages allocated for its buffer.
273 */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400274static struct trace_array global_trace = {
275 .trace_flags = TRACE_DEFAULT_FLAGS,
276};
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200277
Steven Rostedtae63b312012-05-03 23:09:03 -0400278LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200279
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400280int trace_array_get(struct trace_array *this_tr)
281{
282 struct trace_array *tr;
283 int ret = -ENODEV;
284
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
287 if (tr == this_tr) {
288 tr->ref++;
289 ret = 0;
290 break;
291 }
292 }
293 mutex_unlock(&trace_types_lock);
294
295 return ret;
296}
297
/*
 * Drop a reference on @this_tr. Caller must hold trace_types_lock;
 * use trace_array_put() for the locking variant.
 */
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);	/* underflow: a put without a matching get */
	this_tr->ref--;
}
303
/*
 * Drop a reference taken with trace_array_get(), serialized by
 * trace_types_lock.
 */
void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
310
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400311int filter_check_discard(struct trace_event_file *file, void *rec,
Tom Zanussif306cc82013-10-24 08:34:17 -0500312 struct ring_buffer *buffer,
313 struct ring_buffer_event *event)
Tom Zanussieb02ce02009-04-08 03:15:54 -0500314{
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -0400315 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
Tom Zanussif306cc82013-10-24 08:34:17 -0500316 !filter_match_preds(file->filter, rec)) {
317 ring_buffer_discard_commit(buffer, event);
318 return 1;
319 }
320
321 return 0;
Tom Zanussieb02ce02009-04-08 03:15:54 -0500322}
Tom Zanussif306cc82013-10-24 08:34:17 -0500323EXPORT_SYMBOL_GPL(filter_check_discard);
324
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400325int call_filter_check_discard(struct trace_event_call *call, void *rec,
Tom Zanussif306cc82013-10-24 08:34:17 -0500326 struct ring_buffer *buffer,
327 struct ring_buffer_event *event)
328{
329 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
330 !filter_match_preds(call->filter, rec)) {
331 ring_buffer_discard_commit(buffer, event);
332 return 1;
333 }
334
335 return 0;
336}
337EXPORT_SYMBOL_GPL(call_filter_check_discard);
Tom Zanussieb02ce02009-04-08 03:15:54 -0500338
/*
 * Read the time stamp @buf's ring buffer would assign on @cpu,
 * normalized via ring_buffer_normalize_time_stamp(). Falls back to
 * the local trace clock before the buffer is allocated.
 */
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}
352
/* Current trace time stamp for @cpu, taken from the global trace buffer. */
cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
357
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();	/* pairs with the smp_wmb() in tracer_tracing_on/off() */
	return !global_trace.buffer_disabled;
}
377
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200378/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400379 * trace_buf_size is the size in bytes that is allocated
380 * for a buffer. Note, the number of bytes is always rounded
381 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400382 *
383 * This number is purposely set to a low number of 16384.
384 * If the dump on oops happens, it will be much appreciated
385 * to not have to wait for all that output. Anyway this can be
386 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200387 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400388#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400389
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400390static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200391
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200392/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200393static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200394
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200395/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200396 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200397 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700398DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200399
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800400/*
401 * serialize the access of the ring buffer
402 *
403 * ring buffer serializes readers, but it is low level protection.
404 * The validity of the events (which returns by ring_buffer_peek() ..etc)
405 * are not protected by ring buffer.
406 *
407 * The content of events may become garbage if we allow other process consumes
408 * these events concurrently:
409 * A) the page of the consumed events may become a normal page
410 * (not reader page) in ring buffer, and this page will be rewrited
411 * by events producer.
412 * B) The page of the consumed events may become a page for splice_read,
413 * and this page will be returned to system.
414 *
415 * These primitives allow multi process access to different cpu ring buffer
416 * concurrently.
417 *
418 * These primitives don't distinguish read-only and read-consume access.
419 * Multi read-only access are also serialized.
420 */
421
#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

/*
 * Serialize access to one cpu's ring buffer, or to all of them when
 * @cpu == RING_BUFFER_ALL_CPUS. See the block comment above for the
 * reader-serialization rationale. Lock order: rwsem first, then the
 * per-cpu mutex.
 */
static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

/* Release the locks taken by trace_access_lock(), in reverse order. */
static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

/* Initialize every possible cpu's buffer-access mutex. */
static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}
459
#else

/* UP build: a single global mutex serializes all ring buffer access. */
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;	/* cpu is irrelevant with one global lock */
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

/* Nothing to set up for the single static mutex. */
static inline void trace_access_lock_init(void)
{
}

#endif
481
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400482#ifdef CONFIG_STACKTRACE
483static void __ftrace_trace_stack(struct ring_buffer *buffer,
484 unsigned long flags,
485 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400486static inline void ftrace_trace_stack(struct trace_array *tr,
487 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400488 unsigned long flags,
489 int skip, int pc, struct pt_regs *regs);
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400490
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400491#else
492static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
493 unsigned long flags,
494 int skip, int pc, struct pt_regs *regs)
495{
496}
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400497static inline void ftrace_trace_stack(struct trace_array *tr,
498 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -0400499 unsigned long flags,
500 int skip, int pc, struct pt_regs *regs)
Steven Rostedt (Red Hat)ca475e82015-09-28 09:41:11 -0400501{
502}
503
Steven Rostedt (Red Hat)d78a4612015-09-25 13:30:47 -0400504#endif
505
/*
 * Enable recording in @tr's ring buffer and clear the mirror
 * "buffer_disabled" flag that fast paths (e.g. irqsoff) consult.
 */
static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}
522
/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
534
/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip: The address of the caller
 * @str: The constant string to write
 * @size: The size of the string.
 *
 * Returns the number of bytes written, or 0 when printk tracing is
 * disabled, tracing is unavailable, or the event cannot be reserved.
 *
 * NOTE(review): @size == 0 would index entry->buf[-1] below — callers
 * appear to pass the length of a non-empty literal; confirm.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	/* Honor the runtime "printk" trace option. */
	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;	/* could not reserve space in the ring buffer */

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	/* Copy the string payload into the reserved event. */
	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	/* Also record the caller's stack trace for this event. */
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
585
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip: The address of the caller
 * @str: The constant string to write to the buffer to
 *
 * Unlike __trace_puts(), only the address of @str is recorded
 * (entry->str), not a copy of its contents. Returns 1 on success,
 * 0 when tracing is disabled or the event cannot be reserved.
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	/* Honor the runtime "printk" trace option. */
	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;	/* could not reserve space in the ring buffer */

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;	/* pointer only; string must outlive the trace */

	__buffer_unlock_commit(buffer, event);
	/* Also record the caller's stack trace for this event. */
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
625
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500626#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	/* update_max_tr() below is not NMI safe; refuse and say why. */
	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here! ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	/* Swap live and max buffers with interrupts off. */
	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500672
673static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
674 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400675static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
676
677static int alloc_snapshot(struct trace_array *tr)
678{
679 int ret;
680
681 if (!tr->allocated_snapshot) {
682
683 /* allocate spare buffer */
684 ret = resize_buffer_duplicate_size(&tr->max_buffer,
685 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
686 if (ret < 0)
687 return ret;
688
689 tr->allocated_snapshot = true;
690 }
691
692 return 0;
693}
694
/* Shrink @tr's snapshot buffer to its minimum instead of freeing it. */
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer; instead, resize it, because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500707
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);	/* allocation failure is unexpected; make it loud */

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
729
/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;	/* no buffer; nothing to snapshot */

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
/* CONFIG_TRACER_SNAPSHOT is not set: stubs that warn if anyone uses them */
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
771
/* Stop recording into @tr's ring buffer (per-instance form of tracing_off()) */
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}
788
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
802
/*
 * Stop tracing if __disable_trace_on_warning is set.
 * NOTE(review): presumably wired to the "traceoff_on_warning" option and
 * called from the warning path — confirm against the flag's definition.
 */
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}
808
Steven Rostedt499e5472012-02-22 15:50:28 -0500809/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400810 * tracer_tracing_is_on - show real state of ring buffer enabled
811 * @tr : the trace array to know if ring buffer is enabled
812 *
813 * Shows real state of the ring buffer if it is enabled or not.
814 */
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400815static int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400816{
817 if (tr->trace_buffer.buffer)
818 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
819 return !tr->buffer_disabled;
820}
821
/**
 * tracing_is_on - show state of ring buffers enabled
 *
 * Returns non-zero when the global trace buffer is recording.
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
830
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400831static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200832{
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400833 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200834
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200835 if (!str)
836 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +0800837 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200838 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +0800839 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200840 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400841 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200842 return 1;
843}
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400844__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200845
Tim Bird0e950172010-02-25 15:36:43 -0800846static int __init set_tracing_thresh(char *str)
847{
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800848 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -0800849 int ret;
850
851 if (!str)
852 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200853 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -0800854 if (ret < 0)
855 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800856 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -0800857 return 1;
858}
859__setup("tracing_thresh=", set_tracing_thresh);
860
/* Convert a nanosecond count to whole microseconds (truncating). */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	unsigned long usecs = nsecs / 1000;

	return usecs;
}
865
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
880
/* The available trace clocks; 'name' is what user space sees. */
static struct {
	u64 (*func)(void);	/* function that reads the clock */
	const char *name;	/* user-visible clock name */
	int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local, "local", 1 },
	{ trace_clock_global, "global", 1 },
	{ trace_clock_counter, "counter", 0 },
	{ trace_clock_jiffies, "uptime", 0 },
	{ trace_clock, "perf", 1 },
	{ ktime_get_mono_fast_ns, "mono", 1 },
	{ ktime_get_raw_fast_ns, "mono_raw", 1 },
	ARCH_TRACE_CLOCKS
};
895
jolsa@redhat.comb63f39e2009-09-11 17:29:27 +0200896/*
897 * trace_parser_get_init - gets the buffer for trace parser
898 */
899int trace_parser_get_init(struct trace_parser *parser, int size)
900{
901 memset(parser, 0, sizeof(*parser));
902
903 parser->buffer = kmalloc(size, GFP_KERNEL);
904 if (!parser->buffer)
905 return 1;
906
907 parser->size = size;
908 return 0;
909}
910
/*
 * trace_parser_put - frees the buffer for trace parser
 *
 * Frees only the token buffer; @parser itself is not freed.
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}
918
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* A read from offset 0 starts a fresh token: reset parser state */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		/* keep one byte free for the terminating NUL below */
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		/* Token complete: NUL-terminate and clear the continue flag */
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		/* Token continues into the next write: save partial state */
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
1004
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001005/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001006static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001007{
1008 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001009
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001010 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001011 return -EBUSY;
1012
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001013 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001014 if (cnt > len)
1015 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001016 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001017
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001018 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001019 return cnt;
1020}
1021
Tim Bird0e950172010-02-25 15:36:43 -08001022unsigned long __read_mostly tracing_thresh;
1023
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	/* Record the latency details of the task that caused the max */
	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}
1063
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 *
 * Caller must have interrupts disabled (asserted below).
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from trace_buffer */
	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

	/* Swap the live and max buffers by exchanging their pointers */
	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1104
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 *
 * Caller must have interrupts disabled (asserted below).
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001149
Rabin Vincente30f53a2014-11-10 19:46:34 +01001150static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001151{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001152 /* Iterators are static, they should be filled or empty */
1153 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001154 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001155
Rabin Vincente30f53a2014-11-10 19:46:34 +01001156 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1157 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001158}
1159
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Run @type's selftest against the global trace array.
 * Returns 0 on success (or when there is no selftest / selftests are
 * disabled) and -1 when the selftest failed.
 */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
/* Selftests compiled out: always report success */
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001225
/* Forward declarations: defined later in this file, used by register_tracer() */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);
1229
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer. Rejects unnamed, over-long-named and
 * duplicate tracers, runs the tracer's selftest, and — if this tracer
 * was requested on the command line — starts it immediately.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject a duplicate registration by name */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Install no-op flag handlers for tracers that don't provide them */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	/* Link the new tracer at the head of the list */
	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	/* Is this the tracer requested on the kernel command line? */
	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1308
/* Empty the ring buffer of @buf for a single CPU. */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	/* Keep writers out while the buffer is being reset */
	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1324
/* Empty the ring buffer of @buf on every online CPU. */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	/* Keep writers out while the buffers are being reset */
	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1345
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	/* Reset the buffers (and the max buffers) of every trace instance */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1358
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* Cache of recently seen task command lines, keyed by pid. */
struct saved_cmdlines_buffer {
	/* pid -> cmdline slot index, or NO_CMDLINE_MAP if not cached */
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	/* cmdline slot index -> owning pid (reverse of the above) */
	unsigned *map_cmdline_to_pid;
	/* cmdline slot index -> thread group id */
	unsigned *map_cmdline_to_tgid;
	/* number of cmdline slots allocated */
	unsigned cmdline_num;
	/* cursor into the cmdline slots (managed by the save path) */
	int cmdline_idx;
	/* cmdline_num strings of TASK_COMM_LEN bytes each */
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001374
/* Return the string slot for @idx in the saved_cmdlines array. */
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

/* Copy @cmdline (TASK_COMM_LEN bytes) into slot @idx. */
static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
1384
1385static int allocate_cmdlines_buffer(unsigned int val,
1386 struct saved_cmdlines_buffer *s)
1387{
1388 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1389 GFP_KERNEL);
1390 if (!s->map_cmdline_to_pid)
1391 return -ENOMEM;
1392
1393 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1394 if (!s->saved_cmdlines) {
1395 kfree(s->map_cmdline_to_pid);
1396 return -ENOMEM;
1397 }
1398
Adrian Salidocd1566d2017-04-18 11:44:33 -07001399 s->map_cmdline_to_tgid = kmalloc_array(val,
1400 sizeof(*s->map_cmdline_to_tgid),
1401 GFP_KERNEL);
1402 if (!s->map_cmdline_to_tgid) {
1403 kfree(s->map_cmdline_to_pid);
1404 kfree(s->saved_cmdlines);
1405 return -ENOMEM;
1406 }
1407
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001408 s->cmdline_idx = 0;
1409 s->cmdline_num = val;
1410 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1411 sizeof(s->map_pid_to_cmdline));
1412 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1413 val * sizeof(*s->map_cmdline_to_pid));
Adrian Salidocd1566d2017-04-18 11:44:33 -07001414 memset(s->map_cmdline_to_tgid, NO_CMDLINE_MAP,
1415 val * sizeof(*s->map_cmdline_to_tgid));
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001416
1417 return 0;
1418}
1419
1420static int trace_create_savedcmd(void)
1421{
1422 int ret;
1423
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001424 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001425 if (!savedcmd)
1426 return -ENOMEM;
1427
1428 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1429 if (ret < 0) {
1430 kfree(savedcmd);
1431 savedcmd = NULL;
1432 return -ENOMEM;
1433 }
1434
1435 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001436}
1437
/* Non-zero when tracing_stop() calls outnumber tracing_start() calls. */
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
1442
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 *
 * Start/stop calls nest: recording is only re-enabled when this call
 * balances the last outstanding tracing_stop().
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1485
/*
 * Per-instance variant of tracing_start(): re-enable recording on @tr
 * once its stop_count drops back to zero.
 */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	/* Starts nest with stops: only the outermost start re-enables. */
	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1516
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	/* Stops nest: only the first stop actually disables the buffers. */
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The max-latency snapshot buffer is disabled as well. */
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1550
1551static void tracing_stop_tr(struct trace_array *tr)
1552{
1553 struct ring_buffer *buffer;
1554 unsigned long flags;
1555
1556 /* If global, we need to also stop the max tracer */
1557 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1558 return tracing_stop();
1559
1560 raw_spin_lock_irqsave(&tr->start_lock, flags);
1561 if (tr->stop_count++)
1562 goto out;
1563
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001564 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001565 if (buffer)
1566 ring_buffer_record_disable(buffer);
1567
1568 out:
1569 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001570}
1571
/* Forward declaration for cmdline-recording control (defined elsewhere). */
void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001573
/*
 * Record @tsk's pid -> comm/tgid mapping in the savedcmd table.
 * Best effort: returns 1 when the mapping was recorded, 0 when it was
 * skipped (pid 0, pid out of range, or the lock was contended).
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* PID 0 (idle) is never saved; out-of-range pids cannot be indexed. */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/* arch_spin locks do not disable preemption for us. */
	preempt_disable();
	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock)) {
		preempt_enable();
		return 0;
	}

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* Claim the next slot in the circular cmdline buffer. */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	/* Refresh comm and tgid for the (possibly pre-existing) slot. */
	set_cmdline(idx, tsk->comm);
	savedcmd->map_cmdline_to_tgid[idx] = tsk->tgid;
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();

	/* 1 => recorded; caller may clear its per-cpu "needs save" flag. */
	return 1;
}
1620
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001621static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001622{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001623 unsigned map;
1624
Steven Rostedt4ca53082009-03-16 19:20:15 -04001625 if (!pid) {
1626 strcpy(comm, "<idle>");
1627 return;
1628 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001629
Steven Rostedt74bf4072010-01-25 15:11:53 -05001630 if (WARN_ON_ONCE(pid < 0)) {
1631 strcpy(comm, "<XXX>");
1632 return;
1633 }
1634
Steven Rostedt4ca53082009-03-16 19:20:15 -04001635 if (pid > PID_MAX_DEFAULT) {
1636 strcpy(comm, "<...>");
1637 return;
1638 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001639
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001640 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001641 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001642 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001643 else
1644 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001645}
1646
1647void trace_find_cmdline(int pid, char comm[])
1648{
1649 preempt_disable();
1650 arch_spin_lock(&trace_cmdline_lock);
1651
1652 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001653
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001654 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001655 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001656}
1657
Adrian Salidocd1566d2017-04-18 11:44:33 -07001658static int __find_tgid_locked(int pid)
Jamie Gennis6019e592012-11-21 15:04:25 -08001659{
1660 unsigned map;
1661 int tgid;
1662
Dmitry Shmidt99dd30a2015-10-28 10:45:04 -07001663 map = savedcmd->map_pid_to_cmdline[pid];
Jamie Gennis6019e592012-11-21 15:04:25 -08001664 if (map != NO_CMDLINE_MAP)
Adrian Salidocd1566d2017-04-18 11:44:33 -07001665 tgid = savedcmd->map_cmdline_to_tgid[map];
Jamie Gennis6019e592012-11-21 15:04:25 -08001666 else
1667 tgid = -1;
1668
Adrian Salidocd1566d2017-04-18 11:44:33 -07001669 return tgid;
1670}
1671
1672int trace_find_tgid(int pid)
1673{
1674 int tgid;
1675
1676 preempt_disable();
1677 arch_spin_lock(&trace_cmdline_lock);
1678
1679 tgid = __find_tgid_locked(pid);
1680
Jamie Gennis6019e592012-11-21 15:04:25 -08001681 arch_spin_unlock(&trace_cmdline_lock);
1682 preempt_enable();
1683
1684 return tgid;
1685}
1686
Ingo Molnare309b412008-05-12 21:20:51 +02001687void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001688{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001689 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001690 return;
1691
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001692 if (!__this_cpu_read(trace_cmdline_save))
1693 return;
1694
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001695 if (trace_save_cmdline(tsk))
1696 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001697}
1698
/*
 * Fill in the header fields common to every trace entry.
 * @entry: entry to initialize
 * @flags: saved irq flags, used to derive TRACE_FLAG_IRQS_OFF
 * @pc:    preempt count at event time; its low byte is stored and its
 *         hardirq/softirq bits select the context flags
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		/* No arch irqflags support: flag that irq state is unknown. */
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001719
Steven Rostedte77405a2009-09-02 14:17:06 -04001720struct ring_buffer_event *
1721trace_buffer_lock_reserve(struct ring_buffer *buffer,
1722 int type,
1723 unsigned long len,
1724 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001725{
1726 struct ring_buffer_event *event;
1727
Steven Rostedte77405a2009-09-02 14:17:06 -04001728 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001729 if (event != NULL) {
1730 struct trace_entry *ent = ring_buffer_event_data(event);
1731
1732 tracing_generic_entry_update(ent, flags, pc);
1733 ent->type = type;
1734 }
1735
1736 return event;
1737}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001738
/*
 * Commit a reserved event. A new event was recorded, so mark this CPU
 * as needing a fresh cmdline save (see tracing_record_cmdline()).
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
1745
/*
 * Commit @event, then record the kernel stack trace (if the stacktrace
 * option is set on @tr) and the user stack trace (if globally enabled).
 */
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	/* skip=6: frames to omit from the recorded stack — see ftrace_trace_stack() */
	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001757
/*
 * Scratch ring buffer used when tracing is off but event triggers still
 * need to inspect event data (see trace_event_buffer_lock_reserve()).
 */
static struct ring_buffer *temp_buffer;
1759
/*
 * Reserve an event on the buffer belonging to @trace_file's trace array,
 * falling back to temp_buffer when recording is off but a conditional
 * trigger still needs to see the event data.
 */
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1785
1786struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001787trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1788 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001789 unsigned long flags, int pc)
1790{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001791 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001792 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001793 type, len, flags, pc);
1794}
Steven Rostedt94487d62009-05-05 19:22:53 -04001795EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001796
/*
 * Commit @event, then record kernel and user stack traces if enabled.
 * @regs, when supplied, is used to unwind the kernel stack exactly;
 * otherwise a fixed number of commit-path frames is skipped.
 */
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the following callers:
	 *   trace_buffer_unlock_commit_regs
	 *   event_trigger_unlock_commit
	 *   trace_event_buffer_commit
	 *   trace_event_raw_event_sched_switch
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : 4, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001819
/* Drop a reserved-but-unwanted event instead of committing it. */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001826
/*
 * Record a function-entry event (ip, parent_ip) into @tr's main ring
 * buffer, honoring any filter attached to the function event.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	/* Commit only if the event survives the attached filter. */
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
1848
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001849#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001850
/* One page worth of return addresses per CPU for kernel stack traces. */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
/* Per-cpu nesting count of __ftrace_trace_stack() (irq/NMI reentrancy). */
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1858
/*
 * Capture a kernel stack trace and record it as a TRACE_STACK event on
 * @buffer. The first (non-nested) caller on a CPU traces into the large
 * per-cpu ftrace_stack; nested callers (irq/NMI) trace directly into
 * the ring buffer entry with the default size.
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Add two, for this function and the call to save_stack_trace()
	 * If regs is set, then these functions will not be in the way.
	 */
	if (!regs)
		trace.skip += 2;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		/* First user on this CPU: capture into the big per-cpu stack. */
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	/* Convert the entry count into a byte size for the reserve. */
	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		/* Copy the already-captured trace out of the per-cpu stack. */
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		/* Nested caller: capture directly into the ring buffer entry. */
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
1946
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001947static inline void ftrace_trace_stack(struct trace_array *tr,
1948 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001949 unsigned long flags,
1950 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05001951{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001952 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05001953 return;
1954
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001955 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05001956}
1957
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001958void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1959 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001960{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001961 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001962}
1963
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
1985
/* Per-cpu recursion guard for ftrace_trace_userstack(). */
static DEFINE_PER_CPU(int, user_stack_count);
1987
/*
 * Record the current task's user-space stack trace as a
 * TRACE_USER_STACK event on @buffer. No-op when the userstacktrace
 * option is off, in NMI context, or when already recursing.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry	= ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
2039
#ifdef UNUSED
/*
 * Record a user-space stack trace into @tr's main ring buffer.
 * Fix: ftrace_trace_userstack() takes a struct ring_buffer *, not a
 * struct trace_array * — the old body passed @tr directly and would
 * not compile if UNUSED were ever defined.
 */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr->trace_buffer.buffer, flags, preempt_count());
}
#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02002046
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02002047#endif /* CONFIG_STACKTRACE */
2048
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

/* Per-cpu trace_printk() scratch buffers, one pool per execution context. */
static struct trace_buffer_struct *trace_percpu_buffer;	/* normal (task) */
static struct trace_buffer_struct *trace_percpu_sirq_buffer;	/* softirq */
static struct trace_buffer_struct *trace_percpu_irq_buffer;	/* hardirq */
static struct trace_buffer_struct *trace_percpu_nmi_buffer;	/* NMI */
2058
/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
2066static char *get_trace_buf(void)
2067{
2068 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002069
2070 /*
2071 * If we have allocated per cpu buffers, then we do not
2072 * need to do any locking.
2073 */
2074 if (in_nmi())
2075 percpu_buffer = trace_percpu_nmi_buffer;
2076 else if (in_irq())
2077 percpu_buffer = trace_percpu_irq_buffer;
2078 else if (in_softirq())
2079 percpu_buffer = trace_percpu_sirq_buffer;
2080 else
2081 percpu_buffer = trace_percpu_buffer;
2082
2083 if (!percpu_buffer)
2084 return NULL;
2085
Shan Weid8a03492012-11-13 09:53:04 +08002086 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002087}
2088
/*
 * Allocate the four per-context trace_printk() buffer pools.
 * On any failure, pools allocated so far are released in reverse order
 * through the goto chain. Returns 0 on success, -ENOMEM otherwise.
 */
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	/* Publish the pools only after every allocation has succeeded. */
	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}
2129
/* Set once the per-cpu trace_printk() buffers have been allocated. */
static int buffers_allocated;
2131
/*
 * Allocate the per-cpu trace_printk() buffers on first use, grow the
 * ring buffers to their configured size, and begin cmdline recording
 * when called from module code (global buffer already allocated).
 */
void trace_printk_init_buffers(void)
{
	/* Idempotent: only the first successful call does the work. */
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_debug("**********************************************************\n");
	pr_debug("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_debug("**                                                      **\n");
	pr_debug("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_debug("**                                                      **\n");
	pr_debug("** This means that this is a DEBUG kernel and it is     **\n");
	pr_debug("** unsafe for production use.                           **\n");
	pr_debug("**                                                      **\n");
	pr_debug("** If you see this message and you are not debugging    **\n");
	pr_debug("** the kernel, report this immediately to your vendor!  **\n");
	pr_debug("**                                                      **\n");
	pr_debug("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_debug("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
2170
2171void trace_printk_start_comm(void)
2172{
2173 /* Start tracing comms if trace printk is set */
2174 if (!buffers_allocated)
2175 return;
2176 tracing_start_cmdline_record();
2177}
2178
2179static void trace_printk_start_stop_comm(int enabled)
2180{
2181 if (!buffers_allocated)
2182 return;
2183
2184 if (enabled)
2185 tracing_start_cmdline_record();
2186 else
2187 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002188}
2189
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip:   instruction pointer of the trace_printk() call site
 * @fmt:  printf format string (pointer is stored, not copied —
 *        must point at persistent storage, e.g. a literal)
 * @args: arguments matching @fmt
 *
 * Encodes the arguments in binary form (vbin_printf) into a percpu
 * scratch buffer, then copies them into a TRACE_BPRINT ring buffer
 * event.  Returns the number of u32 words written, or 0 on any
 * failure/overflow path.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	/* Sample preempt_count() before we disable preemption ourselves. */
	pc = preempt_count();
	preempt_disable_notrace();

	/* Percpu scratch buffer; NULL if the context-level is exhausted. */
	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* Discard on overflow or encode error rather than truncate. */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		/* skip 6 frames so the stack trace starts at the caller */
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2249
/*
 * __trace_array_vprintk - format a string into a TRACE_PRINT event
 * @buffer: destination ring buffer
 * @ip:     call-site instruction pointer recorded in the event
 * @fmt:    printf format string
 * @args:   arguments for @fmt
 *
 * Unlike trace_vbprintk(), the message is fully formatted here
 * (vscnprintf) and stored as text.  Returns the formatted length,
 * or 0 if tracing is disabled or no scratch buffer is available.
 */
__printf(3, 0)
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	/* Capture preempt_count() before disabling preemption ourselves. */
	pc = preempt_count();
	preempt_disable_notrace();


	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	/* vscnprintf() bounds len to TRACE_BUF_SIZE-1 and NUL-terminates. */
	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	size = sizeof(*entry) + len + 1;	/* +1 for the NUL */
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		/* skip 6 frames so the stack trace starts at the caller */
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
Steven Rostedt659372d2009-09-03 19:11:07 -04002300
Mathieu Malaterrebfb1c342018-03-08 21:58:43 +01002301__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002302int trace_array_vprintk(struct trace_array *tr,
2303 unsigned long ip, const char *fmt, va_list args)
2304{
2305 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2306}
2307
Mathieu Malaterrebfb1c342018-03-08 21:58:43 +01002308__printf(3, 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002309int trace_array_printk(struct trace_array *tr,
2310 unsigned long ip, const char *fmt, ...)
2311{
2312 int ret;
2313 va_list ap;
2314
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002315 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002316 return 0;
2317
2318 va_start(ap, fmt);
2319 ret = trace_array_vprintk(tr, ip, fmt, ap);
2320 va_end(ap);
2321 return ret;
2322}
2323
Mathieu Malaterrebfb1c342018-03-08 21:58:43 +01002324__printf(3, 4)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002325int trace_array_printk_buf(struct ring_buffer *buffer,
2326 unsigned long ip, const char *fmt, ...)
2327{
2328 int ret;
2329 va_list ap;
2330
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002331 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002332 return 0;
2333
2334 va_start(ap, fmt);
2335 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2336 va_end(ap);
2337 return ret;
2338}
2339
Mathieu Malaterrebfb1c342018-03-08 21:58:43 +01002340__printf(2, 0)
Steven Rostedt659372d2009-09-03 19:11:07 -04002341int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2342{
Steven Rostedta813a152009-10-09 01:41:35 -04002343 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002344}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002345EXPORT_SYMBOL_GPL(trace_vprintk);
2346
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002347static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002348{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002349 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2350
Steven Rostedt5a90f572008-09-03 17:42:51 -04002351 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002352 if (buf_iter)
2353 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002354}
2355
Ingo Molnare309b412008-05-12 21:20:51 +02002356static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002357peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2358 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002359{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002360 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002361 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002362
Steven Rostedtd7690412008-10-01 00:29:53 -04002363 if (buf_iter)
2364 event = ring_buffer_iter_peek(buf_iter, ts);
2365 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002366 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002367 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002368
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002369 if (event) {
2370 iter->ent_size = ring_buffer_event_length(event);
2371 return ring_buffer_event_data(event);
2372 }
2373 iter->ent_size = 0;
2374 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002375}
Steven Rostedtd7690412008-10-01 00:29:53 -04002376
/*
 * Find the oldest (smallest-timestamp) pending entry across the cpus
 * this iterator covers, without consuming it.
 *
 * @ent_cpu:        out, cpu the returned entry came from (may be NULL)
 * @missing_events: out, lost-event count for that cpu (may be NULL)
 * @ent_ts:         out, timestamp of the returned entry (may be NULL)
 *
 * Also leaves iter->ent_size set to the chosen entry's size.
 * Returns NULL when every covered cpu buffer is empty.
 */
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			/* peek_next_entry() set ent_size for this cpu */
			next_size = iter->ent_size;
		}
	}

	/* Restore the size of the winning entry (last peek overwrote it). */
	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
2436
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002437/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002438struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2439 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002440{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002441 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002442}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002443
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002444/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002445void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002446{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002447 iter->ent = __find_next_entry(iter, &iter->cpu,
2448 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002449
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002450 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002451 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002452
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002453 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002454}
2455
/*
 * Consume (remove) the next entry from iter->cpu's ring buffer,
 * updating iter->ts and iter->lost_events as a side effect.
 */
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
2461
/*
 * seq_file ->next() callback for the trace file: step forward to the
 * entry at position *pos, advancing the iterator as needed.  The
 * iterator can only move forward; a request for an earlier position
 * returns NULL.
 */
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	/* leftover handling is done in s_start(); it must be clear here */
	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	/* idx < 0 means the iterator has not produced anything yet */
	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	/* skip forward until we reach the requested position */
	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
2488
/*
 * tracing_iter_reset - rewind the per-cpu iterator for @cpu
 *
 * Resets the ring buffer iterator and skips any entries whose
 * timestamp predates the buffer's time_start, recording how many
 * were skipped in the per-cpu skipped_entries count (used by the
 * entry statistics in the header output).
 */
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
2518
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
/*
 * seq_file ->start() callback for the trace file.  Positions the
 * iterator at *pos, resetting all covered cpus when the position
 * moved, and takes the read-side locks that s_stop() releases.
 * Returns ERR_PTR(-EBUSY) when reading a snapshot that the current
 * tracer is actively using as its max buffer.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	/* pairs with the atomic_dec in s_stop() */
	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		/* position changed: rewind and walk forward to *pos */
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	/* released in s_stop() */
	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2583
/*
 * seq_file ->stop() callback: undo what s_start() did — re-enable
 * cmdline recording and drop the read-side locks.  The early return
 * mirrors the EBUSY path in s_start(), where no locks were taken.
 */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2599
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002600static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002601get_total_entries(struct trace_buffer *buf,
2602 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002603{
2604 unsigned long count;
2605 int cpu;
2606
2607 *total = 0;
2608 *entries = 0;
2609
2610 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002611 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002612 /*
2613 * If this buffer has skipped entries, then we hold all
2614 * entries for the trace and we need to ignore the
2615 * ones before the time stamp.
2616 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002617 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2618 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002619 /* total is the same as the entries */
2620 *total += count;
2621 } else
2622 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002623 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002624 *entries += count;
2625 }
2626}
2627
/* Emit the column legend used by the latency trace output format. */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n");
	seq_puts(m, "# / _-----=> irqs-off \n");
	seq_puts(m, "# | / _----=> need-resched \n");
	seq_puts(m, "# || / _---=> hardirq/softirq \n");
	seq_puts(m, "# ||| / _--=> preempt-depth \n");
	seq_puts(m, "# |||| / delay \n");
	seq_puts(m, "# cmd pid ||||| time | caller \n");
	seq_puts(m, "# \\ / ||||| \\ | / \n");
}
2639
/* Print the buffer entry statistics line shared by all header formats. */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long entries, total;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
2650
/* Basic (no irq-info) column header for the function trace output. */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
	seq_puts(m, "# | | | | |\n");
}
2657
/* Column header variant that also includes the TGID column. */
static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID TGID CPU# TIMESTAMP FUNCTION\n"
		    "# | | | | | |\n");
}
2664
/* Column header with the irq-info flag legend (irqs-off, etc.). */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n");
	seq_puts(m, "# / _----=> need-resched\n");
	seq_puts(m, "# | / _---=> hardirq/softirq\n");
	seq_puts(m, "# || / _--=> preempt-depth\n");
	seq_puts(m, "# ||| / delay\n");
	seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n");
	seq_puts(m, "# | | | |||| | |\n");
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002676
/* Irq-info column header variant that also includes the TGID column. */
static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n"
		    "# / _----=> need-resched\n"
		    "# | / _---=> hardirq/softirq\n"
		    "# || / _--=> preempt-depth\n"
		    "# ||| / delay\n"
		    "# TASK-PID TGID CPU# |||| TIMESTAMP FUNCTION\n"
		    "# | | | | |||| | |\n");
}
2688
Jiri Olsa62b915f2010-04-02 19:01:22 +02002689void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002690print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2691{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002692 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002693 struct trace_buffer *buf = iter->trace_buffer;
2694 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002695 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002696 unsigned long entries;
2697 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002698 const char *name = "preemption";
2699
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002700 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002701
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002702 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002703
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002704 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002705 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002706 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002707 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002708 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002709 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002710 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002711 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002712 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002713 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002714#if defined(CONFIG_PREEMPT_NONE)
2715 "server",
2716#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2717 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002718#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002719 "preempt",
2720#else
2721 "unknown",
2722#endif
2723 /* These are reserved for later use */
2724 0, 0, 0, 0);
2725#ifdef CONFIG_SMP
2726 seq_printf(m, " #P:%d)\n", num_online_cpus());
2727#else
2728 seq_puts(m, ")\n");
2729#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002730 seq_puts(m, "# -----------------\n");
2731 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002732 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002733 data->comm, data->pid,
2734 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002735 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002736 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002737
2738 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002739 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002740 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2741 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002742 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002743 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2744 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002745 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002746 }
2747
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002748 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002749}
2750
/*
 * When the annotate option is set, emit a "CPU N buffer started"
 * marker the first time an entry from a given cpu is printed.
 * Each early return below is a condition under which no marker is
 * wanted: option off, file not annotating, cpu already marked, or
 * the cpu had entries skipped (buffer started before time_start).
 */
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	/* remember this cpu so the marker is printed only once */
	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
2776
/*
 * Default (human readable) output for one trace entry: optional context
 * columns, then the event's own trace() formatter.  Falls back to an
 * "Unknown type" line when no trace_event is registered for the entry.
 */
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	/* Possibly annotate the start of this CPU's buffer. */
	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	/* Context columns may already have overflowed the seq buffer. */
	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
2808
/*
 * "raw" output format (TRACE_ITER_RAW): minimal "pid cpu ts" prefix
 * followed by the event's raw() formatter, or "type ?" when the event
 * type is unknown.
 */
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	/* Unknown event type: print the type number with a '?'. */
	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}
2833
/*
 * "hex" output format (TRACE_ITER_HEX): pid/cpu/timestamp as hex fields,
 * then the event's hex() formatter, terminated with a newline byte.
 */
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		/* Propagate partial/unhandled results unchanged. */
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}
2863
/*
 * "bin" output format (TRACE_ITER_BIN): binary pid/cpu/timestamp fields,
 * then the event's binary() formatter.  Unknown event types are silently
 * treated as handled.
 */
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
2885
Jiri Olsa62b915f2010-04-02 19:01:22 +02002886int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002887{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002888 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002889 int cpu;
2890
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002891 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002892 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002893 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002894 buf_iter = trace_buffer_iter(iter, cpu);
2895 if (buf_iter) {
2896 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002897 return 0;
2898 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002899 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002900 return 0;
2901 }
2902 return 1;
2903 }
2904
Steven Rostedtab464282008-05-12 21:21:00 +02002905 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002906 buf_iter = trace_buffer_iter(iter, cpu);
2907 if (buf_iter) {
2908 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002909 return 0;
2910 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002911 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002912 return 0;
2913 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002914 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002915
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002916 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002917}
2918
/* Called with trace_event_read_lock() held. */
/*
 * Format the iterator's current entry into iter->seq, dispatching in
 * priority order: lost-event notice, tracer-specific print_line(),
 * printk-msg-only shortcuts (bputs/bprint/print), then the bin/hex/raw
 * formats, and finally the default human-readable format.
 */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	/* Report events dropped (e.g. by overwrite) before this entry. */
	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Give the current tracer first shot at formatting the line. */
	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
2965
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002966void trace_latency_header(struct seq_file *m)
2967{
2968 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002969 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002970
2971 /* print nothing if the buffers are empty */
2972 if (trace_empty(iter))
2973 return;
2974
2975 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2976 print_trace_header(m, iter);
2977
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002978 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002979 print_lat_help_header(m);
2980}
2981
Jiri Olsa62b915f2010-04-02 19:01:22 +02002982void trace_default_header(struct seq_file *m)
2983{
2984 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002985 struct trace_array *tr = iter->tr;
2986 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02002987
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002988 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2989 return;
2990
Jiri Olsa62b915f2010-04-02 19:01:22 +02002991 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2992 /* print nothing if the buffers are empty */
2993 if (trace_empty(iter))
2994 return;
2995 print_trace_header(m, iter);
2996 if (!(trace_flags & TRACE_ITER_VERBOSE))
2997 print_lat_help_header(m);
2998 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002999 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3000 if (trace_flags & TRACE_ITER_IRQ_INFO)
Jamie Gennis6019e592012-11-21 15:04:25 -08003001 if (trace_flags & TRACE_ITER_TGID)
3002 print_func_help_header_irq_tgid(iter->trace_buffer, m);
3003 else
3004 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003005 else
Jamie Gennis6019e592012-11-21 15:04:25 -08003006 if (trace_flags & TRACE_ITER_TGID)
3007 print_func_help_header_tgid(iter->trace_buffer, m);
3008 else
3009 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05003010 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02003011 }
3012}
3013
/*
 * Warn in the header output when function tracing has been killed
 * (ftrace_is_dead()), since function events may then be missing.
 */
static void test_ftrace_alive(struct seq_file *m)
{
	if (ftrace_is_dead())
		seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
			    "# MAY BE MISSING FUNCTION EVENTS\n");
}
3021
#ifdef CONFIG_TRACER_MAX_TRACE
/* Help text for the top-level (all-CPU) "snapshot" file. */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05003032
/*
 * Help text for a per-cpu "snapshot" file; the '1' command is only
 * supported when CONFIG_RING_BUFFER_ALLOW_SWAP is enabled.
 */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}
3047
/*
 * Print snapshot state and usage help at the top of an (empty) snapshot
 * file, choosing the main or per-cpu variant from iter->cpu_file.
 */
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
3065
/*
 * seq_file ->show() for the "trace" file.  With no current entry it
 * emits the appropriate header (tracer name, snapshot help, tracer
 * header, or the default header); otherwise it flushes leftover output
 * from a previous overflow or formats the current entry.
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
3109
Oleg Nesterov649e9c72013-07-23 17:25:54 +02003110/*
3111 * Should be used after trace_array_get(), trace_types_lock
3112 * ensures that i_cdev was already initialized.
3113 */
3114static inline int tracing_get_cpu(struct inode *inode)
3115{
3116 if (inode->i_cdev) /* See trace_create_cpu_file() */
3117 return (long)inode->i_cdev - 1;
3118 return RING_BUFFER_ALL_CPUS;
3119}
3120
/* seq_file operations backing the "trace" file. */
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
3127
/*
 * Allocate and initialize a trace_iterator for reading the "trace" (or
 * "snapshot") file.  Copies the current tracer (so concurrent tracer
 * changes can't race the reader), selects the main or max buffer,
 * stops tracing unless opening a snapshot, and prepares ring-buffer
 * read iterators for the requested CPU(s).
 *
 * Returns the iterator, or ERR_PTR(-ENODEV/-ENOMEM) on failure; cleanup
 * on the failure paths is handled by the fail/release labels.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	/* Tracks which CPUs have had their "buffer started" annotation. */
	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		/* Prepare all iterators first, then sync once and start. */
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer,
							 cpu, GFP_KERNEL);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer,
						 cpu, GFP_KERNEL);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
3224
/*
 * Generic open for tracefs files: stash inode->i_private in
 * file->private_data.  Fails with -ENODEV once tracing is disabled.
 */
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}
3233
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003234bool tracing_is_disabled(void)
3235{
3236 return (tracing_disabled) ? true: false;
3237}
3238
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003239/*
3240 * Open and update trace_array ref count.
3241 * Must have the current trace_array passed to it.
3242 */
Steven Rostedt (Red Hat)dcc30222013-07-02 20:30:52 -04003243static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003244{
3245 struct trace_array *tr = inode->i_private;
3246
3247 if (tracing_disabled)
3248 return -ENODEV;
3249
3250 if (trace_array_get(tr) < 0)
3251 return -ENODEV;
3252
3253 filp->private_data = inode->i_private;
3254
3255 return 0;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003256}
3257
/*
 * Release for the "trace" file: tear down the iterator created by
 * __tracing_open() (ring-buffer read iterators, tracer copy, cpumask),
 * restart tracing if this reader had stopped it, and drop the
 * trace_array reference.  Write-only opens have no iterator and only
 * drop the reference.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3298
/* Release pairing with tracing_open_generic_tr(): drop the array ref. */
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}
3306
/*
 * Release for single_open()-style files that also hold a trace_array
 * reference: drop the ref, then let single_release() free the seq_file.
 */
static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
3315
/*
 * Open for the "trace" file.  An O_TRUNC write open erases the relevant
 * buffer (per-cpu or all CPUs; the max buffer when the tracer prints
 * max latencies).  A read open builds the full iterator via
 * __tracing_open().  Holds a trace_array reference on success.
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	/* Drop the reference taken above if the open failed. */
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3354
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003355/*
3356 * Some tracers are not suitable for instance buffers.
3357 * A tracer is always available for the global array (toplevel)
3358 * or if it explicitly states that it is.
3359 */
3360static bool
3361trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3362{
3363 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3364}
3365
3366/* Find the next tracer that this trace array may use */
3367static struct tracer *
3368get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3369{
3370 while (t && !trace_ok_for_array(t, tr))
3371 t = t->next;
3372
3373 return t;
3374}
3375
Ingo Molnare309b412008-05-12 21:20:51 +02003376static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003377t_next(struct seq_file *m, void *v, loff_t *pos)
3378{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003379 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003380 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003381
3382 (*pos)++;
3383
3384 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003385 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003386
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003387 return t;
3388}
3389
/*
 * seq_file .start callback for the tracer list.
 *
 * Takes trace_types_lock to protect the global tracer list; the lock
 * is held across the whole seq iteration and released in t_stop().
 * Walks forward from the head to the element at *pos, skipping
 * tracers this array may not use.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}
3404
/* seq_file .stop callback: drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
3409
3410static int t_show(struct seq_file *m, void *v)
3411{
3412 struct tracer *t = v;
3413
3414 if (!t)
3415 return 0;
3416
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003417 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003418 if (t->next)
3419 seq_putc(m, ' ');
3420 else
3421 seq_putc(m, '\n');
3422
3423 return 0;
3424}
3425
James Morris88e9d342009-09-22 16:43:43 -07003426static const struct seq_operations show_traces_seq_ops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003427 .start = t_start,
3428 .next = t_next,
3429 .stop = t_stop,
3430 .show = t_show,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003431};
3432
/*
 * Open callback for "available_tracers".
 *
 * Pins the trace array with trace_array_get() so it cannot go away
 * while the file is open (the reference is dropped either here on
 * seq_open() failure or in show_traces_release()), then stashes the
 * array in the seq_file private pointer for the iterator callbacks.
 */
static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret) {
		/* Undo the reference taken above on failure. */
		trace_array_put(tr);
		return ret;
	}

	m = file->private_data;
	m->private = tr;

	return 0;
}
3456
Steven Rostedt (VMware)35fa86e2019-10-11 18:19:17 -04003457static int show_traces_release(struct inode *inode, struct file *file)
3458{
3459 struct trace_array *tr = inode->i_private;
3460
3461 trace_array_put(tr);
3462 return seq_release(inode, file);
3463}
3464
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003465static ssize_t
3466tracing_write_stub(struct file *filp, const char __user *ubuf,
3467 size_t count, loff_t *ppos)
3468{
3469 return count;
3470}
3471
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003472loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003473{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003474 int ret;
3475
Slava Pestov364829b2010-11-24 15:13:16 -08003476 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003477 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003478 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003479 file->f_pos = ret = 0;
3480
3481 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003482}
3483
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003484static const struct file_operations tracing_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003485 .open = tracing_open,
3486 .read = seq_read,
Steven Rostedt4acd4d02009-03-18 10:40:24 -04003487 .write = tracing_write_stub,
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003488 .llseek = tracing_lseek,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02003489 .release = tracing_release,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003490};
3491
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003492static const struct file_operations show_traces_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003493 .open = show_traces_open,
3494 .read = seq_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003495 .llseek = seq_lseek,
Steven Rostedt (VMware)35fa86e2019-10-11 18:19:17 -04003496 .release = show_traces_release,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003497};
3498
/*
 * Read handler for "tracing_cpumask".
 *
 * First runs snprintf(NULL, 0, ...) purely to size the output, then
 * allocates a buffer of exactly that size (+1 for the NUL), formats
 * the array's CPU mask with the "%*pb" bitmap specifier and copies it
 * to user space.
 *
 * Returns -ENOMEM if the temporary allocation fails, -EINVAL if the
 * user buffer is too small for the whole mask, otherwise the number
 * of bytes copied.
 */
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}
3526
/*
 * Write handler for "tracing_cpumask".
 *
 * Parses a CPU mask from user space and applies it to the array.  The
 * per-CPU enable/disable transitions are done with interrupts off and
 * tr->max_lock held so the update is not interleaved with a max-trace
 * swap; only CPUs whose mask bit actually flips are touched.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			/* CPU bit cleared: stop recording on this CPU. */
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			/* CPU bit set: resume recording on this CPU. */
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3573
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003574static const struct file_operations tracing_cpumask_fops = {
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003575 .open = tracing_open_generic_tr,
Ingo Molnarc7078de2008-05-12 21:20:52 +02003576 .read = tracing_cpumask_read,
3577 .write = tracing_cpumask_write,
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003578 .release = tracing_release_generic_tr,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003579 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003580};
3581
Li Zefanfdb372e2009-12-08 11:15:59 +08003582static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003583{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003584 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003585 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003586 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003587 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003588
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003589 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003590 tracer_flags = tr->current_trace->flags->val;
3591 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003592
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003593 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003594 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003595 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003596 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003597 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003598 }
3599
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003600 for (i = 0; trace_opts[i].name; i++) {
3601 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003602 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003603 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003604 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003605 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003606 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003607
Li Zefanfdb372e2009-12-08 11:15:59 +08003608 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003609}
3610
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003611static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003612 struct tracer_flags *tracer_flags,
3613 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003614{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003615 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003616 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003617
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003618 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003619 if (ret)
3620 return ret;
3621
3622 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003623 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003624 else
Zhaolei77708412009-08-07 18:53:21 +08003625 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003626 return 0;
3627}
3628
Li Zefan8d18eaa2009-12-08 11:17:06 +08003629/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003630static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003631{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003632 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003633 struct tracer_flags *tracer_flags = trace->flags;
3634 struct tracer_opt *opts = NULL;
3635 int i;
3636
3637 for (i = 0; tracer_flags->opts[i].name; i++) {
3638 opts = &tracer_flags->opts[i];
3639
3640 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003641 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003642 }
3643
3644 return -EINVAL;
3645}
3646
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003647/* Some tracers require overwrite to stay enabled */
3648int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3649{
3650 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3651 return -1;
3652
3653 return 0;
3654}
3655
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003656int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003657{
3658 /* do nothing if flag is already set */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003659 if (!!(tr->trace_flags & mask) == !!enabled)
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003660 return 0;
3661
3662 /* Give the tracer a chance to approve the change */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003663 if (tr->current_trace->flag_changed)
Steven Rostedt (Red Hat)bf6065b2014-01-10 17:51:01 -05003664 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003665 return -EINVAL;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003666
3667 if (enabled)
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003668 tr->trace_flags |= mask;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003669 else
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003670 tr->trace_flags &= ~mask;
Li Zefane870e9a2010-07-02 11:07:32 +08003671
3672 if (mask == TRACE_ITER_RECORD_CMD)
3673 trace_event_enable_cmd_record(enabled);
David Sharp750912f2010-12-08 13:46:47 -08003674
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003675 if (mask == TRACE_ITER_OVERWRITE) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003676 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003677#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05003678 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
Steven Rostedt (Red Hat)80902822013-03-14 14:20:54 -04003679#endif
3680 }
Steven Rostedt81698832012-10-11 10:15:05 -04003681
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04003682 if (mask == TRACE_ITER_PRINTK) {
Steven Rostedt81698832012-10-11 10:15:05 -04003683 trace_printk_start_stop_comm(enabled);
Steven Rostedt (Red Hat)b9f91082015-09-29 18:21:35 -04003684 trace_printk_control(enabled);
3685 }
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003686
3687 return 0;
Steven Rostedtaf4617b2009-03-17 18:09:55 -04003688}
3689
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003690static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003691{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003692 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003693 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003694 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003695 int i;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003696 size_t orig_len = strlen(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003697
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003698 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003699
Li Zefan8d18eaa2009-12-08 11:17:06 +08003700 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003701 neg = 1;
3702 cmp += 2;
3703 }
3704
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003705 mutex_lock(&trace_types_lock);
3706
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003707 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003708 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003709 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003710 break;
3711 }
3712 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003713
3714 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003715 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003716 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003717
3718 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003719
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003720 /*
3721 * If the first trailing whitespace is replaced with '\0' by strstrip,
3722 * turn it back into a space.
3723 */
3724 if (orig_len > strlen(option))
3725 option[strlen(option)] = ' ';
3726
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003727 return ret;
3728}
3729
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003730static void __init apply_trace_boot_options(void)
3731{
3732 char *buf = trace_boot_options_buf;
3733 char *option;
3734
3735 while (true) {
3736 option = strsep(&buf, ",");
3737
3738 if (!option)
3739 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003740
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05003741 if (*option)
3742 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003743
3744 /* Put back the comma to allow this to be called again */
3745 if (buf)
3746 *(buf - 1) = ',';
3747 }
3748}
3749
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003750static ssize_t
3751tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3752 size_t cnt, loff_t *ppos)
3753{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003754 struct seq_file *m = filp->private_data;
3755 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003756 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003757 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003758
3759 if (cnt >= sizeof(buf))
3760 return -EINVAL;
3761
3762 if (copy_from_user(&buf, ubuf, cnt))
3763 return -EFAULT;
3764
Steven Rostedta8dd2172013-01-09 20:54:17 -05003765 buf[cnt] = 0;
3766
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003767 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003768 if (ret < 0)
3769 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003770
Jiri Olsacf8517c2009-10-23 19:36:16 -04003771 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003772
3773 return cnt;
3774}
3775
Li Zefanfdb372e2009-12-08 11:15:59 +08003776static int tracing_trace_options_open(struct inode *inode, struct file *file)
3777{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003778 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003779 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003780
Li Zefanfdb372e2009-12-08 11:15:59 +08003781 if (tracing_disabled)
3782 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003783
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003784 if (trace_array_get(tr) < 0)
3785 return -ENODEV;
3786
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003787 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3788 if (ret < 0)
3789 trace_array_put(tr);
3790
3791 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003792}
3793
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003794static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003795 .open = tracing_trace_options_open,
3796 .read = seq_read,
3797 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003798 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003799 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003800};
3801
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003802static const char readme_msg[] =
3803 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003804 "# echo 0 > tracing_on : quick way to disable tracing\n"
3805 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3806 " Important files:\n"
3807 " trace\t\t\t- The static contents of the buffer\n"
3808 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3809 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3810 " current_tracer\t- function and latency tracers\n"
3811 " available_tracers\t- list of configured tracers for current_tracer\n"
3812 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3813 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3814 " trace_clock\t\t-change the clock used to order events\n"
3815 " local: Per cpu clock but may not be synced across CPUs\n"
3816 " global: Synced across CPUs but slows tracing down.\n"
3817 " counter: Not a clock, but just an increment\n"
3818 " uptime: Jiffy counter from time of boot\n"
3819 " perf: Same clock that perf events use\n"
3820#ifdef CONFIG_X86_64
3821 " x86-tsc: TSC cycle counter\n"
3822#endif
3823 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3824 " tracing_cpumask\t- Limit which CPUs to trace\n"
3825 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3826 "\t\t\t Remove sub-buffer with rmdir\n"
3827 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003828 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
3829 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003830 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003831#ifdef CONFIG_DYNAMIC_FTRACE
3832 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003833 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3834 "\t\t\t functions\n"
3835 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3836 "\t modules: Can select a group via module\n"
3837 "\t Format: :mod:<module-name>\n"
3838 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3839 "\t triggers: a command to perform when function is hit\n"
3840 "\t Format: <function>:<trigger>[:count]\n"
3841 "\t trigger: traceon, traceoff\n"
3842 "\t\t enable_event:<system>:<event>\n"
3843 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003844#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003845 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003846#endif
3847#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003848 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003849#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003850 "\t\t dump\n"
3851 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003852 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3853 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3854 "\t The first one will disable tracing every time do_fault is hit\n"
3855 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3856 "\t The first time do trap is hit and it disables tracing, the\n"
3857 "\t counter will decrement to 2. If tracing is already disabled,\n"
3858 "\t the counter will not decrement. It only decrements when the\n"
3859 "\t trigger did work\n"
3860 "\t To remove trigger without count:\n"
3861 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3862 "\t To remove trigger with a count:\n"
3863 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003864 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003865 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3866 "\t modules: Can select a group via module command :mod:\n"
3867 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003868#endif /* CONFIG_DYNAMIC_FTRACE */
3869#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003870 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3871 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003872#endif
3873#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3874 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003875 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003876 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3877#endif
3878#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003879 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3880 "\t\t\t snapshot buffer. Read the contents for more\n"
3881 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003882#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003883#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003884 " stack_trace\t\t- Shows the max stack trace when active\n"
3885 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003886 "\t\t\t Write into this file to reset the max size (trigger a\n"
3887 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003888#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003889 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3890 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003891#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003892#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003893 " events/\t\t- Directory containing all trace event subsystems:\n"
3894 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3895 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003896 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3897 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003898 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003899 " events/<system>/<event>/\t- Directory containing control files for\n"
3900 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003901 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3902 " filter\t\t- If set, only events passing filter are traced\n"
3903 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003904 "\t Format: <trigger>[:count][if <filter>]\n"
3905 "\t trigger: traceon, traceoff\n"
3906 "\t enable_event:<system>:<event>\n"
3907 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003908#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003909 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003910#endif
3911#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003912 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003913#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003914 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3915 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3916 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3917 "\t events/block/block_unplug/trigger\n"
3918 "\t The first disables tracing every time block_unplug is hit.\n"
3919 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3920 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3921 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3922 "\t Like function triggers, the counter is only decremented if it\n"
3923 "\t enabled or disabled tracing.\n"
3924 "\t To remove a trigger without a count:\n"
3925 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3926 "\t To remove a trigger with a count:\n"
3927 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3928 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003929;
3930
3931static ssize_t
3932tracing_readme_read(struct file *filp, char __user *ubuf,
3933 size_t cnt, loff_t *ppos)
3934{
3935 return simple_read_from_buffer(ubuf, cnt, ppos,
3936 readme_msg, strlen(readme_msg));
3937}
3938
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003939static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003940 .open = tracing_open_generic,
3941 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003942 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003943};
3944
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003945static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003946{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003947 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003948
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003949 if (*pos || m->count)
3950 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003951
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003952 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003953
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003954 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3955 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003956 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003957 continue;
3958
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003959 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003960 }
3961
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003962 return NULL;
3963}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003964
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003965static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3966{
3967 void *v;
3968 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003969
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003970 preempt_disable();
3971 arch_spin_lock(&trace_cmdline_lock);
3972
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003973 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003974 while (l <= *pos) {
3975 v = saved_cmdlines_next(m, v, &l);
3976 if (!v)
3977 return NULL;
3978 }
3979
3980 return v;
3981}
3982
/*
 * seq_file .stop callback: release the lock and re-enable preemption
 * taken in saved_cmdlines_start(), in the reverse order.
 */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
3988
3989static int saved_cmdlines_show(struct seq_file *m, void *v)
3990{
3991 char buf[TASK_COMM_LEN];
3992 unsigned int *pid = v;
3993
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003994 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003995 seq_printf(m, "%d %s\n", *pid, buf);
3996 return 0;
3997}
3998
/* seq_file iteration callbacks for the saved_cmdlines interface */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
4005
static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	/* Refuse new readers once tracing has been hard-disabled */
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}
4013
/* File operations for the saved_cmdlines file (read-only seq_file) */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
4020
4021static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004022tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4023 size_t cnt, loff_t *ppos)
4024{
4025 char buf[64];
4026 int r;
Adrian Salidocd1566d2017-04-18 11:44:33 -07004027 unsigned int n;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004028
Adrian Salidocd1566d2017-04-18 11:44:33 -07004029 preempt_disable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004030 arch_spin_lock(&trace_cmdline_lock);
Adrian Salidocd1566d2017-04-18 11:44:33 -07004031 n = savedcmd->cmdline_num;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004032 arch_spin_unlock(&trace_cmdline_lock);
Adrian Salidocd1566d2017-04-18 11:44:33 -07004033 preempt_enable();
4034
4035 r = scnprintf(buf, sizeof(buf), "%u\n", n);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004036
4037 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4038}
4039
4040static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4041{
4042 kfree(s->saved_cmdlines);
4043 kfree(s->map_cmdline_to_pid);
Adrian Salidocd1566d2017-04-18 11:44:33 -07004044 kfree(s->map_cmdline_to_tgid);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004045 kfree(s);
4046}
4047
4048static int tracing_resize_saved_cmdlines(unsigned int val)
4049{
4050 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4051
Namhyung Kima6af8fb2014-06-10 16:11:35 +09004052 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004053 if (!s)
4054 return -ENOMEM;
4055
4056 if (allocate_cmdlines_buffer(val, s) < 0) {
4057 kfree(s);
4058 return -ENOMEM;
4059 }
4060
Adrian Salidocd1566d2017-04-18 11:44:33 -07004061 preempt_disable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004062 arch_spin_lock(&trace_cmdline_lock);
4063 savedcmd_temp = savedcmd;
4064 savedcmd = s;
4065 arch_spin_unlock(&trace_cmdline_lock);
Adrian Salidocd1566d2017-04-18 11:44:33 -07004066 preempt_enable();
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004067 free_saved_cmdlines_buffer(savedcmd_temp);
4068
4069 return 0;
4070}
4071
4072static ssize_t
4073tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4074 size_t cnt, loff_t *ppos)
4075{
4076 unsigned long val;
4077 int ret;
4078
4079 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4080 if (ret)
4081 return ret;
4082
4083 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4084 if (!val || val > PID_MAX_DEFAULT)
4085 return -EINVAL;
4086
4087 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4088 if (ret < 0)
4089 return ret;
4090
4091 *ppos += cnt;
4092
4093 return cnt;
4094}
4095
/* File operations for the saved_cmdlines_size file (read/write) */
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
4101
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004102#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4103static union trace_enum_map_item *
4104update_enum_map(union trace_enum_map_item *ptr)
4105{
4106 if (!ptr->map.enum_string) {
4107 if (ptr->tail.next) {
4108 ptr = ptr->tail.next;
4109 /* Set ptr to the next real item (skip head) */
4110 ptr++;
4111 } else
4112 return NULL;
4113 }
4114 return ptr;
4115}
4116
4117static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4118{
4119 union trace_enum_map_item *ptr = v;
4120
4121 /*
4122 * Paranoid! If ptr points to end, we don't want to increment past it.
4123 * This really should never happen.
4124 */
4125 ptr = update_enum_map(ptr);
4126 if (WARN_ON_ONCE(!ptr))
4127 return NULL;
4128
4129 ptr++;
4130
4131 (*pos)++;
4132
4133 ptr = update_enum_map(ptr);
4134
4135 return ptr;
4136}
4137
4138static void *enum_map_start(struct seq_file *m, loff_t *pos)
4139{
4140 union trace_enum_map_item *v;
4141 loff_t l = 0;
4142
4143 mutex_lock(&trace_enum_mutex);
4144
4145 v = trace_enum_maps;
4146 if (v)
4147 v++;
4148
4149 while (v && l < *pos) {
4150 v = enum_map_next(m, v, &l);
4151 }
4152
4153 return v;
4154}
4155
static void enum_map_stop(struct seq_file *m, void *v)
{
	/* Pairs with the mutex_lock() in enum_map_start() */
	mutex_unlock(&trace_enum_mutex);
}
4160
4161static int enum_map_show(struct seq_file *m, void *v)
4162{
4163 union trace_enum_map_item *ptr = v;
4164
4165 seq_printf(m, "%s %ld (%s)\n",
4166 ptr->map.enum_string, ptr->map.enum_value,
4167 ptr->map.system);
4168
4169 return 0;
4170}
4171
/* seq_file iteration callbacks for the enum_map file */
static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};
4178
static int tracing_enum_map_open(struct inode *inode, struct file *filp)
{
	/* Refuse new readers once tracing has been hard-disabled */
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_enum_map_seq_ops);
}
4186
/* File operations for the enum_map file (read-only seq_file) */
static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
4193
/*
 * Each sub-array is laid out as: head item, @head.length map items,
 * tail item. Given @ptr at the head, return a pointer to the tail.
 */
static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}
4200
4201static void
4202trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4203 int len)
4204{
4205 struct trace_enum_map **stop;
4206 struct trace_enum_map **map;
4207 union trace_enum_map_item *map_array;
4208 union trace_enum_map_item *ptr;
4209
4210 stop = start + len;
4211
4212 /*
4213 * The trace_enum_maps contains the map plus a head and tail item,
4214 * where the head holds the module and length of array, and the
4215 * tail holds a pointer to the next list.
4216 */
4217 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4218 if (!map_array) {
4219 pr_warning("Unable to allocate trace enum mapping\n");
4220 return;
4221 }
4222
4223 mutex_lock(&trace_enum_mutex);
4224
4225 if (!trace_enum_maps)
4226 trace_enum_maps = map_array;
4227 else {
4228 ptr = trace_enum_maps;
4229 for (;;) {
4230 ptr = trace_enum_jmp_to_tail(ptr);
4231 if (!ptr->tail.next)
4232 break;
4233 ptr = ptr->tail.next;
4234
4235 }
4236 ptr->tail.next = map_array;
4237 }
4238 map_array->head.mod = mod;
4239 map_array->head.length = len;
4240 map_array++;
4241
4242 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4243 map_array->map = **map;
4244 map_array++;
4245 }
4246 memset(map_array, 0, sizeof(*map_array));
4247
4248 mutex_unlock(&trace_enum_mutex);
4249}
4250
/* Create the read-only "enum_map" file under @d_tracer */
static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}
4256
#else /* CONFIG_TRACE_ENUM_MAP_FILE */
/* Without the enum_map file, these become no-ops */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4262
/*
 * Register @len enum maps from @mod: update the trace events'
 * enum-to-value conversions and (if configured) the enum_map file.
 */
static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	/* Nothing to do for an empty map section */
	if (len <= 0)
		return;

	trace_event_enum_update(start, len);

	trace_insert_enum_map_file(mod, start, len);
}
4277
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004278static ssize_t
Jamie Gennis6019e592012-11-21 15:04:25 -08004279tracing_saved_tgids_read(struct file *file, char __user *ubuf,
4280 size_t cnt, loff_t *ppos)
4281{
4282 char *file_buf;
4283 char *buf;
4284 int len = 0;
Jamie Gennis6019e592012-11-21 15:04:25 -08004285 int i;
Adrian Salidocd1566d2017-04-18 11:44:33 -07004286 int *pids;
4287 int n = 0;
Jamie Gennis6019e592012-11-21 15:04:25 -08004288
Adrian Salidocd1566d2017-04-18 11:44:33 -07004289 preempt_disable();
4290 arch_spin_lock(&trace_cmdline_lock);
4291
4292 pids = kmalloc_array(savedcmd->cmdline_num, 2*sizeof(int), GFP_KERNEL);
4293 if (!pids) {
4294 arch_spin_unlock(&trace_cmdline_lock);
4295 preempt_enable();
Jamie Gennis6019e592012-11-21 15:04:25 -08004296 return -ENOMEM;
Adrian Salidocd1566d2017-04-18 11:44:33 -07004297 }
Jamie Gennis6019e592012-11-21 15:04:25 -08004298
Adrian Salidocd1566d2017-04-18 11:44:33 -07004299 for (i = 0; i < savedcmd->cmdline_num; i++) {
4300 int pid;
Jamie Gennis6019e592012-11-21 15:04:25 -08004301
Dmitry Shmidt99dd30a2015-10-28 10:45:04 -07004302 pid = savedcmd->map_cmdline_to_pid[i];
Jamie Gennis6019e592012-11-21 15:04:25 -08004303 if (pid == -1 || pid == NO_CMDLINE_MAP)
4304 continue;
4305
Adrian Salidocd1566d2017-04-18 11:44:33 -07004306 pids[n] = pid;
4307 pids[n+1] = __find_tgid_locked(pid);
4308 n += 2;
4309 }
4310 arch_spin_unlock(&trace_cmdline_lock);
4311 preempt_enable();
4312
4313 if (n == 0) {
4314 kfree(pids);
4315 return 0;
4316 }
4317
4318 /* enough to hold max pair of pids + space, lr and nul */
4319 len = n * 12;
4320 file_buf = kmalloc(len, GFP_KERNEL);
4321 if (!file_buf) {
4322 kfree(pids);
4323 return -ENOMEM;
4324 }
4325
4326 buf = file_buf;
4327 for (i = 0; i < n && len > 0; i += 2) {
4328 int r;
4329
4330 r = snprintf(buf, len, "%d %d\n", pids[i], pids[i+1]);
Jamie Gennis6019e592012-11-21 15:04:25 -08004331 buf += r;
Adrian Salidocd1566d2017-04-18 11:44:33 -07004332 len -= r;
Jamie Gennis6019e592012-11-21 15:04:25 -08004333 }
4334
4335 len = simple_read_from_buffer(ubuf, cnt, ppos,
Adrian Salidocd1566d2017-04-18 11:44:33 -07004336 file_buf, buf - file_buf);
Jamie Gennis6019e592012-11-21 15:04:25 -08004337
4338 kfree(file_buf);
Adrian Salidocd1566d2017-04-18 11:44:33 -07004339 kfree(pids);
Jamie Gennis6019e592012-11-21 15:04:25 -08004340
4341 return len;
4342}
4343
/* File operations for the saved_tgids file (read-only) */
static const struct file_operations tracing_saved_tgids_fops = {
	.open	= tracing_open_generic,
	.read	= tracing_saved_tgids_read,
	.llseek	= generic_file_llseek,
};
4349
4350static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004351tracing_set_trace_read(struct file *filp, char __user *ubuf,
4352 size_t cnt, loff_t *ppos)
4353{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004354 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004355 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004356 int r;
4357
4358 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004359 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004360 mutex_unlock(&trace_types_lock);
4361
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004362 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004363}
4364
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	/* Give the new tracer a clean ring buffer on every CPU */
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}
4370
/* Record @val as the per-cpu entry count on every tracing CPU of @buf */
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
4378
#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @tr's buffer to the size of @size_tr's entries */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* Mirror every CPU's entry count; stop on first failure */
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		/* Single-CPU case: resize and mirror just that CPU */
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004406
/*
 * Resize @tr's main ring buffer (and, when the current tracer uses it,
 * the max/snapshot buffer) to @size bytes for @cpu, or for all CPUs
 * when @cpu == RING_BUFFER_ALL_CPUS. Returns 0 or a negative errno.
 */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Only the global array with a max_tr-using tracer needs more work */
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		/* max buffer resize failed: try to roll the main buffer back */
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
4472
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004473static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4474 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004475{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004476 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004477
4478 mutex_lock(&trace_types_lock);
4479
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004480 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4481 /* make sure, this cpu is enabled in the mask */
4482 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4483 ret = -EINVAL;
4484 goto out;
4485 }
4486 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004487
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004488 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004489 if (ret < 0)
4490 ret = -ENOMEM;
4491
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004492out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004493 mutex_unlock(&trace_types_lock);
4494
4495 return ret;
4496}
4497
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004498
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	/* Only the first expansion does real work; later calls are no-ops */
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
4521
Steven Rostedt577b7852009-02-26 23:43:05 -05004522struct trace_option_dentry;
4523
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004524static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004525create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004526
/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	/* Already torn down to nop: nothing to do */
	if (tr->current_trace == &nop_trace)
		return;

	tr->current_trace->enabled--;

	/* Let the outgoing tracer undo its own state */
	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}
4543
/* Create the option files for tracer @t under @tr's directory */
static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}
4552
/*
 * Switch @tr's active tracer to the one named @buf.
 *
 * Holds trace_types_lock for the whole transition. The teardown order
 * (disable old tracer, set current_trace to nop_trace, only then touch
 * the snapshot buffer) is load-bearing: see the comment on
 * synchronize_sched() below. Returns 0 or a negative errno
 * (-EINVAL unknown/forbidden tracer, -EBUSY pipe readers active).
 */
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	/* Make sure the buffers are at full size before enabling a tracer */
	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	/* Look the tracer up by name in the registered list */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The new tracer needs a snapshot buffer the old one didn't have */
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4642
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004643static ssize_t
4644tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4645 size_t cnt, loff_t *ppos)
4646{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004647 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004648 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004649 int i;
4650 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004651 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004652
Steven Rostedt60063a62008-10-28 10:44:24 -04004653 ret = cnt;
4654
Li Zefanee6c2c12009-09-18 14:06:47 +08004655 if (cnt > MAX_TRACER_SIZE)
4656 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004657
4658 if (copy_from_user(&buf, ubuf, cnt))
4659 return -EFAULT;
4660
4661 buf[cnt] = 0;
4662
4663 /* strip ending whitespace. */
4664 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4665 buf[i] = 0;
4666
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004667 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004668 if (err)
4669 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004670
Jiri Olsacf8517c2009-10-23 19:36:16 -04004671 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004672
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004673 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004674}
4675
4676static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004677tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4678 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004679{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004680 char buf[64];
4681 int r;
4682
Steven Rostedtcffae432008-05-12 21:21:00 +02004683 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004684 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004685 if (r > sizeof(buf))
4686 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004687 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004688}
4689
4690static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004691tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4692 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004693{
Hannes Eder5e398412009-02-10 19:44:34 +01004694 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004695 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004696
Peter Huewe22fe9b52011-06-07 21:58:27 +02004697 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4698 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004699 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004700
4701 *ptr = val * 1000;
4702
4703 return cnt;
4704}
4705
/* Read handler for tracing_thresh: show the threshold in microseconds */
static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}
4712
4713static ssize_t
4714tracing_thresh_write(struct file *filp, const char __user *ubuf,
4715 size_t cnt, loff_t *ppos)
4716{
4717 struct trace_array *tr = filp->private_data;
4718 int ret;
4719
4720 mutex_lock(&trace_types_lock);
4721 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4722 if (ret < 0)
4723 goto out;
4724
4725 if (tr->current_trace->update_thresh) {
4726 ret = tr->current_trace->update_thresh(tr);
4727 if (ret < 0)
4728 goto out;
4729 }
4730
4731 ret = cnt;
4732out:
4733 mutex_unlock(&trace_types_lock);
4734
4735 return ret;
4736}
4737
#ifdef CONFIG_TRACER_MAX_TRACE

/* Read handler for tracing_max_latency (value kept in nanoseconds) */
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}

/* Write handler for tracing_max_latency (input in microseconds) */
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}

#endif
4755
/*
 * Open handler for trace_pipe: allocate and initialize a trace_iterator,
 * pin the trace_array, and bump current_trace->ref so the tracer cannot
 * be switched while the pipe is open (see tracing_set_tracer's -EBUSY).
 */
static int tracing_open_pipe(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (tracing_disabled)
		return -ENODEV;

	/* Hold a reference on the trace array for the lifetime of the pipe */
	if (trace_array_get(tr) < 0)
		return -ENODEV;

	mutex_lock(&trace_types_lock);

	/* create a buffer to store the information to pass to userspace */
	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		ret = -ENOMEM;
		__trace_array_put(tr);
		goto out;
	}

	trace_seq_init(&iter->seq);
	iter->trace = tr->current_trace;

	if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto fail;
	}

	/* trace pipe does not show start of buffer */
	cpumask_setall(iter->started);

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		iter->iter_flags |= TRACE_FILE_LAT_FMT;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	iter->tr = tr;
	iter->trace_buffer = &tr->trace_buffer;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);
	filp->private_data = iter;

	if (iter->trace->pipe_open)
		iter->trace->pipe_open(iter);

	nonseekable_open(inode, filp);

	/* Block tracer switches while this pipe reader exists */
	tr->current_trace->ref++;
out:
	mutex_unlock(&trace_types_lock);
	return ret;

fail:
	kfree(iter);
	__trace_array_put(tr);
	mutex_unlock(&trace_types_lock);
	return ret;
}
4818
/*
 * Release handler for trace_pipe: undo everything tracing_open_pipe()
 * set up, in reverse order.
 */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	/* Paired with the tr->current_trace->ref++ in tracing_open_pipe() */
	tr->current_trace->ref--;

	/* Let the tracer clean up any per-pipe state */
	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	/* Drop the trace_array reference taken at open time */
	trace_array_put(tr);

	return 0;
}
4841
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004842static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004843trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004844{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004845 struct trace_array *tr = iter->tr;
4846
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004847 /* Iterators are static, they should be filled or empty */
4848 if (trace_buffer_iter(iter, iter->cpu_file))
4849 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004850
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004851 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004852 /*
4853 * Always select as readable when in blocking mode
4854 */
4855 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004856 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004857 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004858 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004859}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004860
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004861static unsigned int
4862tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4863{
4864 struct trace_iterator *iter = filp->private_data;
4865
4866 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004867}
4868
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004869/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004870static int tracing_wait_pipe(struct file *filp)
4871{
4872 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004873 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004874
4875 while (trace_empty(iter)) {
4876
4877 if ((filp->f_flags & O_NONBLOCK)) {
4878 return -EAGAIN;
4879 }
4880
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004881 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004882 * We block until we read something and tracing is disabled.
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004883 * We still block if tracing is disabled, but we have never
4884 * read anything. This allows a user to cat this file, and
4885 * then enable tracing. But after we have read something,
4886 * we give an EOF when tracing is again disabled.
4887 *
4888 * iter->pos will be 0 if we haven't read anything.
4889 */
Tahsin Erdogan9c5afa72017-09-17 03:23:48 -07004890 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004891 break;
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004892
4893 mutex_unlock(&iter->mutex);
4894
Rabin Vincente30f53a2014-11-10 19:46:34 +01004895 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)f4874262014-04-29 16:07:28 -04004896
4897 mutex_lock(&iter->mutex);
4898
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004899 if (ret)
4900 return ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004901 }
4902
4903 return 1;
4904}
4905
Steven Rostedtb3806b42008-05-12 21:20:46 +02004906/*
4907 * Consumer reader.
4908 */
4909static ssize_t
4910tracing_read_pipe(struct file *filp, char __user *ubuf,
4911 size_t cnt, loff_t *ppos)
4912{
4913 struct trace_iterator *iter = filp->private_data;
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004914 ssize_t sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004915
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004916 /*
4917 * Avoid more than one consumer on a single file descriptor
4918 * This is just a matter of traces coherency, the ring buffer itself
4919 * is protected.
4920 */
4921 mutex_lock(&iter->mutex);
Steven Rostedt (Red Hat)2ef33702016-09-23 22:57:13 -04004922
4923 /* return any leftover data */
4924 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
4925 if (sret != -EBUSY)
4926 goto out;
4927
4928 trace_seq_init(&iter->seq);
4929
Steven Rostedt107bad82008-05-12 21:21:01 +02004930 if (iter->trace->read) {
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004931 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
4932 if (sret)
Steven Rostedt107bad82008-05-12 21:21:01 +02004933 goto out;
Steven Rostedt107bad82008-05-12 21:21:01 +02004934 }
4935
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004936waitagain:
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004937 sret = tracing_wait_pipe(filp);
4938 if (sret <= 0)
4939 goto out;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004940
4941 /* stop when tracing is finished */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004942 if (trace_empty(iter)) {
4943 sret = 0;
Steven Rostedt107bad82008-05-12 21:21:01 +02004944 goto out;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004945 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004946
4947 if (cnt >= PAGE_SIZE)
4948 cnt = PAGE_SIZE - 1;
4949
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004950 /* reset all but tr, trace, and overruns */
Steven Rostedt53d0aa72008-05-12 21:21:01 +02004951 memset(&iter->seq, 0,
4952 sizeof(struct trace_iterator) -
4953 offsetof(struct trace_iterator, seq));
Andrew Vagined5467d2013-08-02 21:16:43 +04004954 cpumask_clear(iter->started);
Petr Mladekb36612b2019-10-11 16:21:34 +02004955 trace_seq_init(&iter->seq);
Steven Rostedt4823ed72008-05-12 21:21:01 +02004956 iter->pos = -1;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004957
Lai Jiangshan4f535962009-05-18 19:35:34 +08004958 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004959 trace_access_lock(iter->cpu_file);
Jason Wessel955b61e2010-08-05 09:22:23 -05004960 while (trace_find_next_entry_inc(iter) != NULL) {
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004961 enum print_line_t ret;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004962 int save_len = iter->seq.seq.len;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004963
Ingo Molnarf9896bf2008-05-12 21:20:47 +02004964 ret = print_trace_line(iter);
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02004965 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt088b1e422008-05-12 21:20:48 +02004966 /* don't print partial lines */
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004967 iter->seq.seq.len = save_len;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004968 break;
Steven Rostedt088b1e422008-05-12 21:20:48 +02004969 }
Frederic Weisbeckerb91facc2009-02-06 18:30:44 +01004970 if (ret != TRACE_TYPE_NO_CONSUME)
4971 trace_consume(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004972
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004973 if (trace_seq_used(&iter->seq) >= cnt)
Steven Rostedtb3806b42008-05-12 21:20:46 +02004974 break;
Jiri Olsaee5e51f2011-03-25 12:05:18 +01004975
4976 /*
4977 * Setting the full flag means we reached the trace_seq buffer
4978 * size and we should leave by partial output condition above.
4979 * One of the trace_seq_* functions is not used properly.
4980 */
4981 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
4982 iter->ent->type);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004983 }
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08004984 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08004985 trace_event_read_unlock();
Steven Rostedtb3806b42008-05-12 21:20:46 +02004986
Steven Rostedtb3806b42008-05-12 21:20:46 +02004987 /* Now copy what we have to the user */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004988 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05004989 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
Steven Rostedtf9520752009-03-02 14:04:40 -05004990 trace_seq_init(&iter->seq);
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004991
4992 /*
Lucas De Marchi25985ed2011-03-30 22:57:33 -03004993 * If there was nothing to send to user, in spite of consuming trace
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004994 * entries, go back to wait for more entries.
4995 */
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02004996 if (sret == -EBUSY)
Pekka Paalanen9ff4b972008-09-29 20:23:48 +02004997 goto waitagain;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004998
Steven Rostedt107bad82008-05-12 21:21:01 +02004999out:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005000 mutex_unlock(&iter->mutex);
Steven Rostedt107bad82008-05-12 21:21:01 +02005001
Pekka Paalanen6c6c2792008-05-12 21:21:02 +02005002 return sret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02005003}
5004
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005005static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5006 unsigned int idx)
5007{
5008 __free_page(spd->pages[idx]);
5009}
5010
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005011static const struct pipe_buf_operations tracing_pipe_buf_ops = {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005012 .can_merge = 0,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005013 .confirm = generic_pipe_buf_confirm,
Al Viro92fdd982014-01-17 07:53:39 -05005014 .release = generic_pipe_buf_release,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005015 .steal = generic_pipe_buf_steal,
5016 .get = generic_pipe_buf_get,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005017};
5018
Steven Rostedt34cd4992009-02-09 12:06:29 -05005019static size_t
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005020tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005021{
5022 size_t count;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005023 int save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005024 int ret;
5025
5026 /* Seq buffer is page-sized, exactly what we need. */
5027 for (;;) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005028 save_len = iter->seq.seq.len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005029 ret = print_trace_line(iter);
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005030
5031 if (trace_seq_has_overflowed(&iter->seq)) {
5032 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005033 break;
5034 }
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005035
5036 /*
5037 * This should not be hit, because it should only
5038 * be set if the iter->seq overflowed. But check it
5039 * anyway to be safe.
5040 */
Steven Rostedt34cd4992009-02-09 12:06:29 -05005041 if (ret == TRACE_TYPE_PARTIAL_LINE) {
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005042 iter->seq.seq.len = save_len;
5043 break;
5044 }
5045
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005046 count = trace_seq_used(&iter->seq) - save_len;
Steven Rostedt (Red Hat)74f06bb2014-11-17 13:12:22 -05005047 if (rem < count) {
5048 rem = 0;
5049 iter->seq.seq.len = save_len;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005050 break;
5051 }
5052
Lai Jiangshan74e7ff82009-07-28 20:17:22 +08005053 if (ret != TRACE_TYPE_NO_CONSUME)
5054 trace_consume(iter);
Steven Rostedt34cd4992009-02-09 12:06:29 -05005055 rem -= count;
Jason Wessel955b61e2010-08-05 09:22:23 -05005056 if (!trace_find_next_entry_inc(iter)) {
Steven Rostedt34cd4992009-02-09 12:06:29 -05005057 rem = 0;
5058 iter->ent = NULL;
5059 break;
5060 }
5061 }
5062
5063 return rem;
5064}
5065
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005066static ssize_t tracing_splice_read_pipe(struct file *filp,
5067 loff_t *ppos,
5068 struct pipe_inode_info *pipe,
5069 size_t len,
5070 unsigned int flags)
5071{
Jens Axboe35f3d142010-05-20 10:43:18 +02005072 struct page *pages_def[PIPE_DEF_BUFFERS];
5073 struct partial_page partial_def[PIPE_DEF_BUFFERS];
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005074 struct trace_iterator *iter = filp->private_data;
5075 struct splice_pipe_desc spd = {
Jens Axboe35f3d142010-05-20 10:43:18 +02005076 .pages = pages_def,
5077 .partial = partial_def,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005078 .nr_pages = 0, /* This gets updated below. */
Eric Dumazet047fe362012-06-12 15:24:40 +02005079 .nr_pages_max = PIPE_DEF_BUFFERS,
Steven Rostedt34cd4992009-02-09 12:06:29 -05005080 .flags = flags,
5081 .ops = &tracing_pipe_buf_ops,
5082 .spd_release = tracing_spd_release_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005083 };
5084 ssize_t ret;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005085 size_t rem;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005086 unsigned int i;
5087
Jens Axboe35f3d142010-05-20 10:43:18 +02005088 if (splice_grow_spd(pipe, &spd))
5089 return -ENOMEM;
5090
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005091 mutex_lock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005092
5093 if (iter->trace->splice_read) {
5094 ret = iter->trace->splice_read(iter, filp,
5095 ppos, pipe, len, flags);
5096 if (ret)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005097 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005098 }
5099
5100 ret = tracing_wait_pipe(filp);
5101 if (ret <= 0)
Steven Rostedt34cd4992009-02-09 12:06:29 -05005102 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005103
Jason Wessel955b61e2010-08-05 09:22:23 -05005104 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005105 ret = -EFAULT;
Steven Rostedt34cd4992009-02-09 12:06:29 -05005106 goto out_err;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005107 }
5108
Lai Jiangshan4f535962009-05-18 19:35:34 +08005109 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005110 trace_access_lock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005111
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005112 /* Fill as many pages as possible. */
Al Viroa786c062014-04-11 12:01:03 -04005113 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005114 spd.pages[i] = alloc_page(GFP_KERNEL);
5115 if (!spd.pages[i])
Steven Rostedt34cd4992009-02-09 12:06:29 -05005116 break;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005117
Frederic Weisbeckerfa7c7f62009-02-11 02:51:30 +01005118 rem = tracing_fill_pipe_page(rem, iter);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005119
5120 /* Copy the data into the page, so we can start over. */
5121 ret = trace_seq_to_buffer(&iter->seq,
Jens Axboe35f3d142010-05-20 10:43:18 +02005122 page_address(spd.pages[i]),
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005123 trace_seq_used(&iter->seq));
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005124 if (ret < 0) {
Jens Axboe35f3d142010-05-20 10:43:18 +02005125 __free_page(spd.pages[i]);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005126 break;
5127 }
Jens Axboe35f3d142010-05-20 10:43:18 +02005128 spd.partial[i].offset = 0;
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05005129 spd.partial[i].len = trace_seq_used(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005130
Steven Rostedtf9520752009-03-02 14:04:40 -05005131 trace_seq_init(&iter->seq);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005132 }
5133
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08005134 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08005135 trace_event_read_unlock();
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005136 mutex_unlock(&iter->mutex);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005137
5138 spd.nr_pages = i;
5139
Steven Rostedt (Red Hat)aab3ba82016-03-18 15:46:48 -04005140 if (i)
5141 ret = splice_to_pipe(pipe, &spd);
5142 else
5143 ret = 0;
Jens Axboe35f3d142010-05-20 10:43:18 +02005144out:
Eric Dumazet047fe362012-06-12 15:24:40 +02005145 splice_shrink_spd(&spd);
Jens Axboe35f3d142010-05-20 10:43:18 +02005146 return ret;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005147
Steven Rostedt34cd4992009-02-09 12:06:29 -05005148out_err:
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01005149 mutex_unlock(&iter->mutex);
Jens Axboe35f3d142010-05-20 10:43:18 +02005150 goto out;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005151}
5152
Steven Rostedta98a3c32008-05-12 21:20:59 +02005153static ssize_t
5154tracing_entries_read(struct file *filp, char __user *ubuf,
5155 size_t cnt, loff_t *ppos)
5156{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005157 struct inode *inode = file_inode(filp);
5158 struct trace_array *tr = inode->i_private;
5159 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005160 char buf[64];
5161 int r = 0;
5162 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005163
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005164 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005165
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005166 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005167 int cpu, buf_size_same;
5168 unsigned long size;
5169
5170 size = 0;
5171 buf_size_same = 1;
5172 /* check if all cpu sizes are same */
5173 for_each_tracing_cpu(cpu) {
5174 /* fill in the size from first enabled cpu */
5175 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005176 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5177 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005178 buf_size_same = 0;
5179 break;
5180 }
5181 }
5182
5183 if (buf_size_same) {
5184 if (!ring_buffer_expanded)
5185 r = sprintf(buf, "%lu (expanded: %lu)\n",
5186 size >> 10,
5187 trace_buf_size >> 10);
5188 else
5189 r = sprintf(buf, "%lu\n", size >> 10);
5190 } else
5191 r = sprintf(buf, "X\n");
5192 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005193 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005194
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005195 mutex_unlock(&trace_types_lock);
5196
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005197 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5198 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005199}
5200
5201static ssize_t
5202tracing_entries_write(struct file *filp, const char __user *ubuf,
5203 size_t cnt, loff_t *ppos)
5204{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005205 struct inode *inode = file_inode(filp);
5206 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005207 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005208 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005209
Peter Huewe22fe9b52011-06-07 21:58:27 +02005210 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5211 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005212 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005213
5214 /* must have at least 1 entry */
5215 if (!val)
5216 return -EINVAL;
5217
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005218 /* value is in KB */
5219 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005220 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005221 if (ret < 0)
5222 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005223
Jiri Olsacf8517c2009-10-23 19:36:16 -04005224 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005225
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005226 return cnt;
5227}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005228
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005229static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005230tracing_total_entries_read(struct file *filp, char __user *ubuf,
5231 size_t cnt, loff_t *ppos)
5232{
5233 struct trace_array *tr = filp->private_data;
5234 char buf[64];
5235 int r, cpu;
5236 unsigned long size = 0, expanded_size = 0;
5237
5238 mutex_lock(&trace_types_lock);
5239 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005240 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005241 if (!ring_buffer_expanded)
5242 expanded_size += trace_buf_size >> 10;
5243 }
5244 if (ring_buffer_expanded)
5245 r = sprintf(buf, "%lu\n", size);
5246 else
5247 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5248 mutex_unlock(&trace_types_lock);
5249
5250 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5251}
5252
5253static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005254tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5255 size_t cnt, loff_t *ppos)
5256{
5257 /*
5258 * There is no need to read what the user has written, this function
5259 * is just to make sure that there is no error when "echo" is used
5260 */
5261
5262 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005263
5264 return cnt;
5265}
5266
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005267static int
5268tracing_free_buffer_release(struct inode *inode, struct file *filp)
5269{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005270 struct trace_array *tr = inode->i_private;
5271
Steven Rostedtcf30cf62011-06-14 22:44:07 -04005272 /* disable tracing ? */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005273 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
Alexander Z Lam711e1242013-08-02 18:36:15 -07005274 tracer_tracing_off(tr);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005275 /* resize the ring buffer to 0 */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005276 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005277
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005278 trace_array_put(tr);
5279
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005280 return 0;
5281}
5282
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005283static ssize_t
5284tracing_mark_write(struct file *filp, const char __user *ubuf,
5285 size_t cnt, loff_t *fpos)
5286{
Steven Rostedtd696b582011-09-22 11:50:27 -04005287 unsigned long addr = (unsigned long)ubuf;
Alexander Z Lam2d716192013-07-01 15:31:24 -07005288 struct trace_array *tr = filp->private_data;
Steven Rostedtd696b582011-09-22 11:50:27 -04005289 struct ring_buffer_event *event;
5290 struct ring_buffer *buffer;
5291 struct print_entry *entry;
5292 unsigned long irq_flags;
5293 struct page *pages[2];
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005294 void *map_page[2];
Steven Rostedtd696b582011-09-22 11:50:27 -04005295 int nr_pages = 1;
5296 ssize_t written;
Steven Rostedtd696b582011-09-22 11:50:27 -04005297 int offset;
5298 int size;
5299 int len;
5300 int ret;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005301 int i;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005302
Steven Rostedtc76f0692008-11-07 22:36:02 -05005303 if (tracing_disabled)
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005304 return -EINVAL;
5305
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04005306 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
Mandeep Singh Baines5224c3a2012-09-07 18:12:19 -07005307 return -EINVAL;
5308
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005309 if (cnt > TRACE_BUF_SIZE)
5310 cnt = TRACE_BUF_SIZE;
5311
Steven Rostedtd696b582011-09-22 11:50:27 -04005312 /*
5313 * Userspace is injecting traces into the kernel trace buffer.
5314 * We want to be as non intrusive as possible.
5315 * To do so, we do not want to allocate any special buffers
5316 * or take any locks, but instead write the userspace data
5317 * straight into the ring buffer.
5318 *
5319 * First we need to pin the userspace buffer into memory,
5320 * which, most likely it is, because it just referenced it.
5321 * But there's no guarantee that it is. By using get_user_pages_fast()
5322 * and kmap_atomic/kunmap_atomic() we can get access to the
5323 * pages directly. We then write the data directly into the
5324 * ring buffer.
5325 */
5326 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005327
Steven Rostedtd696b582011-09-22 11:50:27 -04005328 /* check if we cross pages */
5329 if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
5330 nr_pages = 2;
5331
5332 offset = addr & (PAGE_SIZE - 1);
5333 addr &= PAGE_MASK;
5334
5335 ret = get_user_pages_fast(addr, nr_pages, 0, pages);
5336 if (ret < nr_pages) {
5337 while (--ret >= 0)
5338 put_page(pages[ret]);
5339 written = -EFAULT;
5340 goto out;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005341 }
5342
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005343 for (i = 0; i < nr_pages; i++)
5344 map_page[i] = kmap_atomic(pages[i]);
Steven Rostedtd696b582011-09-22 11:50:27 -04005345
5346 local_save_flags(irq_flags);
5347 size = sizeof(*entry) + cnt + 2; /* possible \n added */
Alexander Z Lam2d716192013-07-01 15:31:24 -07005348 buffer = tr->trace_buffer.buffer;
Steven Rostedtd696b582011-09-22 11:50:27 -04005349 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
5350 irq_flags, preempt_count());
5351 if (!event) {
5352 /* Ring buffer disabled, return as if not open for write */
5353 written = -EBADF;
5354 goto out_unlock;
5355 }
5356
5357 entry = ring_buffer_event_data(event);
5358 entry->ip = _THIS_IP_;
5359
5360 if (nr_pages == 2) {
5361 len = PAGE_SIZE - offset;
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005362 memcpy(&entry->buf, map_page[0] + offset, len);
5363 memcpy(&entry->buf[len], map_page[1], cnt - len);
Steven Rostedtd696b582011-09-22 11:50:27 -04005364 } else
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005365 memcpy(&entry->buf, map_page[0] + offset, cnt);
Steven Rostedtd696b582011-09-22 11:50:27 -04005366
5367 if (entry->buf[cnt - 1] != '\n') {
5368 entry->buf[cnt] = '\n';
5369 entry->buf[cnt + 1] = '\0';
5370 } else
5371 entry->buf[cnt] = '\0';
5372
Steven Rostedt7ffbd482012-10-11 12:14:25 -04005373 __buffer_unlock_commit(buffer, event);
Steven Rostedtd696b582011-09-22 11:50:27 -04005374
5375 written = cnt;
5376
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005377 *fpos += written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005378
Steven Rostedtd696b582011-09-22 11:50:27 -04005379 out_unlock:
Vikram Mulukutla72158532014-12-17 18:50:56 -08005380 for (i = nr_pages - 1; i >= 0; i--) {
Steven Rostedt6edb2a82012-05-11 23:28:49 -04005381 kunmap_atomic(map_page[i]);
5382 put_page(pages[i]);
5383 }
Steven Rostedtd696b582011-09-22 11:50:27 -04005384 out:
Marcin Slusarz1aa54bc2010-07-28 01:18:01 +02005385 return written;
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005386}
5387
Li Zefan13f16d22009-12-08 11:16:11 +08005388static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005389{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005390 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005391 int i;
5392
5393 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005394 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005395 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005396 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5397 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005398 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005399
Li Zefan13f16d22009-12-08 11:16:11 +08005400 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005401}
5402
Steven Rostedte1e232c2014-02-10 23:38:46 -05005403static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
Zhaolei5079f322009-08-25 16:12:56 +08005404{
Zhaolei5079f322009-08-25 16:12:56 +08005405 int i;
5406
Zhaolei5079f322009-08-25 16:12:56 +08005407 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
5408 if (strcmp(trace_clocks[i].name, clockstr) == 0)
5409 break;
5410 }
5411 if (i == ARRAY_SIZE(trace_clocks))
5412 return -EINVAL;
5413
Zhaolei5079f322009-08-25 16:12:56 +08005414 mutex_lock(&trace_types_lock);
5415
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005416 tr->clock_id = i;
5417
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005418 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
Zhaolei5079f322009-08-25 16:12:56 +08005419
David Sharp60303ed2012-10-11 16:27:52 -07005420 /*
5421 * New clock may not be consistent with the previous clock.
5422 * Reset the buffer so that it doesn't have incomparable timestamps.
5423 */
Alexander Z Lam94571582013-08-02 18:36:16 -07005424 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005425
5426#ifdef CONFIG_TRACER_MAX_TRACE
Baohong Liud28e96b2017-09-05 16:57:19 -05005427 if (tr->max_buffer.buffer)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005428 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
Alexander Z Lam94571582013-08-02 18:36:16 -07005429 tracing_reset_online_cpus(&tr->max_buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005430#endif
David Sharp60303ed2012-10-11 16:27:52 -07005431
Zhaolei5079f322009-08-25 16:12:56 +08005432 mutex_unlock(&trace_types_lock);
5433
Steven Rostedte1e232c2014-02-10 23:38:46 -05005434 return 0;
5435}
5436
5437static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5438 size_t cnt, loff_t *fpos)
5439{
5440 struct seq_file *m = filp->private_data;
5441 struct trace_array *tr = m->private;
5442 char buf[64];
5443 const char *clockstr;
5444 int ret;
5445
5446 if (cnt >= sizeof(buf))
5447 return -EINVAL;
5448
5449 if (copy_from_user(&buf, ubuf, cnt))
5450 return -EFAULT;
5451
5452 buf[cnt] = 0;
5453
5454 clockstr = strstrip(buf);
5455
5456 ret = tracing_set_clock(tr, clockstr);
5457 if (ret)
5458 return ret;
5459
Zhaolei5079f322009-08-25 16:12:56 +08005460 *fpos += cnt;
5461
5462 return cnt;
5463}
5464
/*
 * Open handler for the "trace_clock" file.  Takes a reference on the
 * trace_array for the lifetime of the seq_file; the reference is dropped
 * again if single_open() fails.
 */
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	/* Pin the trace_array so it cannot go away while the file is open. */
	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5482
/* Per-open-file state for the per-cpu "trace_pipe_raw" buffer files. */
struct ftrace_buffer_info {
	struct trace_iterator	iter;	/* iterator over the ring buffer */
	void			*spare;	/* spare page used by ring_buffer_read_page() */
	unsigned int		read;	/* bytes of the spare page already copied out */
};
5488
/*
 * Open handler for the "snapshot" file.  Readers get a full tracing
 * iterator (against the max/snapshot buffer); write-only openers get a
 * stub seq_file that merely carries a minimal iterator as private data.
 * A trace_array reference is held while the file is open and dropped on
 * any error path.
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		/* true => snapshot: iterate the max buffer, not the live one */
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5529
/*
 * Write handler for the "snapshot" file.  The value written selects the
 * action:
 *   0 - free the snapshot buffer (only for the all-CPUs file)
 *   1 - allocate (if needed) and take a snapshot by swapping buffers
 *   * - clear the snapshot buffer contents
 * Returns the byte count on success or a negative errno.
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* Tracers that use the max buffer themselves own it; refuse. */
	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		/* Freeing is only meaningful on the all-CPUs instance. */
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot)
			ret = resize_buffer_duplicate_size(&tr->max_buffer,
					&tr->trace_buffer, iter->cpu_file);
		else
			ret = alloc_snapshot(tr);

		if (ret < 0)
			break;

		/* irqs off so the swap is atomic w.r.t. this CPU's tracing */
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		/* Any other value just clears the snapshot contents. */
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005607
/*
 * Release handler for the "snapshot" file.  Readers are torn down by
 * tracing_release(); write-only openers must free the stub seq_file and
 * the minimal iterator allocated in tracing_snapshot_open().
 */
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}
5625
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005626static int tracing_buffers_open(struct inode *inode, struct file *filp);
5627static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5628 size_t count, loff_t *ppos);
5629static int tracing_buffers_release(struct inode *inode, struct file *file);
5630static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5631 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5632
/*
 * Open handler for "snapshot_raw": reuses the raw buffer open path, then
 * redirects the iterator at the max/snapshot buffer.  Refused (-EBUSY)
 * when the current tracer uses the max buffer itself.
 */
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		/* Undo the open before bailing out. */
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}
5654
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005655#endif /* CONFIG_TRACER_SNAPSHOT */
5656
5657
/* File operations for the "tracing_thresh" control file. */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};
5664
Chen Gange428abb2015-11-10 05:15:15 +08005665#ifdef CONFIG_TRACER_MAX_TRACE
/* File operations for the "tracing_max_latency" file (max-trace only). */
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
Chen Gange428abb2015-11-10 05:15:15 +08005672#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005673
/* File operations for the "current_tracer" file. */
static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};
5680
/* File operations for the consuming "trace_pipe" file (not seekable). */
static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};
5689
/* File operations for the per-cpu "buffer_size_kb" file. */
static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5697
/* File operations for the read-only "buffer_total_size_kb" file. */
static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5704
/* File operations for the write-only "free_buffer" file. */
static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};
5710
/* File operations for the "trace_marker" file. */
static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5717
/* File operations for the "trace_clock" file. */
static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};
5725
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005726#ifdef CONFIG_TRACER_SNAPSHOT
/* File operations for the "snapshot" file. */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005734
/* File operations for the per-cpu "snapshot_raw" file (not seekable). */
static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
5742
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005743#endif /* CONFIG_TRACER_SNAPSHOT */
5744
/*
 * Open handler for the per-cpu "trace_pipe_raw" files.  Allocates the
 * per-file ftrace_buffer_info, pins the trace_array and bumps the
 * current tracer's ref count so it cannot be switched away while the
 * raw buffer is being read.
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	/* Pin the current tracer while this reader exists. */
	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5785
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005786static unsigned int
5787tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5788{
5789 struct ftrace_buffer_info *info = filp->private_data;
5790 struct trace_iterator *iter = &info->iter;
5791
5792 return trace_poll(iter, filp, poll_table);
5793}
5794
/*
 * Read handler for the per-cpu raw buffer files.  Pulls whole pages out
 * of the ring buffer into a spare page with ring_buffer_read_page(),
 * then doles the page out to user space across successive reads,
 * tracked by info->read.  Blocks (unless O_NONBLOCK) when the buffer is
 * empty.
 */
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The max buffer belongs to the tracer while it is in use. */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	/* Lazily allocate the spare page on first read. */
	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			/* Sleep until data arrives, then try again. */
			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	/* Copy out whatever remains of the spare page, capped at @count. */
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	/* Partial copy: report only the bytes actually transferred. */
	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
5861
/*
 * Release handler for the raw buffer files: drops the tracer pin and the
 * trace_array reference taken at open, and frees the spare page and the
 * per-file state.
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
5881
/* Reference-counted handle for a ring buffer page handed to a pipe. */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* owning ring buffer */
	void			*page;		/* page being spliced */
	int			ref;		/* reference count */
};
5887
5888static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5889 struct pipe_buffer *buf)
5890{
5891 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5892
5893 if (--ref->ref)
5894 return;
5895
5896 ring_buffer_free_read_page(ref->buffer, ref->page);
5897 kfree(ref);
5898 buf->private = 0;
5899}
5900
/*
 * Pipe-buffer get: take another reference on the page, refusing if the
 * count is close enough to INT_MAX that further gets could overflow it.
 */
static bool buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (ref->ref > INT_MAX/2)
		return false;

	ref->ref++;
	return true;
}
5912
5913/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005914static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005915 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005916 .confirm = generic_pipe_buf_confirm,
5917 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005918 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005919 .get = buffer_pipe_buf_get,
5920};
5921
5922/*
5923 * Callback from splice_to_pipe(), if we need to release some pages
5924 * at the end of the spd in case we error'ed out in filling the pipe.
5925 */
5926static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5927{
5928 struct buffer_ref *ref =
5929 (struct buffer_ref *)spd->partial[i].private;
5930
5931 if (--ref->ref)
5932 return;
5933
5934 ring_buffer_free_read_page(ref->buffer, ref->page);
5935 kfree(ref);
5936 spd->partial[i].private = 0;
5937}
5938
/*
 * Splice handler for the raw buffer files: moves whole ring buffer
 * pages into @pipe zero-copy.  Each page is wrapped in a refcounted
 * buffer_ref so the ring buffer page is only returned once the pipe is
 * done with it.  @len and *ppos must be page-aligned; blocks for data
 * unless O_NONBLOCK/SPLICE_F_NONBLOCK is set.
 */
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The max buffer belongs to the tracer while it is in use. */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		/* Round a larger, unaligned length down to page multiples. */
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		/* An allocation/read error trumps EAGAIN. */
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
6045
/* File operations for the per-cpu "trace_pipe_raw" files (not seekable). */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
6054
/*
 * Read handler for the per-cpu "stats" file.  Formats ring buffer
 * counters (entries, overruns, bytes, timestamps, dropped and read
 * events) for one CPU into a trace_seq and copies it to user space.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
6118
/* File operations for the per-CPU "stats" file (see tracing_stats_read) */
static const struct file_operations tracing_stats_fops = {
	.open = tracing_open_generic_tr,
	.read = tracing_stats_read,
	.llseek = generic_file_llseek,
	.release = tracing_release_generic_tr,
};
6125
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006126#ifdef CONFIG_DYNAMIC_FTRACE
6127
/*
 * Weak default used by tracing_read_dyn_info(): architectures may
 * override this to append arch-specific dynamic-ftrace statistics
 * into @buf (at most @size bytes).  Returns the number of bytes added.
 */
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}
6132
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006133static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006134tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006135 size_t cnt, loff_t *ppos)
6136{
Steven Rostedta26a2a22008-10-31 00:03:22 -04006137 static char ftrace_dyn_info_buffer[1024];
6138 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006139 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006140 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04006141 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006142 int r;
6143
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006144 mutex_lock(&dyn_info_mutex);
6145 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006146
Steven Rostedta26a2a22008-10-31 00:03:22 -04006147 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006148 buf[r++] = '\n';
6149
6150 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6151
6152 mutex_unlock(&dyn_info_mutex);
6153
6154 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006155}
6156
/* File operations for dyn_ftrace_total_info */
static const struct file_operations tracing_dyn_info_fops = {
	.open = tracing_open_generic,
	.read = tracing_read_dyn_info,
	.llseek = generic_file_llseek,
};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006162#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006163
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006164#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/* ftrace probe callback: take a snapshot on every hit of the traced ip */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006170
/*
 * ftrace probe callback with a countdown: take a snapshot at most
 * *data times.  The probe's data slot is used in place as the counter;
 * -1 means unlimited, 0 means exhausted.
 *
 * Fix: the counter pointer was previously cast to (long *) while being
 * assigned to an unsigned long * — an incompatible-pointer-type
 * initialization.  Cast to the matching unsigned type instead.
 */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return;

	/* -1 (all bits set) means "no limit": never decrement it */
	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}
6184
/*
 * Show one registered snapshot probe as
 * "func:snapshot[:count=N|:unlimited]" when set_ftrace_filter is read.
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long remaining = (long)data;

	seq_printf(m, "%ps:", (void *)ip);
	seq_puts(m, "snapshot");

	if (remaining != -1)
		seq_printf(m, ":count=%ld\n", remaining);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
6202
/* Probe ops for "func:snapshot" with no count: snapshot on every hit */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func = ftrace_snapshot,
	.print = ftrace_snapshot_print,
};
6207
/* Probe ops for "func:snapshot:count": snapshot with a countdown */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func = ftrace_count_snapshot,
	.print = ftrace_snapshot_print,
};
6212
/*
 * Handle the "func:snapshot[:count]" command written to set_ftrace_filter.
 * @glob:  function name pattern; a leading '!' removes an existing probe.
 * @param: optional countdown value (no param means snapshot every hit).
 * Returns 0 on success or a negative errno.
 */
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;	/* default: unlimited */
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	/* An empty count field behaves like no count at all */
	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	/* Make sure the snapshot buffer exists before arming the probe */
	ret = alloc_snapshot(&global_trace);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, ops, count);

 out:
	return ret < 0 ? ret : 0;
}
6259
/* set_ftrace_filter command descriptor for "snapshot" */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name = "snapshot",
	.func = ftrace_trace_snapshot_callback,
};
6264
/*
 * Register "snapshot" as a set_ftrace_filter command at boot.
 * Compiled out (no-op stub) unless both TRACER_SNAPSHOT and
 * DYNAMIC_FTRACE are configured.
 */
static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006272
/*
 * Return the tracefs parent dentry for @tr: NULL for the top-level
 * (global) trace array, the instance directory otherwise, or an
 * ERR_PTR if the directory was never created.
 */
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}
6285
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006286static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6287{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006288 struct dentry *d_tracer;
6289
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006290 if (tr->percpu_dir)
6291 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006292
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006293 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006294 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006295 return NULL;
6296
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006297 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006298
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006299 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006300 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006301
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006302 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006303}
6304
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006305static struct dentry *
6306trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6307 void *data, long cpu, const struct file_operations *fops)
6308{
6309 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6310
6311 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00006312 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006313 return ret;
6314}
6315
/*
 * Populate per_cpu/cpuN/ in tracefs for @tr with the per-CPU variants
 * of the trace control files.  Failures are logged and tolerated.
 */
static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
6358
Steven Rostedt60a11772008-05-12 21:20:44 +02006359#ifdef CONFIG_FTRACE_SELFTEST
6360/* Let selftest have access to static functions in this file */
6361#include "trace_selftest.c"
6362#endif
6363
Steven Rostedt577b7852009-02-26 23:43:05 -05006364static ssize_t
6365trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6366 loff_t *ppos)
6367{
6368 struct trace_option_dentry *topt = filp->private_data;
6369 char *buf;
6370
6371 if (topt->flags->val & topt->opt->bit)
6372 buf = "1\n";
6373 else
6374 buf = "0\n";
6375
6376 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6377}
6378
/*
 * Write handler for a tracer-specific option file: accepts "0" or "1"
 * and flips the option bit via the tracer's set_flag callback.
 */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	/* Only call into the tracer when the bit actually changes */
	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		/* last arg is "neg": clearing the bit when val is 1 is inverted */
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
6407
6408
/* File operations for tracer-specific files under options/ */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
6415
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 * idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 * ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
/* Decode @data (a &tr->trace_flags_index[i]) into (@ptr = tr, @pindex = i) */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
6448
Steven Rostedta8259072009-02-26 22:19:12 -05006449static ssize_t
6450trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6451 loff_t *ppos)
6452{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006453 void *tr_index = filp->private_data;
6454 struct trace_array *tr;
6455 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006456 char *buf;
6457
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006458 get_tr_index(tr_index, &tr, &index);
6459
6460 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05006461 buf = "1\n";
6462 else
6463 buf = "0\n";
6464
6465 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6466}
6467
/*
 * Write handler for a core trace-flag option file: accepts "0" or "1"
 * and updates the flag via set_tracer_flag() under trace_types_lock.
 */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
6498
/* File operations for core trace-flag files under options/ */
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
6505
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006506struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006507 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006508 struct dentry *parent,
6509 void *data,
6510 const struct file_operations *fops)
6511{
6512 struct dentry *ret;
6513
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006514 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006515 if (!ret)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006516 pr_warning("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006517
6518 return ret;
6519}
6520
6521
/*
 * Lazily create (and cache) the "options" tracefs directory for @tr.
 * Returns the cached dentry, or NULL on failure.
 */
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
6541
/*
 * Create one options/<opt->name> file for a tracer option, filling in
 * @topt so the read/write callbacks can find the flag later.  Failure
 * leaves topt->entry NULL; the caller warns about it.
 */
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
				    &trace_options_fops);

}
6562
/*
 * Create the options/ files for all of @tracer's flags in @tr, and
 * record the (tracer, files) pair in tr->topts so the files can be
 * torn down with the instance.  Skips tracers that are not valid for
 * this array or whose (possibly shared) flags were already added.
 */
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/*
		 * Check if these flags have already been added.
		 * Some tracers share flags.
		 */
		if (tr->topts[i].tracer->flags == tracer->flags)
			return;
	}

	opts = flags->opts;

	/* Count the NULL-name-terminated option list */
	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	/* Grow the per-array bookkeeping table by one slot */
	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
6626
/*
 * Create one options/<option> file for a core trace flag.  The file's
 * private data is &tr->trace_flags_index[index]; see get_tr_index()
 * for how that encodes both the array and the flag bit.
 */
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}
6641
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006642static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006643{
6644 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006645 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05006646 int i;
6647
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006648 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006649 if (!t_options)
6650 return;
6651
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006652 for (i = 0; trace_options[i]; i++) {
6653 if (top_level ||
6654 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6655 create_trace_option_core_file(tr, trace_options[i], i);
6656 }
Steven Rostedta8259072009-02-26 22:19:12 -05006657}
6658
Steven Rostedt499e5472012-02-22 15:50:28 -05006659static ssize_t
6660rb_simple_read(struct file *filp, char __user *ubuf,
6661 size_t cnt, loff_t *ppos)
6662{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006663 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006664 char buf[64];
6665 int r;
6666
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006667 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006668 r = sprintf(buf, "%d\n", r);
6669
6670 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6671}
6672
/*
 * Write handler for tracing_on: turns recording on/off for this trace
 * array, invoking the current tracer's start/stop hooks only on an
 * actual state change (under trace_types_lock).
 */
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		/* Already in the requested state: skip the start/stop hooks */
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
6706
/* File operations for the tracing_on control file */
static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};
6714
/* tracefs "instances" directory; sub-directories hold per-instance files */
struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04006719
/*
 * Allocate the ring buffer and per-CPU data of one trace_buffer for
 * @tr with @size bytes per CPU.  Returns 0 or -ENOMEM (all partial
 * allocations are rolled back).
 */
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/*
	 * Allocate the first page for all buffers.
	 * NOTE(review): this acts on tr->trace_buffer, not @buf — so when
	 * called for tr->max_buffer it re-touches the main buffer; looks
	 * intentional upstream, but worth confirming.
	 */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
6746
/*
 * Allocate the main trace buffer of @tr and, when max-trace is
 * configured, its snapshot (max) buffer.  The snapshot buffer is full
 * size only if a boot-time snapshot was requested, otherwise one page.
 * Returns 0 or -ENOMEM.
 */
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		/* Undo the main buffer allocation on failure */
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
6775
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006776static void free_trace_buffer(struct trace_buffer *buf)
6777{
6778 if (buf->buffer) {
6779 ring_buffer_free(buf->buffer);
6780 buf->buffer = NULL;
6781 free_percpu(buf->data);
6782 buf->data = NULL;
6783 }
6784}
6785
/* Release all buffers owned by @tr (NULL-safe) */
static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
6797
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006798static void init_trace_flags_index(struct trace_array *tr)
6799{
6800 int i;
6801
6802 /* Used by the trace options files */
6803 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6804 tr->trace_flags_index[i] = i;
6805}
6806
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006807static void __update_tracer_options(struct trace_array *tr)
6808{
6809 struct tracer *t;
6810
6811 for (t = trace_types; t; t = t->next)
6812 add_tracer_options(tr, t);
6813}
6814
/* Locked wrapper around __update_tracer_options() */
static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
6821
/*
 * Callback for mkdir in the tracefs "instances" directory.  Allocates
 * and wires up a new trace_array named @name, including its ring
 * buffers and tracefs control files, and links it onto
 * ftrace_trace_arrays.
 *
 * Returns 0 on success, -EEXIST if an instance with this name already
 * exists, -ENOMEM on allocation failure, or the error from
 * event_trace_add_tracer().
 */
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	/* Instance names must be unique. */
	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	/* A new instance inherits the current flags of the top level array. */
	tr->trace_flags = global_trace.trace_flags;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	/* Start with no tracer active. */
	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		/* Tear down the directory we just created. */
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	/* Lock already held, use the unlocked variant. */
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	/* Helpers below all tolerate partially initialized state. */
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
6895
/*
 * Callback for rmdir in the tracefs "instances" directory.  Finds the
 * instance named @name, and if it is not busy, unlinks it and frees
 * everything instance_mkdir() set up.
 *
 * Returns 0 on success, -ENODEV if no such instance exists, or -EBUSY
 * if the instance (or its current tracer) still has references.
 */
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	/* Refuse to remove an instance that is still in use. */
	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Teardown order mirrors setup: stop tracing, then remove files/buffers. */
	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	/* Free the per-tracer option tables created by add_tracer_options(). */
	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
6943
/*
 * Create the "instances" directory, whose mkdir/rmdir operations let
 * user space create and destroy trace array instances.
 */
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
6952
/*
 * Populate @d_tracer with the control and output files for trace array
 * @tr.  Used both for the top level tracing directory and for each
 * instance directory created via instance_mkdir().
 */
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	/* write-only: writing here discards buffered trace data */
	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	/* write-only: user space writes markers into the trace */
	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  tr, &tracing_saved_tgids_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	/* The "options" directory with one file per trace flag. */
	create_trace_options_dir(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	/* Per-CPU subdirectories (per_cpu/cpuN/...). */
	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

}
7016
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05007017static struct vfsmount *trace_automount(void *ingore)
7018{
7019 struct vfsmount *mnt;
7020 struct file_system_type *type;
7021
7022 /*
7023 * To maintain backward compatibility for tools that mount
7024 * debugfs to get to the tracing facility, tracefs is automatically
7025 * mounted to the debugfs/tracing directory.
7026 */
7027 type = get_fs_type("tracefs");
7028 if (!type)
7029 return NULL;
7030 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
7031 put_filesystem(type);
7032 if (IS_ERR(mnt))
7033 return NULL;
7034 mntget(mnt);
7035
7036 return mnt;
7037}
7038
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 *
 * Returns NULL on success (the top level tracefs directory is the
 * tracefs root, represented as NULL), or an ERR_PTR on failure.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	/* Both tracefs and (if enabled) debugfs must be ready by now. */
	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}
7074
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007075extern struct trace_enum_map *__start_ftrace_enum_maps[];
7076extern struct trace_enum_map *__stop_ftrace_enum_maps[];
7077
7078static void __init trace_enum_init(void)
7079{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007080 int len;
7081
7082 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007083 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04007084}
7085
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007086#ifdef CONFIG_MODULES
7087static void trace_module_add_enums(struct module *mod)
7088{
7089 if (!mod->num_trace_enums)
7090 return;
7091
7092 /*
7093 * Modules with bad taint do not have events created, do
7094 * not bother with enums either.
7095 */
7096 if (trace_module_has_bad_taint(mod))
7097 return;
7098
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007099 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007100}
7101
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007102#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/*
 * Remove the saved enum maps belonging to @mod when it is unloaded.
 *
 * trace_enum_maps is a chain of union items: a module's maps appear to
 * be bracketed by a head item (->head.mod identifies the owner) and a
 * tail item (->tail.next links to the next module's head).  Walk the
 * chain to the matching head, splice that block out, and free it.
 */
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		/* Jump over this module's maps to its tail item. */
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	/* Unlink the whole block for @mod, then free it. */
	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
7130#else
/* No-op stub when CONFIG_TRACE_ENUM_MAP_FILE is not set. */
static inline void trace_module_remove_enums(struct module *mod) { }
7132#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7133
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007134static int trace_module_notify(struct notifier_block *self,
7135 unsigned long val, void *data)
7136{
7137 struct module *mod = data;
7138
7139 switch (val) {
7140 case MODULE_STATE_COMING:
7141 trace_module_add_enums(mod);
7142 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007143 case MODULE_STATE_GOING:
7144 trace_module_remove_enums(mod);
7145 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007146 }
7147
7148 return 0;
7149}
7150
/* Registered with the module notifier chain in tracer_init_tracefs(). */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007155#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007156
/*
 * Create the contents of the top level tracing directory.  Runs as an
 * fs_initcall() once the filesystem infrastructure is up; returns 0
 * even when the directory cannot be created (tracing still works,
 * just without the control files).
 */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	/* Files shared with instance directories. */
	init_tracer_tracefs(&global_trace, d_tracer);

	/* Files that only exist at the top level. */
	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	/* Register built-in enum maps, then expose them. */
	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
7200
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007201static int trace_panic_handler(struct notifier_block *this,
7202 unsigned long event, void *unused)
7203{
Steven Rostedt944ac422008-10-23 19:26:08 -04007204 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007205 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007206 return NOTIFY_OK;
7207}
7208
/* Registered on the panic notifier chain in tracer_alloc_buffers(). */
static struct notifier_block trace_panic_notifier = {
	.notifier_call = trace_panic_handler,
	.next = NULL,
	.priority = 150 /* priority: INT_MAX >= x >= 0 */
};
7214
7215static int trace_die_handler(struct notifier_block *self,
7216 unsigned long val,
7217 void *data)
7218{
7219 switch (val) {
7220 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04007221 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007222 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007223 break;
7224 default:
7225 break;
7226 }
7227 return NOTIFY_OK;
7228}
7229
/* Registered via register_die_notifier() in tracer_alloc_buffers(). */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
7234
7235/*
7236 * printk is set to max of 1024, we really don't need it that big.
7237 * Nothing should be printing 1000 characters anyway.
7238 */
7239#define TRACE_MAX_PRINT 1000
7240
7241/*
7242 * Define here KERN_TRACE so that we have one place to modify
7243 * it if we decide to change what log level the ftrace dump
7244 * should be at.
7245 */
Steven Rostedt428aee12009-01-14 12:24:42 -05007246#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007247
/*
 * Print the contents of @s to the console at KERN_TRACE level and
 * reset the sequence.  Called from ftrace_dump() at oops/panic time,
 * so the bounds checks below are deliberately paranoid.
 */
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
7270
/*
 * Set up @iter to iterate the global (top level) trace buffer across
 * all CPUs.  Used by ftrace_dump(), which cannot go through the normal
 * trace file open path.
 */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	/* Let the current tracer prepare its iterator state, if it wants to. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
7289
/*
 * Dump the trace buffer(s) to the console.  Called from oops/panic/die
 * notifiers and sysrq-z, so it must work with interrupts disabled and
 * without taking sleeping locks.  @oops_dump_mode selects dumping all
 * CPUs, only the current CPU, or nothing.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	/* Keep new events from being recorded while we read. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* Reset iterator state (keeps the configured cpu_file). */
		trace_iterator_reset(&iter);
		iter.iter_flags |= TRACE_FILE_LAT_FMT;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		/* Dumping can take a while; don't trip the NMI watchdog. */
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Restore the user-symbol flag and re-enable recording. */
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007396
/*
 * Allocate and initialize the global trace array, its ring buffers and
 * supporting state.  Called early in boot from trace_init(), before
 * tracefs is available.  Returns 0 on success or a negative errno,
 * unwinding partial allocations via the goto chain at the bottom.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	/* Honor a "traceoff" request made on the kernel command line. */
	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	/* Dump the buffer on panic/oops if the user asked for it. */
	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007501
/*
 * Early boot entry point for the tracing subsystem: allocate the
 * tracepoint_printk iterator if requested on the command line, then
 * set up the ring buffers and trace events.
 */
void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		/* Without the iterator, tracepoint_printk cannot work. */
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}
7513
/*
 * Late initcall: forget a boot-requested tracer that never registered.
 * Always returns 0 (initcall convention).
 */
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}
7532
/*
 * tracer_init_tracefs() needs the filesystem infrastructure, so it runs
 * as an fs_initcall().
 *
 * clear_boot_tracer() must run after every plain late_initcall() that
 * could still register the boot-requested tracer (tracers registered at
 * late_initcall time would otherwise find default_bootup_tracer already
 * pointing into soon-to-be-freed init memory).  Use the _sync variant so
 * it is ordered after all plain late initcalls.
 */
fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);