blob: 1b980a8ef791a43a47adcb4cc70876276eaf6c03 [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050023#include <linux/tracefs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020024#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020025#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050028#include <linux/kprobes.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020029#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050032#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040033#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010034#include <linux/string.h>
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -050035#include <linux/mount.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080036#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020038#include <linux/ctype.h>
39#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020040#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050041#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020042#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060043#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020044
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050046#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020047
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 * Set to true once the buffers have been expanded to their
 * configured size (e.g. by set_cmdline_ftrace() or alloc_snapshot).
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occurred
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
71
/*
 * For tracers that don't implement custom flags: an empty option
 * table and a zeroed flags struct they can share as a default.
 */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};
81
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050082static int
83dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010084{
85 return 0;
86}
Steven Rostedt0f048702008-11-05 16:05:44 -050087
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

/* Mask of CPUs that may have a trace buffer allocated. */
cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputing it to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
125
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;	/* owning module, NULL if built-in */
	unsigned long			length;	/* number of maps in this array */
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

/* Protects the trace_enum_maps list below. */
static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
161
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

/* Maximum length of a tracer name passed on the command line. */
#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

/* Set by the "alloc_snapshot" boot parameter (see boot_alloc_snapshot). */
static bool allocate_snapshot;
169
/*
 * Handle the "ftrace=" boot parameter: remember the requested tracer
 * name so it can be enabled once tracing is initialized.
 */
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100179
Steven Rostedt944ac422008-10-23 19:26:08 -0400180static int __init set_ftrace_dump_on_oops(char *str)
181{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200182 if (*str++ != '=' || !*str) {
183 ftrace_dump_on_oops = DUMP_ALL;
184 return 1;
185 }
186
187 if (!strcmp("orig_cpu", str)) {
188 ftrace_dump_on_oops = DUMP_ORIG;
189 return 1;
190 }
191
192 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400193}
194__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200195
/*
 * Handle the "traceoff_on_warning" boot parameter: any value other
 * than "=0" or "=off" enables stopping the trace on a WARN*().
 */
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400203
/*
 * Handle the "alloc_snapshot" boot parameter: request allocation of
 * the snapshot buffer during tracer initialization.
 */
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500212
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400213
/* Buffer holding the "trace_options=" boot parameter value. */
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

/*
 * Handle the "trace_options=" boot parameter: stash the option string
 * to be applied when tracing is set up.
 */
static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
222
/* Buffer and pointer holding the "trace_clock=" boot parameter value. */
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

/*
 * Handle the "trace_clock=" boot parameter: remember the requested
 * trace clock name for later selection.
 */
static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
233
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500234static int __init set_tracepoint_printk(char *str)
235{
236 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
237 tracepoint_printk = 1;
238 return 1;
239}
240__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400241
/*
 * Convert nanoseconds to microseconds, rounding to nearest
 * (the +500 biases the truncating divide into a round).
 */
unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
248
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)


/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a link list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the link list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

/* List of all trace_array instances (top level plus sub-buffers). */
LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200279
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400280int trace_array_get(struct trace_array *this_tr)
281{
282 struct trace_array *tr;
283 int ret = -ENODEV;
284
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
287 if (tr == this_tr) {
288 tr->ref++;
289 ret = 0;
290 break;
291 }
292 }
293 mutex_unlock(&trace_types_lock);
294
295 return ret;
296}
297
/*
 * Drop a reference taken by trace_array_get().  Caller must hold
 * trace_types_lock.
 */
static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

/* Locked wrapper around __trace_array_put(). */
void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}
310
/*
 * If the event file has a filter attached and the record does not
 * match it, discard the reserved ring-buffer event.
 *
 * Returns 1 if the event was discarded, 0 if it should be committed.
 */
int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);
324
/*
 * Same as filter_check_discard() but for filters attached to the
 * trace_event_call itself rather than a per-instance event file.
 *
 * Returns 1 if the event was discarded, 0 if it should be committed.
 */
int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);
Tom Zanussieb02ce02009-04-08 03:15:54 -0500338
/*
 * Return the current timestamp of @buf for @cpu, normalized by the
 * ring buffer.  Falls back to trace_clock_local() before the buffer
 * is allocated during early boot.
 */
static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

/* Current trace timestamp of the global trace buffer for @cpu. */
cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}
357
/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}
377
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a link list of available tracers. */
static struct tracer *trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200399
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which returns by ring_buffer_peek() ..etc)
 * are not protected by ring buffer.
 *
 * The content of events may become garbage if we allow other process consumes
 * these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewrited
 *      by events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffer
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multi read-only access are also serialized.
 */

#ifdef CONFIG_SMP
/* Writer side taken for whole-buffer access, reader side for per-cpu. */
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		/* Release in reverse order of trace_access_lock(). */
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

/* UP: a single mutex suffices; the cpu argument is irrelevant. */
static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
481
#ifdef CONFIG_STACKTRACE
/* Forward declarations; defined later in this file. */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
/* Stack tracing disabled: provide no-op stubs so callers need no #ifdefs. */
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif
505
/*
 * Enable recording on @tr's ring buffer and clear the mirror
 * "buffer_disabled" flag used by fast-path checks.
 */
static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}
522
/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);
535/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500536 * __trace_puts - write a constant string into the trace buffer.
537 * @ip: The address of the caller
538 * @str: The constant string to write
539 * @size: The size of the string.
540 */
541int __trace_puts(unsigned long ip, const char *str, int size)
542{
543 struct ring_buffer_event *event;
544 struct ring_buffer *buffer;
545 struct print_entry *entry;
546 unsigned long irq_flags;
547 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800548 int pc;
549
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400550 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800551 return 0;
552
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800553 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500554
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500555 if (unlikely(tracing_selftest_running || tracing_disabled))
556 return 0;
557
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500558 alloc = sizeof(*entry) + size + 2; /* possible \n added */
559
560 local_save_flags(irq_flags);
561 buffer = global_trace.trace_buffer.buffer;
562 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800563 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500564 if (!event)
565 return 0;
566
567 entry = ring_buffer_event_data(event);
568 entry->ip = ip;
569
570 memcpy(&entry->buf, str, size);
571
572 /* Add a newline if necessary */
573 if (entry->buf[size - 1] != '\n') {
574 entry->buf[size] = '\n';
575 entry->buf[size + 1] = '\0';
576 } else
577 entry->buf[size] = '\0';
578
579 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400580 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500581
582 return size;
583}
584EXPORT_SYMBOL_GPL(__trace_puts);
585
/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	  The address of the caller
 * @str:  The constant string to write to the buffer to
 *
 * Unlike __trace_puts(), only the string's address is recorded, so the
 * string must live for as long as the trace buffer (i.e. be constant).
 * Returns 1 on success, 0 otherwise.
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	/* skip=4 hides the internal tracing frames from the stack dump */
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);
625
#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * trace_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	/* update_max_tr() below cannot be called from NMI context */
	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500672
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

/*
 * Allocate @tr's snapshot (max) buffer, sized like its live buffer.
 * A no-op if it is already allocated.  Returns 0 on success or a
 * negative errno from the resize.
 */
static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}
694
/*
 * Release the memory held by @tr's snapshot buffer by shrinking it
 * to a single entry, and mark it unallocated.
 */
static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. instead, resize it because
	 * The max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500707
/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
729
/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	/* Without a snapshot buffer there is nothing to swap into. */
	if (tracing_alloc_snapshot() < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
/*
 * Stubs used when CONFIG_TRACER_SNAPSHOT is not enabled. They warn
 * once so misconfigured callers are visible, then do nothing.
 */
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
771
/**
 * tracer_tracing_off - turn off the ring buffer for a trace array
 * @tr: the trace array whose recording is being disabled
 */
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}
788
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
802
/* Turn tracing off on a kernel warning, if __disable_trace_on_warning is set. */
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}
808
Steven Rostedt499e5472012-02-22 15:50:28 -0500809/**
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400810 * tracer_tracing_is_on - show real state of ring buffer enabled
811 * @tr : the trace array to know if ring buffer is enabled
812 *
813 * Shows real state of the ring buffer if it is enabled or not.
814 */
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400815static int tracer_tracing_is_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400816{
817 if (tr->trace_buffer.buffer)
818 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
819 return !tr->buffer_disabled;
820}
821
/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
830
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400831static int __init set_buf_size(char *str)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200832{
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400833 unsigned long buf_size;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200834
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200835 if (!str)
836 return 0;
Li Zefan9d612be2009-06-24 17:33:15 +0800837 buf_size = memparse(str, &str);
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200838 /* nr_entries can not be zero */
Li Zefan9d612be2009-06-24 17:33:15 +0800839 if (buf_size == 0)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +0200840 return 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400841 trace_buf_size = buf_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200842 return 1;
843}
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400844__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200845
Tim Bird0e950172010-02-25 15:36:43 -0800846static int __init set_tracing_thresh(char *str)
847{
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800848 unsigned long threshold;
Tim Bird0e950172010-02-25 15:36:43 -0800849 int ret;
850
851 if (!str)
852 return 0;
Daniel Walterbcd83ea2012-09-26 22:08:38 +0200853 ret = kstrtoul(str, 0, &threshold);
Tim Bird0e950172010-02-25 15:36:43 -0800854 if (ret < 0)
855 return 0;
Wang Tianhong87abb3b2012-08-02 14:02:00 +0800856 tracing_thresh = threshold * 1000;
Tim Bird0e950172010-02-25 15:36:43 -0800857 return 1;
858}
859__setup("tracing_thresh=", set_tracing_thresh);
860
/* Convert nanoseconds to microseconds (integer division truncates). */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}
865
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
880
/* Trace clocks selectable at runtime; in_ns marks nanosecond-based clocks. */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};
895
/*
 * trace_parser_get_init - gets the buffer for trace parser
 *
 * Returns 0 on success, 1 on allocation failure (note: this
 * function's convention is a bare nonzero, not a -errno).
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}
910
911/*
912 * trace_parser_put - frees the buffer for trace parser
913 */
914void trace_parser_put(struct trace_parser *parser)
915{
916 kfree(parser->buffer);
917}
918
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* A fresh read (offset 0) starts a brand-new token. */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		/* Leave room for the terminating NUL. */
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		/* Token complete: terminate it and clear continuation state. */
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		/* Input exhausted mid-token: stash ch, finish on next call. */
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
1004
/* TODO add a seq_buf_to_buffer() */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	/* Nothing unread remains past the current read position. */
	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	/* Copy at most the unread remainder of the seq buffer. */
	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}
1021
Tim Bird0e950172010-02-25 15:36:43 -08001022unsigned long __read_mostly tracing_thresh;
1023
#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	/* Record the latency details of the task that caused the new max. */
	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}
1063
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 *
 * Must be called with interrupts disabled (asserted below).
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Inherit the recordable setting from trace_buffer */
	if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
		ring_buffer_record_on(tr->max_buffer.buffer);
	else
		ring_buffer_record_off(tr->max_buffer.buffer);

	/* Swap the live buffer with the snapshot (max) buffer. */
	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1104
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 *
 * Must be called with interrupts disabled (asserted below).
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001149
/* Wait for data on the pipe reader's buffer; @full waits for a full page. */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}
1159
#ifdef CONFIG_FTRACE_STARTUP_TEST
/* Run a tracer's startup selftest on the global trace array; 0 on pass. */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
/* Selftests compiled out: report success unconditionally. */
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001225
/* Forward declarations; defined later in this file. */
static void add_tracer_options(struct trace_array *tr, struct tracer *t);

static void __init apply_trace_boot_options(void);

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 *
 * Returns 0 on success, negative (or -1) on name errors, duplicate
 * registration, or a failed selftest.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject duplicate registrations by name. */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Fill in dummy flag handlers so callers need not check for NULL. */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	/* Link the new tracer at the head of the list. */
	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1308
/* Empty the ring buffer of a single CPU, quiescing writers first. */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1324
/* Empty the ring buffers of all online CPUs, quiescing writers first. */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	/* Restart the time base for this buffer. */
	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1345
/* Must have trace_types_lock held */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	/* Reset every trace array instance, including its max buffer. */
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1358
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/* pid <-> comm mapping cache; protected by trace_cmdline_lock. */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];	/* pid -> cmdline slot */
	unsigned *map_cmdline_to_pid;	/* cmdline slot -> pid */
	unsigned cmdline_num;		/* number of slots allocated */
	int cmdline_idx;		/* current slot index */
	char *saved_cmdlines;		/* cmdline_num * TASK_COMM_LEN bytes */
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
/* Return the comm slot for index @idx in the saved cmdlines buffer. */
static inline char *get_saved_cmdlines(int idx)
{
	return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
}

/* Store @cmdline (TASK_COMM_LEN bytes) into slot @idx. */
static inline void set_cmdline(int idx, const char *cmdline)
{
	memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
}
1383
/*
 * Allocate the dynamic parts of @s for @val cmdline entries.
 * Returns 0 on success, -ENOMEM on failure (nothing is leaked).
 */
static int allocate_cmdlines_buffer(unsigned int val,
				    struct saved_cmdlines_buffer *s)
{
	s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
					GFP_KERNEL);
	if (!s->map_cmdline_to_pid)
		return -ENOMEM;

	s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
	if (!s->saved_cmdlines) {
		kfree(s->map_cmdline_to_pid);
		return -ENOMEM;
	}

	s->cmdline_idx = 0;
	s->cmdline_num = val;
	/*
	 * NO_CMDLINE_MAP is UINT_MAX (all 0xff bytes), so the per-byte
	 * memset fill yields the intended per-entry sentinel value.
	 */
	memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
	       sizeof(s->map_pid_to_cmdline));
	memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
	       val * sizeof(*s->map_cmdline_to_pid));

	return 0;
}
1407
/*
 * Allocate the global savedcmd buffer with the default number of
 * entries. Returns 0 on success, -ENOMEM on failure (savedcmd is
 * left NULL in that case).
 */
static int trace_create_savedcmd(void)
{
	int ret;

	savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
	if (!savedcmd)
		return -ENOMEM;

	ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
	if (ret < 0) {
		kfree(savedcmd);
		savedcmd = NULL;
		return -ENOMEM;
	}

	return 0;
}
1425
/* Nonzero while tracing is stopped via tracing_stop(). */
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
1430
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 *
 * Starts are counted: recording resumes only when stop_count drops
 * back to zero.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1473
/*
 * tracing_start_tr - start tracing for a specific trace_array
 * @tr: the trace array to restart
 *
 * Per-instance counterpart of tracing_start(); the global instance
 * is delegated to tracing_start() so the max buffer is handled too.
 */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	/* Nested stop/start: only the last start re-enables recording */
	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1504
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	/* Already stopped: just bump the nesting count */
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The max (snapshot) buffer must be disabled as well */
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1538
/*
 * tracing_stop_tr - stop tracing for a specific trace_array
 * @tr: the trace array to stop
 *
 * Per-instance counterpart of tracing_stop(); the global instance is
 * delegated to tracing_stop() so the max buffer is handled too.
 */
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	/* Already stopped: just bump the nesting count */
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1559
Ingo Molnare309b412008-05-12 21:20:51 +02001560void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001561
/*
 * trace_save_cmdline - record the comm of @tsk in the saved-cmdline map
 * @tsk: task whose pid/comm should be saved
 *
 * Returns 1 when the comm was saved, 0 when the pid was out of range
 * or the cmdline lock could not be taken (best effort, never spins).
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* pid 0 (idle) and pids above PID_MAX_DEFAULT are not mapped */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot yet for this pid: take the next one round-robin */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);

	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
1604
/*
 * __trace_find_cmdline - look up the saved comm for @pid into @comm
 *
 * Lockless helper; callers must hold trace_cmdline_lock (see
 * trace_find_cmdline()). Special pids get placeholder strings.
 */
static void __trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	/* Negative pids are a caller bug; flag loudly */
	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	/* Pids above the map size were never recorded */
	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	map = savedcmd->map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, get_saved_cmdlines(map));
	else
		strcpy(comm, "<...>");
}
1630
/*
 * trace_find_cmdline - locked lookup of the saved comm for @pid
 *
 * Disables preemption around the arch spinlock so the lock holder
 * cannot be scheduled out, then defers to __trace_find_cmdline().
 */
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
1641
/*
 * tracing_record_cmdline - save @tsk's comm if recording is enabled
 *
 * Only records when cmdline recording is on, tracing is on, and this
 * CPU has a pending save request (trace_cmdline_save). A successful
 * save clears the per-cpu flag until the next event sets it again.
 */
void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}
1653
/*
 * tracing_generic_entry_update - fill the common fields of a trace entry
 * @entry: entry to initialize
 * @flags: irq flags at the time of the event
 * @pc:    preempt count at the time of the event
 *
 * Records the current pid, preempt count and irq/softirq/resched state
 * bits that every trace entry carries.
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count = pc & 0xff;
	entry->pid = (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001674
Steven Rostedte77405a2009-09-02 14:17:06 -04001675struct ring_buffer_event *
1676trace_buffer_lock_reserve(struct ring_buffer *buffer,
1677 int type,
1678 unsigned long len,
1679 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001680{
1681 struct ring_buffer_event *event;
1682
Steven Rostedte77405a2009-09-02 14:17:06 -04001683 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001684 if (event != NULL) {
1685 struct trace_entry *ent = ring_buffer_event_data(event);
1686
1687 tracing_generic_entry_update(ent, flags, pc);
1688 ent->type = type;
1689 }
1690
1691 return event;
1692}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001693
/*
 * __buffer_unlock_commit - commit a reserved event
 *
 * Marks this CPU as needing a cmdline save (so the next
 * tracing_record_cmdline() records the comm) before committing.
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
1700
/*
 * trace_buffer_unlock_commit - commit an event plus optional stack traces
 *
 * Commits @event, then records a kernel stack trace (if enabled for @tr)
 * and a user stack trace (if globally enabled).
 */
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * NOTE(review): skip=6 presumably hides the tracing internals'
	 * own call frames from the recorded stack — confirm the depth.
	 */
	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001712
/* Fallback buffer used when tracing is off but triggers still run */
static struct ring_buffer *temp_buffer;

/*
 * trace_event_buffer_lock_reserve - reserve an event for a trace file
 * @current_rb: out param set to the buffer actually reserved from
 * @trace_file: the event file whose trace_array buffer is used
 *
 * Returns the reserved event, or NULL if neither the instance buffer
 * nor the trigger temp_buffer could satisfy the reserve.
 */
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursive
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1740
1741struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001742trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1743 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001744 unsigned long flags, int pc)
1745{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001746 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001747 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001748 type, len, flags, pc);
1749}
Steven Rostedt94487d62009-05-05 19:22:53 -04001750EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001751
/*
 * trace_buffer_unlock_commit_regs - commit an event with pt_regs stack
 *
 * Like trace_buffer_unlock_commit() but passes @regs through so the
 * kernel stack trace is taken from the saved register state (skip 0).
 */
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001764
/*
 * trace_current_buffer_discard_commit - discard a reserved event
 *
 * Thin wrapper that hands the event back to the ring buffer instead
 * of committing it.
 */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001771
/*
 * trace_function - record a function-entry event (TRACE_FN)
 * @tr:        trace array to record into
 * @ip:        address of the traced function
 * @parent_ip: address of its caller
 * @flags:     irq flags at the time of the event
 * @pc:        preempt count at the time of the event
 *
 * Silently drops the event if the ring buffer reserve fails or the
 * event filter discards it.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
1793
#ifdef CONFIG_STACKTRACE

/* One page worth of stack entries per CPU for deep stack capture */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
};

/* Per-cpu scratch stack and its reservation depth counter */
static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1803
/*
 * __ftrace_trace_stack - record a kernel stack trace event (TRACE_STACK)
 * @buffer: ring buffer to record into
 * @flags:  irq flags at the time of the event
 * @skip:   number of leading stack frames to omit
 * @pc:     preempt count at the time of the event
 * @regs:   optional register state to unwind from instead of "here"
 *
 * Uses a per-cpu scratch stack for the first (non-nested) capture so
 * more than FTRACE_STACK_ENTRIES frames can be saved; nested captures
 * (interrupt/NMI during a capture) fall back to writing directly into
 * the reserved event.
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries = 0;
	trace.skip = skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		/* First user on this CPU: capture into the big scratch stack */
		trace.entries = this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries = FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		/* Nested capture: unwind straight into the event payload */
		trace.max_entries = FTRACE_STACK_ENTRIES;
		trace.entries = entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
1884
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001885static inline void ftrace_trace_stack(struct trace_array *tr,
1886 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001887 unsigned long flags,
1888 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05001889{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001890 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05001891 return;
1892
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001893 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05001894}
1895
/*
 * __trace_stack - unconditionally record a stack trace into @tr's buffer
 *
 * Unlike ftrace_trace_stack() this does not check the stacktrace option.
 */
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
1901
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
1923
/* Per-cpu recursion guard for user stack tracing */
static DEFINE_PER_CPU(int, user_stack_count);

/*
 * ftrace_trace_userstack - record a user-space stack trace event
 *
 * No-op unless the global userstacktrace option is set. Bails out in
 * NMI context (user stack walks can fault) and guards against
 * recursion via a per-cpu counter, since walking the user stack can
 * itself trigger kernel events.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry = ring_buffer_event_data(event);

	entry->tgid = current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries = 0;
	trace.max_entries = FTRACE_STACK_ENTRIES;
	trace.skip = 0;
	trace.entries = entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
1977
#ifdef UNUSED
/*
 * Dead code kept under #ifdef UNUSED — never compiled.
 * NOTE(review): it passes @tr where ftrace_trace_userstack() takes a
 * ring_buffer; would need fixing if ever resurrected.
 */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001984
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001985#endif /* CONFIG_STACKTRACE */
1986
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

/* One per-cpu scratch buffer for each execution context (see get_trace_buf) */
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;
1996
/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
2026
/*
 * alloc_percpu_trace_buffer - allocate the four per-context scratch buffers
 *
 * Allocates one per-cpu trace_printk buffer each for normal, softirq,
 * hardirq and NMI context. On failure, already-allocated buffers are
 * released via the goto unwind chain and -ENOMEM is returned (with a
 * one-shot warning); returns 0 on success.
 */
static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	/* All four succeeded: publish them */
	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}
2067
/* Set once the per-cpu trace_printk buffers have been allocated */
static int buffers_allocated;

/*
 * trace_printk_init_buffers - allocate buffers for trace_printk()
 *
 * Called when a trace_printk() user is compiled in (including from
 * modules). Allocates the per-cpu scratch buffers once, prints a loud
 * warning that this is a debug kernel, expands the ring buffers to
 * full size, and starts cmdline recording if the global buffer
 * already exists (i.e. when called by module code).
 */
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_warning("\n");
	pr_warning("**********************************************************\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** This means that this is a DEBUG kernel and it is     **\n");
	pr_warning("** unsafe for production use.                           **\n");
	pr_warning("**                                                      **\n");
	pr_warning("** If you see this message and you are not debugging    **\n");
	pr_warning("** the kernel, report this immediately to your vendor!  **\n");
	pr_warning("**                                                      **\n");
	pr_warning("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_warning("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
2109
2110void trace_printk_start_comm(void)
2111{
2112 /* Start tracing comms if trace printk is set */
2113 if (!buffers_allocated)
2114 return;
2115 tracing_start_cmdline_record();
2116}
2117
2118static void trace_printk_start_stop_comm(int enabled)
2119{
2120 if (!buffers_allocated)
2121 return;
2122
2123 if (enabled)
2124 tracing_start_cmdline_record();
2125 else
2126 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002127}
2128
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: instruction pointer of the trace_printk() call site
 * @fmt: printf format string; only the POINTER is stored in the event,
 *       decoding to text happens later at read time
 * @args: va_list of arguments for @fmt
 *
 * Encodes the arguments in binary (vbin_printf()) into a TRACE_BPRINT
 * event, deferring the expensive text formatting until the trace is
 * read.  Returns the number of u32 words encoded, or 0 if nothing was
 * recorded (tracing off, scratch buffer unavailable, or overflow).
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	/* Sample preempt_count before disabling preemption for the event */
	pc = preempt_count();
	preempt_disable_notrace();

	/* Per-cpu scratch buffer for the binary-encoded arguments */
	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* Discard if the encoded args overflowed the scratch buffer */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		/* optionally record a stack trace; see ftrace_trace_stack()
		 * for the meaning of the skip argument (6) */
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2188
/*
 * __trace_array_vprintk - format @fmt into @buffer as a TRACE_PRINT event
 *
 * Unlike trace_vbprintk(), the message is fully formatted to text here
 * (vscnprintf) and the resulting string (with its NUL) is copied into
 * the event.  Returns the formatted length, or 0 if nothing was recorded.
 */
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	pc = preempt_count();
	preempt_disable_notrace();


	/* Per-cpu scratch buffer for the formatted text */
	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	/* vscnprintf bounds the result, so len < TRACE_BUF_SIZE always */
	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	/* +1 for the terminating NUL copied below */
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
Steven Rostedt659372d2009-09-03 19:11:07 -04002238
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002239int trace_array_vprintk(struct trace_array *tr,
2240 unsigned long ip, const char *fmt, va_list args)
2241{
2242 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2243}
2244
2245int trace_array_printk(struct trace_array *tr,
2246 unsigned long ip, const char *fmt, ...)
2247{
2248 int ret;
2249 va_list ap;
2250
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002251 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002252 return 0;
2253
2254 va_start(ap, fmt);
2255 ret = trace_array_vprintk(tr, ip, fmt, ap);
2256 va_end(ap);
2257 return ret;
2258}
2259
2260int trace_array_printk_buf(struct ring_buffer *buffer,
2261 unsigned long ip, const char *fmt, ...)
2262{
2263 int ret;
2264 va_list ap;
2265
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002266 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002267 return 0;
2268
2269 va_start(ap, fmt);
2270 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2271 va_end(ap);
2272 return ret;
2273}
2274
Steven Rostedt659372d2009-09-03 19:11:07 -04002275int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2276{
Steven Rostedta813a152009-10-09 01:41:35 -04002277 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002278}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002279EXPORT_SYMBOL_GPL(trace_vprintk);
2280
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002281static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002282{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002283 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2284
Steven Rostedt5a90f572008-09-03 17:42:51 -04002285 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002286 if (buf_iter)
2287 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002288}
2289
Ingo Molnare309b412008-05-12 21:20:51 +02002290static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002291peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2292 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002293{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002294 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002295 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002296
Steven Rostedtd7690412008-10-01 00:29:53 -04002297 if (buf_iter)
2298 event = ring_buffer_iter_peek(buf_iter, ts);
2299 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002300 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002301 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002302
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002303 if (event) {
2304 iter->ent_size = ring_buffer_event_length(event);
2305 return ring_buffer_event_data(event);
2306 }
2307 iter->ent_size = 0;
2308 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002309}
Steven Rostedtd7690412008-10-01 00:29:53 -04002310
/*
 * __find_next_entry - peek at the oldest pending entry across cpus
 * @iter: the trace iterator
 * @ent_cpu: out (optional): cpu the returned entry came from
 * @missing_events: out (optional): events lost before the returned entry
 * @ent_ts: out (optional): timestamp of the returned entry
 *
 * For a per-cpu trace file the single cpu is peeked directly; otherwise
 * all cpus are scanned and the entry with the smallest timestamp wins,
 * merging the per-cpu buffers into one time-ordered stream.  The entry
 * is not consumed.
 */
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			/* peek_next_entry() set iter->ent_size; save the winner's */
			next_size = iter->ent_size;
		}
	}

	/* restore the size of the entry actually being returned */
	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
2370
/*
 * Find the next real entry, without updating the iterator itself.
 * Lost-event accounting is skipped (NULL); use trace_find_next_entry_inc()
 * to also advance the iterator and track lost events.
 */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002377
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002378/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002379void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002380{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002381 iter->ent = __find_next_entry(iter, &iter->cpu,
2382 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002383
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002384 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002385 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002386
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002387 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002388}
2389
/*
 * Consume (remove) the next event on iter->cpu from the ring buffer,
 * updating iter->ts and iter->lost_events as side effects.
 */
static void trace_consume(struct trace_iterator *iter)
{
	ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
			    &iter->lost_events);
}
2395
/*
 * seq_file ->next() callback: advance the trace iterator toward
 * position *pos.  Returns the iterator as the cursor, or NULL at end
 * (or if a backwards seek is requested, which is unsupported).
 */
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	/* leftover handling belongs to s_start(); it must be clear here */
	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	/* idx < 0 means no entry fetched yet; prime the iterator */
	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	/* walk forward until the iterator catches up with *pos */
	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
2422
/*
 * Reset the ring buffer iterator for @cpu and skip past any events
 * stamped before the buffer's time_start.  The count of skipped events
 * is stored in the per-cpu data so entry totals can be corrected
 * (see get_total_entries()).
 */
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
2452
/*
 * seq_file ->start() callback for the trace file.  Syncs the iterator's
 * private tracer copy with the current tracer, then positions the
 * iterator at *pos.  Takes the event-read and per-cpu access locks,
 * which s_stop() releases.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* a snapshot read conflicts with a tracer using the max buffer */
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	/* pause cmdline recording while a live trace is being read */
	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	if (*pos != iter->pos) {
		/* seek: rewind all buffers and walk forward to *pos */
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2517
/* seq_file ->stop() callback: undo the locking/state taken in s_start() */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* mirrors s_start(): it bailed with -EBUSY and took no locks */
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2533
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002534static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002535get_total_entries(struct trace_buffer *buf,
2536 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002537{
2538 unsigned long count;
2539 int cpu;
2540
2541 *total = 0;
2542 *entries = 0;
2543
2544 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002545 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002546 /*
2547 * If this buffer has skipped entries, then we hold all
2548 * entries for the trace and we need to ignore the
2549 * ones before the time stamp.
2550 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002551 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2552 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002553 /* total is the same as the entries */
2554 *total += count;
2555 } else
2556 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002557 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002558 *entries += count;
2559 }
2560}
2561
/* Print the column legend for the latency trace output format */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n"
		    "# / _-----=> irqs-off \n"
		    "# | / _----=> need-resched \n"
		    "# || / _---=> hardirq/softirq \n"
		    "# ||| / _--=> preempt-depth \n"
		    "# |||| / delay \n"
		    "# cmd pid ||||| time | caller \n"
		    "# \\ / ||||| \\ | / \n");
}
2573
/* Print the buffer-occupancy summary shared by the function-trace headers */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
2584
/* Column legend for the default function trace format (no irq info) */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
		    "# | | | | |\n");
}
2591
/* Column legend for the function trace format with irq-info columns */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n"
		    "# / _----=> need-resched\n"
		    "# | / _---=> hardirq/softirq\n"
		    "# || / _--=> preempt-depth\n"
		    "# ||| / delay\n"
		    "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
		    "# | | | |||| | |\n");
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002603
Jiri Olsa62b915f2010-04-02 19:01:22 +02002604void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002605print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2606{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002607 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002608 struct trace_buffer *buf = iter->trace_buffer;
2609 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002610 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002611 unsigned long entries;
2612 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002613 const char *name = "preemption";
2614
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002615 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002616
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002617 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002618
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002619 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002620 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002621 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002622 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002623 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002624 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002625 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002626 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002627 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002628 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002629#if defined(CONFIG_PREEMPT_NONE)
2630 "server",
2631#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2632 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002633#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002634 "preempt",
2635#else
2636 "unknown",
2637#endif
2638 /* These are reserved for later use */
2639 0, 0, 0, 0);
2640#ifdef CONFIG_SMP
2641 seq_printf(m, " #P:%d)\n", num_online_cpus());
2642#else
2643 seq_puts(m, ")\n");
2644#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002645 seq_puts(m, "# -----------------\n");
2646 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002647 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002648 data->comm, data->pid,
2649 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002650 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002651 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002652
2653 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002654 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002655 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2656 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002657 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002658 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2659 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002660 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002661 }
2662
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002663 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002664}
2665
/*
 * Emit a "CPU buffer started" annotation the first time an entry from a
 * given cpu is printed, when both the global ANNOTATE option and the
 * file's annotate flag are set.
 */
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	/* already announced this cpu */
	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	/*
	 * NOTE(review): skipped_entries set means this cpu's buffer held
	 * all events (see tracing_iter_reset()), so presumably it did not
	 * "start" mid-trace and needs no annotation — confirm.
	 */
	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
2691
/*
 * Default output mode: print the current entry using the formatter
 * registered for its event type, prefixed by context info (pid, cpu,
 * timestamp) unless that option is disabled.
 */
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	/* no formatter registered for this event type */
	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
2723
/*
 * "raw" output mode: minimal numeric context (pid cpu ts) followed by
 * the event's raw formatter; unknown types are printed as "<type> ?".
 */
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	/* no formatter registered for this event type */
	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}
2748
/*
 * print_hex_fmt - emit the current entry as hex-encoded fields.
 *
 * When context info is enabled, the pid, cpu and timestamp are hex
 * dumped first; then the event's ->hex() handler emits its payload.
 * Returns TRACE_TYPE_PARTIAL_LINE if the trace_seq overflowed.
 */
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		/* Bail before dispatching to the event if we already overflowed */
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	/* Each entry's hex dump is terminated by a newline byte. */
	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}
2778
Frederic Weisbecker2c4f0352008-09-29 20:18:34 +02002779static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002780{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002781 struct trace_array *tr = iter->tr;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002782 struct trace_seq *s = &iter->seq;
2783 struct trace_entry *entry;
Steven Rostedtf633cef2008-12-23 23:24:13 -05002784 struct trace_event *event;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002785
2786 entry = iter->ent;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002787
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002788 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
Steven Rostedt (Red Hat)19a7fe22014-11-12 10:29:54 -05002789 SEQ_PUT_FIELD(s, entry->pid);
2790 SEQ_PUT_FIELD(s, iter->cpu);
2791 SEQ_PUT_FIELD(s, iter->ts);
2792 if (trace_seq_has_overflowed(s))
2793 return TRACE_TYPE_PARTIAL_LINE;
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002794 }
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002795
Steven Rostedtf633cef2008-12-23 23:24:13 -05002796 event = ftrace_find_event(entry->type);
Steven Rostedta9a57762010-04-22 18:46:14 -04002797 return event ? event->funcs->binary(iter, 0, event) :
2798 TRACE_TYPE_HANDLED;
Ingo Molnarcb0f12a2008-05-12 21:20:47 +02002799}
2800
Jiri Olsa62b915f2010-04-02 19:01:22 +02002801int trace_empty(struct trace_iterator *iter)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002802{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002803 struct ring_buffer_iter *buf_iter;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002804 int cpu;
2805
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002806 /* If we are looking at one CPU buffer, only check that one */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002807 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002808 cpu = iter->cpu_file;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002809 buf_iter = trace_buffer_iter(iter, cpu);
2810 if (buf_iter) {
2811 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002812 return 0;
2813 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002814 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedt9aba60f2009-03-11 19:52:30 -04002815 return 0;
2816 }
2817 return 1;
2818 }
2819
Steven Rostedtab464282008-05-12 21:21:00 +02002820 for_each_tracing_cpu(cpu) {
Steven Rostedt6d158a82012-06-27 20:46:14 -04002821 buf_iter = trace_buffer_iter(iter, cpu);
2822 if (buf_iter) {
2823 if (!ring_buffer_iter_empty(buf_iter))
Steven Rostedtd7690412008-10-01 00:29:53 -04002824 return 0;
2825 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002826 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
Steven Rostedtd7690412008-10-01 00:29:53 -04002827 return 0;
2828 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002829 }
Steven Rostedtd7690412008-10-01 00:29:53 -04002830
Frederic Weisbecker797d3712008-09-30 18:13:45 +02002831 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002832}
2833
/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	/* Report dropped events before formatting the current entry. */
	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* A tracer's own print_line() gets first refusal on every entry. */
	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		/* Did not consume the entry: fall through to default output */
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	/*
	 * With PRINTK + PRINTK_MSGONLY set, printk-style entries are
	 * emitted as bare messages, without any context decoration.
	 */
	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	/* Output format flags, in priority order: bin, hex, raw, default. */
	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
2880
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002881void trace_latency_header(struct seq_file *m)
2882{
2883 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002884 struct trace_array *tr = iter->tr;
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002885
2886 /* print nothing if the buffers are empty */
2887 if (trace_empty(iter))
2888 return;
2889
2890 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2891 print_trace_header(m, iter);
2892
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002893 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
Jiri Olsa7e9a49e2011-11-07 16:08:49 +01002894 print_lat_help_header(m);
2895}
2896
/*
 * trace_default_header - print the default header for the "trace" file.
 *
 * Picks between the latency-format header and the plain function
 * header (with or without irq info columns).  Prints nothing at all
 * when context info is disabled, or when in latency format and the
 * buffers are empty.
 */
void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			/* irq info adds extra columns to the help header */
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				print_func_help_header_irq(iter->trace_buffer, m);
			else
				print_func_help_header(iter->trace_buffer, m);
		}
	}
}
2922
/*
 * test_ftrace_alive - warn the reader if the function tracer died.
 *
 * When ftrace_is_dead() reports that function tracing was disabled
 * (and function events may therefore be missing), prepend a warning
 * banner to the output; otherwise print nothing.
 */
static void test_ftrace_alive(struct seq_file *m)
{
	if (ftrace_is_dead()) {
		seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
			    "# MAY BE MISSING FUNCTION EVENTS\n");
	}
}
2930
#ifdef CONFIG_TRACER_MAX_TRACE
/* Usage text for the top-level "snapshot" file (covers all CPUs). */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}

/*
 * Usage text for a per-cpu "snapshot" file.  Allocation via a per-cpu
 * file is only possible when the ring buffer supports per-cpu swap.
 */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "# Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "# Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "# (Doesn't have to be '2' works with any number that\n"
		    "# is not a '0' or '1')\n");
}

/*
 * print_snapshot_help - print snapshot state and command help.
 *
 * Shown (via s_show()) when the snapshot file is read while empty;
 * reports whether the snapshot buffer is currently allocated, then the
 * command list matching this file's scope (all cpus vs. one cpu).
 */
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
2974
/*
 * s_show - seq_file ->show() for the "trace" file.
 *
 * Three cases: no entry (print the file header / snapshot help),
 * leftover text from a previous overflowing line (flush it), or a
 * regular entry (format it and copy the trace_seq into the seq_file).
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
3018
Oleg Nesterov649e9c72013-07-23 17:25:54 +02003019/*
3020 * Should be used after trace_array_get(), trace_types_lock
3021 * ensures that i_cdev was already initialized.
3022 */
3023static inline int tracing_get_cpu(struct inode *inode)
3024{
3025 if (inode->i_cdev) /* See trace_create_cpu_file() */
3026 return (long)inode->i_cdev - 1;
3027 return RING_BUFFER_ALL_CPUS;
3028}
3029
/* seq_file iteration callbacks backing the "trace" file. */
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
3036
/*
 * __tracing_open - set up a trace_iterator for reading the "trace" file.
 * @inode:    tracefs inode (i_private is the trace_array)
 * @file:     file being opened
 * @snapshot: true when the "snapshot" file is being opened
 *
 * Allocates the iterator as seq_file private data, takes a private copy
 * of the current tracer (so concurrent tracer switches cannot change it
 * under the reader), selects the buffer to read (the max buffer when
 * @snapshot or the tracer sets print_max), stops tracing unless reading
 * a snapshot, and primes the per-cpu ring buffer iterators.
 *
 * Returns the iterator, or ERR_PTR(-ENODEV) when tracing is disabled,
 * ERR_PTR(-ENOMEM) on any allocation failure.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	/* One iterator slot per possible cpu; unused slots stay NULL. */
	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	/*
	 * Two-phase setup: prepare all iterators first, sync once, then
	 * start them and rewind each cpu to the beginning of its buffer.
	 */
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
3131
/*
 * tracing_open_generic - generic ->open() that stashes the inode's
 * private data on the file, after checking that tracing is alive.
 * Note: does not take a trace_array reference (see
 * tracing_open_generic_tr() for the refcounted variant).
 */
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}
3140
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003141bool tracing_is_disabled(void)
3142{
3143 return (tracing_disabled) ? true: false;
3144}
3145
/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	/* Pin the trace_array while this file stays open. */
	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}
3164
/*
 * tracing_release - ->release() for the "trace" file.
 *
 * Undoes __tracing_open(): finishes the per-cpu ring buffer iterators,
 * notifies the tracer, restarts tracing (unless a snapshot was being
 * read), drops the trace_array reference and frees the iterator.
 * Write-only opens never created an iterator, so they only drop the
 * array reference.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	/* __trace_array_put: caller already holds trace_types_lock */
	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3205
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003206static int tracing_release_generic_tr(struct inode *inode, struct file *file)
3207{
3208 struct trace_array *tr = inode->i_private;
3209
3210 trace_array_put(tr);
3211 return 0;
3212}
3213
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003214static int tracing_single_release_tr(struct inode *inode, struct file *file)
3215{
3216 struct trace_array *tr = inode->i_private;
3217
3218 trace_array_put(tr);
3219
3220 return single_release(inode, file);
3221}
3222
/*
 * tracing_open - ->open() for the "trace" file.
 *
 * Takes a trace_array reference for the lifetime of the open file.
 * Opening for write with O_TRUNC erases the buffer contents (the max
 * buffer when the tracer uses print_max); opening for read builds the
 * seq_file iterator via __tracing_open().
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);
		struct trace_buffer *trace_buf = &tr->trace_buffer;

#ifdef CONFIG_TRACER_MAX_TRACE
		/* Latency tracers present the max buffer, so clear that one */
		if (tr->current_trace->print_max)
			trace_buf = &tr->max_buffer;
#endif

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(trace_buf);
		else
			tracing_reset(trace_buf, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	/* Balance trace_array_get() on any failure */
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3261
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003262/*
3263 * Some tracers are not suitable for instance buffers.
3264 * A tracer is always available for the global array (toplevel)
3265 * or if it explicitly states that it is.
3266 */
3267static bool
3268trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3269{
3270 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3271}
3272
3273/* Find the next tracer that this trace array may use */
3274static struct tracer *
3275get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3276{
3277 while (t && !trace_ok_for_array(t, tr))
3278 t = t->next;
3279
3280 return t;
3281}
3282
Ingo Molnare309b412008-05-12 21:20:51 +02003283static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003284t_next(struct seq_file *m, void *v, loff_t *pos)
3285{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003286 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003287 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003288
3289 (*pos)++;
3290
3291 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003292 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003293
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003294 return t;
3295}
3296
3297static void *t_start(struct seq_file *m, loff_t *pos)
3298{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003299 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003300 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003301 loff_t l = 0;
3302
3303 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003304
3305 t = get_tracer_for_array(tr, trace_types);
3306 for (; t && l < *pos; t = t_next(m, t, &l))
3307 ;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003308
3309 return t;
3310}
3311
/* t_stop - pairs with the mutex_lock() taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
3316
3317static int t_show(struct seq_file *m, void *v)
3318{
3319 struct tracer *t = v;
3320
3321 if (!t)
3322 return 0;
3323
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003324 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003325 if (t->next)
3326 seq_putc(m, ' ');
3327 else
3328 seq_putc(m, '\n');
3329
3330 return 0;
3331}
3332
/* seq_file iteration callbacks backing the "available_tracers" file. */
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
3339
/*
 * show_traces_open - ->open() for "available_tracers".
 *
 * Opens the seq_file and stashes the trace_array as its private data
 * so t_start()/t_next() can filter tracers per array.
 * NOTE(review): unlike tracing_open(), no trace_array reference is
 * taken here — confirm the array cannot go away while this file is open.
 */
static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}
3358
/*
 * tracing_write_stub - ->write() for the "trace" file.
 * Writes carry no data (only the O_TRUNC open clears the buffer, see
 * tracing_open()); accept and discard everything, reporting success.
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
3365
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003366loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003367{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003368 int ret;
3369
Slava Pestov364829b2010-11-24 15:13:16 -08003370 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003371 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003372 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003373 file->f_pos = ret = 0;
3374
3375 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003376}
3377
/* File operations for the "trace" file (seq_file read, stub write). */
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};
3385
/* File operations for the read-only "available_tracers" file. */
static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
3392
/*
 * Read handler for "tracing_cpumask": format the instance's CPU mask
 * with the "%*pb" bitmap specifier and copy it to user space.
 */
static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	char *mask_str;
	int len;

	/* First pass with a NULL buffer only computes the required length. */
	len = snprintf(NULL, 0, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask)) + 1;
	mask_str = kmalloc(len, GFP_KERNEL);
	if (!mask_str)
		return -ENOMEM;

	len = snprintf(mask_str, len, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	/* The whole string must fit in the caller's buffer. */
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);

out_err:
	kfree(mask_str);

	return count;
}
3420
/*
 * Write handler for "tracing_cpumask": parse a new CPU mask from user
 * space and flip per-CPU recording on/off to match it before storing
 * the new mask.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	/*
	 * IRQs off + max_lock (an arch spinlock) make the per-CPU flip
	 * atomic with respect to the tracer using the same buffers.
	 */
	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3467
/*
 * File operations for "tracing_cpumask"; the _tr open/release pair
 * pins the trace instance for the lifetime of the file.
 */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
3475
/*
 * seq_file show handler for "trace_options": print every global trace
 * option followed by the current tracer's private options, prefixing
 * disabled ones with "no".
 */
static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	/* trace_types_lock protects current_trace and its flag set. */
	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	/* Then the tracer-specific options. */
	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}
3504
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003505static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003506 struct tracer_flags *tracer_flags,
3507 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003508{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003509 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003510 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003511
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003512 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003513 if (ret)
3514 return ret;
3515
3516 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003517 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003518 else
Zhaolei77708412009-08-07 18:53:21 +08003519 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003520 return 0;
3521}
3522
Li Zefan8d18eaa2009-12-08 11:17:06 +08003523/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003524static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003525{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003526 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003527 struct tracer_flags *tracer_flags = trace->flags;
3528 struct tracer_opt *opts = NULL;
3529 int i;
3530
3531 for (i = 0; tracer_flags->opts[i].name; i++) {
3532 opts = &tracer_flags->opts[i];
3533
3534 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003535 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003536 }
3537
3538 return -EINVAL;
3539}
3540
/*
 * Some tracers require overwrite to stay enabled.
 *
 * Used as a flag-change callback: veto (-1) any attempt to clear
 * TRACE_ITER_OVERWRITE while the tracer is enabled; 0 otherwise.
 */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}
3549
/*
 * Set or clear one global TRACE_ITER_* flag on @tr.  The current
 * tracer may veto the change via ->flag_changed; on success the side
 * effects of the specific flag (cmdline recording, ring-buffer
 * overwrite mode, trace_printk) are propagated.  Returns 0 or a
 * negative errno.
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		/* Keep the max/snapshot buffer in the same overwrite mode. */
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
3583
/*
 * Parse and apply a single option token ("opt" or "noopt"): the global
 * trace_options list is checked first, then the current tracer's own
 * options.  @option is stripped in place by strstrip() but the first
 * clobbered trailing character is restored before returning, so the
 * caller's buffer can be parsed again (see apply_trace_boot_options).
 */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	/* A "no" prefix inverts the option. */
	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
3623
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003624static void __init apply_trace_boot_options(void)
3625{
3626 char *buf = trace_boot_options_buf;
3627 char *option;
3628
3629 while (true) {
3630 option = strsep(&buf, ",");
3631
3632 if (!option)
3633 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003634
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05003635 if (*option)
3636 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003637
3638 /* Put back the comma to allow this to be called again */
3639 if (buf)
3640 *(buf - 1) = ',';
3641 }
3642}
3643
/*
 * Write handler for "trace_options": copy the user string into a
 * bounded stack buffer, NUL-terminate it and apply it as one option
 * token via trace_set_options().
 */
static ssize_t
tracing_trace_options_write(struct file *filp, const char __user *ubuf,
			    size_t cnt, loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	int ret;

	/* Leave room for the terminating NUL. */
	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	ret = trace_set_options(tr, buf);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
3669
/*
 * Open handler for "trace_options": take a reference on the trace
 * instance for the lifetime of the file, then set up a single_open
 * seq_file over tracing_trace_options_show().
 */
static int tracing_trace_options_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	ret = single_open(file, tracing_trace_options_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);	/* drop the reference taken above */

	return ret;
}
3687
/*
 * File operations for "trace_options"; release goes through the _tr
 * variant to drop the instance reference taken at open time.
 */
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
3695
/*
 * Contents of the tracing "README" file: a mini-HOWTO describing the
 * main control files.  Feature-specific sections are compiled in only
 * when the corresponding kernel config options are enabled.
 */
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	" trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t To clear the buffer write into this file: echo > trace\n"
	" trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	" current_tracer\t- function and latency tracers\n"
	" available_tracers\t- list of configured tracers for current_tracer\n"
	" buffer_size_kb\t- view and modify size of per cpu buffer\n"
	" buffer_total_size_kb - view total size of all cpu buffers\n\n"
	" trace_clock\t\t-change the clock used to order events\n"
	" local: Per cpu clock but may not be synced across CPUs\n"
	" global: Synced across CPUs but slows tracing down.\n"
	" counter: Not a clock, but just an increment\n"
	" uptime: Jiffy counter from time of boot\n"
	" perf: Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	" x86-tsc: TSC cycle counter\n"
#endif
	"\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	" tracing_cpumask\t- Limit which CPUs to trace\n"
	" instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t Remove sub-buffer with rmdir\n"
	" trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t option name\n"
	" saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n available_filter_functions - list of functions that can be filtered on\n"
	" set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t functions\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module\n"
	"\t Format: :mod:<module-name>\n"
	"\t example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t triggers: a command to perform when function is hit\n"
	"\t Format: <function>:<trigger>[:count]\n"
	"\t trigger: traceon, traceoff\n"
	"\t\t enable_event:<system>:<event>\n"
	"\t\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t\t dump\n"
	"\t\t cpudump\n"
	"\t example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t The first one will disable tracing every time do_fault is hit\n"
	"\t The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t The first time do trap is hit and it disables tracing, the\n"
	"\t counter will decrement to 2. If tracing is already disabled,\n"
	"\t the counter will not decrement. It only decrements when the\n"
	"\t trigger did work\n"
	"\t To remove trigger without count:\n"
	"\t echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t To remove trigger with a count:\n"
	"\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	" set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t modules: Can select a group via module command :mod:\n"
	"\t Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	" set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	" set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	" set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	" max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t snapshot buffer. Read the contents for more\n"
	"\t\t\t information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	" stack_trace\t\t- Shows the max stack trace when active\n"
	" stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t Write into this file to reset the max size (trigger a\n"
	"\t\t\t new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	" stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	" events/\t\t- Directory containing all trace event subsystems:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	" events/<system>/\t- Directory containing all trace events for <system>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t events\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t <event>:\n"
	" enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	" filter\t\t- If set, only events passing filter are traced\n"
	" trigger\t\t- If set, a command to perform when event is hit\n"
	"\t Format: <trigger>[:count][if <filter>]\n"
	"\t trigger: traceon, traceoff\n"
	"\t enable_event:<system>:<event>\n"
	"\t disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t snapshot\n"
#endif
	"\t example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t events/block/block_unplug/trigger\n"
	"\t The first disables tracing every time block_unplug is hit.\n"
	"\t The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t Like function triggers, the counter is only decremented if it\n"
	"\t enabled or disabled tracing.\n"
	"\t To remove a trigger without a count:\n"
	"\t echo '!<trigger> > <system>/<event>/trigger\n"
	"\t To remove a trigger with a count:\n"
	"\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t Filters can be ignored when removing a trigger.\n"
;
3824
/*
 * Read handler for "README": hand out slices of the static
 * readme_msg string.
 */
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}
3832
/* File operations for the read-only "README" file. */
static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
3838
/*
 * seq_file ->next for "saved_cmdlines": advance to the next slot of
 * savedcmd->map_cmdline_to_pid that holds a real PID mapping.
 */
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	/* Step past the current entry only once it has been emitted. */
	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		/* Skip unused slots. */
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;

		return ptr;
	}

	return NULL;
}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003858
/*
 * seq_file ->start for "saved_cmdlines": lock out cmdline recording
 * (preemption disabled because trace_cmdline_lock is an arch spinlock)
 * and walk forward to the entry for *pos.  The lock stays held until
 * saved_cmdlines_stop(), even when NULL is returned here.
 */
static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}
3876
/* seq_file ->stop: release the lock taken in saved_cmdlines_start(). */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
3882
/* seq_file ->show: print one mapping as "<pid> <comm>". */
static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	/* Resolve the saved comm for this pid into buf. */
	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}
3892
/* seq_file iterator over the saved pid->comm mappings. */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
3899
/* Open handler for "saved_cmdlines": plain seq_file over the map. */
static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}
3907
/* File operations for the read-only "saved_cmdlines" file. */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
3914
/*
 * Read handler for "saved_cmdlines_size": report the current capacity
 * (number of entries) of the saved-cmdlines buffer.
 */
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	/* Snapshot cmdline_num consistently with concurrent resizes. */
	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
3928
/* Free a saved_cmdlines_buffer and both arrays hanging off it. */
static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}
3935
/*
 * Replace the global saved-cmdlines buffer with a freshly allocated
 * one of capacity @val and free the old one.  The pointer swap is done
 * under trace_cmdline_lock so readers never observe a half-built
 * buffer; the old buffer is freed outside the lock.
 */
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}
3957
/*
 * Write handler for "saved_cmdlines_size": parse a decimal entry count
 * from user space and resize the saved-cmdlines buffer accordingly.
 */
static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry or less than PID_MAX_DEFAULT */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
3981
/*
 * File operations for "saved_cmdlines_size".
 * NOTE(review): unlike the sibling fops in this file, no .llseek is
 * set here, so the VFS default applies — confirm that is intentional.
 */
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
3987
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04003988#ifdef CONFIG_TRACE_ENUM_MAP_FILE
3989static union trace_enum_map_item *
3990update_enum_map(union trace_enum_map_item *ptr)
3991{
3992 if (!ptr->map.enum_string) {
3993 if (ptr->tail.next) {
3994 ptr = ptr->tail.next;
3995 /* Set ptr to the next real item (skip head) */
3996 ptr++;
3997 } else
3998 return NULL;
3999 }
4000 return ptr;
4001}
4002
/*
 * seq_file ->next for "enum_map": advance to the following real map
 * entry, letting update_enum_map() hop across head/tail bookkeeping
 * items between the chained arrays.
 */
static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
{
	union trace_enum_map_item *ptr = v;

	/*
	 * Paranoid! If ptr points to end, we don't want to increment past it.
	 * This really should never happen.
	 */
	ptr = update_enum_map(ptr);
	if (WARN_ON_ONCE(!ptr))
		return NULL;

	ptr++;

	(*pos)++;

	/* May have landed on a tail marker; resolve to a real entry. */
	ptr = update_enum_map(ptr);

	return ptr;
}
4023
/*
 * seq_file ->start for "enum_map": take the map mutex (held until
 * enum_map_stop()) and walk forward to the element at *pos.
 */
static void *enum_map_start(struct seq_file *m, loff_t *pos)
{
	union trace_enum_map_item *v;
	loff_t l = 0;

	mutex_lock(&trace_enum_mutex);

	v = trace_enum_maps;
	if (v)
		v++;	/* the first slot is the head item; skip it */

	while (v && l < *pos) {
		v = enum_map_next(m, v, &l);
	}

	return v;
}
4041
/* seq_file ->stop: drop the mutex taken in enum_map_start(). */
static void enum_map_stop(struct seq_file *m, void *v)
{
	mutex_unlock(&trace_enum_mutex);
}
4046
/* seq_file ->show: print one map as "<name> <value> (<system>)". */
static int enum_map_show(struct seq_file *m, void *v)
{
	union trace_enum_map_item *ptr = v;

	seq_printf(m, "%s %ld (%s)\n",
		   ptr->map.enum_string, ptr->map.enum_value,
		   ptr->map.system);

	return 0;
}
4057
/* Iterator operations for the "enum_map" seq_file. */
static const struct seq_operations tracing_enum_map_seq_ops = {
	.start		= enum_map_start,
	.next		= enum_map_next,
	.stop		= enum_map_stop,
	.show		= enum_map_show,
};
4064
4065static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4066{
4067 if (tracing_disabled)
4068 return -ENODEV;
4069
4070 return seq_open(filp, &tracing_enum_map_seq_ops);
4071}
4072
/* File operations for "enum_map"; standard seq_file read path. */
static const struct file_operations tracing_enum_map_fops = {
	.open		= tracing_enum_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
4079
static inline union trace_enum_map_item *
trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
{
	/* Return tail of array given the head */
	/* Layout is: head item, head.length map entries, then the tail item. */
	return ptr + ptr->head.length + 1;
}
4086
4087static void
4088trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4089 int len)
4090{
4091 struct trace_enum_map **stop;
4092 struct trace_enum_map **map;
4093 union trace_enum_map_item *map_array;
4094 union trace_enum_map_item *ptr;
4095
4096 stop = start + len;
4097
4098 /*
4099 * The trace_enum_maps contains the map plus a head and tail item,
4100 * where the head holds the module and length of array, and the
4101 * tail holds a pointer to the next list.
4102 */
4103 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4104 if (!map_array) {
4105 pr_warning("Unable to allocate trace enum mapping\n");
4106 return;
4107 }
4108
4109 mutex_lock(&trace_enum_mutex);
4110
4111 if (!trace_enum_maps)
4112 trace_enum_maps = map_array;
4113 else {
4114 ptr = trace_enum_maps;
4115 for (;;) {
4116 ptr = trace_enum_jmp_to_tail(ptr);
4117 if (!ptr->tail.next)
4118 break;
4119 ptr = ptr->tail.next;
4120
4121 }
4122 ptr->tail.next = map_array;
4123 }
4124 map_array->head.mod = mod;
4125 map_array->head.length = len;
4126 map_array++;
4127
4128 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4129 map_array->map = **map;
4130 map_array++;
4131 }
4132 memset(map_array, 0, sizeof(*map_array));
4133
4134 mutex_unlock(&trace_enum_mutex);
4135}
4136
/* Create the read-only "enum_map" file under the tracefs root. */
static void trace_create_enum_file(struct dentry *d_tracer)
{
	trace_create_file("enum_map", 0444, d_tracer,
			  NULL, &tracing_enum_map_fops);
}
4142
#else /* CONFIG_TRACE_ENUM_MAP_FILE */
/* No-op stubs when the enum_map file is not configured in. */
static inline void trace_create_enum_file(struct dentry *d_tracer) { }
static inline void trace_insert_enum_map_file(struct module *mod,
			      struct trace_enum_map **start, int len) { }
#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4148
/*
 * Register an array of enum maps: update the event subsystem's enum
 * conversions and (if configured) publish them via the enum_map file.
 */
static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	struct trace_enum_map **map;

	if (len <= 0)
		return;

	map = start;

	trace_event_enum_update(map, len);

	/* No-op stub when CONFIG_TRACE_ENUM_MAP_FILE is not set. */
	trace_insert_enum_map_file(mod, start, len);
}
4163
/* Read handler for "current_tracer": report the active tracer's name. */
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	/* trace_types_lock guards current_trace against concurrent switches. */
	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
4178
/* Reset the array's buffers, then run the tracer's own init hook. */
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}
4184
/* Record @val as the per-cpu entry count on every tracing CPU of @buf. */
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
4192
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004193#ifdef CONFIG_TRACER_MAX_TRACE
/* resize @trace_buf's ring buffer to match @size_buf's per-cpu entry counts */
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id)
{
	int cpu, ret = 0;

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* Mirror every CPU; stop at the first resize failure. */
		for_each_tracing_cpu(cpu) {
			ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
			if (ret < 0)
				break;
			per_cpu_ptr(trace_buf->data, cpu)->entries =
				per_cpu_ptr(size_buf->data, cpu)->entries;
		}
	} else {
		ret = ring_buffer_resize(trace_buf->buffer,
				 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
		if (ret == 0)
			per_cpu_ptr(trace_buf->data, cpu_id)->entries =
				per_cpu_ptr(size_buf->data, cpu_id)->entries;
	}

	return ret;
}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004219#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004220
/*
 * Resize @tr's ring buffer(s) to @size bytes for @cpu (or all CPUs when
 * @cpu == RING_BUFFER_ALL_CPUS).  When a max/snapshot buffer exists it is
 * kept the same size as the main buffer; on failure the main buffer is
 * rolled back so the two never diverge.  Caller holds trace_types_lock.
 */
static int __tracing_resize_ring_buffer(struct trace_array *tr,
					unsigned long size, int cpu)
{
	int ret;

	/*
	 * If kernel or user changes the size of the ring buffer
	 * we use the size that was given, and we can forget about
	 * expanding it later.
	 */
	ring_buffer_expanded = true;

	/* May be called before buffers are initialized */
	if (!tr->trace_buffer.buffer)
		return 0;

	ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
	if (ret < 0)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
	    !tr->current_trace->use_max_tr)
		goto out;

	ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
	if (ret < 0) {
		/* Roll the main buffer back to its own recorded entry counts. */
		int r = resize_buffer_duplicate_size(&tr->trace_buffer,
						     &tr->trace_buffer, cpu);
		if (r < 0) {
			/*
			 * AARGH! We are left with different
			 * size max buffer!!!!
			 * The max buffer is our "snapshot" buffer.
			 * When a tracer needs a snapshot (one of the
			 * latency tracers), it swaps the max buffer
			 * with the saved snap shot. We succeeded to
			 * update the size of the main buffer, but failed to
			 * update the size of the max buffer. But when we tried
			 * to reset the main buffer to the original size, we
			 * failed there too. This is very unlikely to
			 * happen, but if it does, warn and kill all
			 * tracing.
			 */
			WARN_ON(1);
			tracing_disabled = 1;
		}
		return ret;
	}

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->max_buffer, size);
	else
		per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;

 out:
#endif /* CONFIG_TRACER_MAX_TRACE */

	if (cpu == RING_BUFFER_ALL_CPUS)
		set_buffer_entries(&tr->trace_buffer, size);
	else
		per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;

	return ret;
}
4286
/*
 * Locked wrapper around __tracing_resize_ring_buffer(): validates the CPU
 * id against the tracing mask and serializes on trace_types_lock.
 * Returns the new size, -EINVAL for a bad CPU, or -ENOMEM on failure.
 */
static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
					  unsigned long size, int cpu_id)
{
	int ret = size;

	mutex_lock(&trace_types_lock);

	if (cpu_id != RING_BUFFER_ALL_CPUS) {
		/* make sure, this cpu is enabled in the mask */
		if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
			ret = -EINVAL;
			goto out;
		}
	}

	ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
	if (ret < 0)
		ret = -ENOMEM;

out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4311
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004312
/**
 * tracing_update_buffers - used by tracing facility to expand ring buffers
 *
 * To save on memory when the tracing is never used on a system with it
 * configured in. The ring buffers are set to a minimum size. But once
 * a user starts to use the tracing facility, then they need to grow
 * to their default size.
 *
 * This function is to be called when a tracer is about to be used.
 *
 * Returns 0 on success (or if already expanded), negative errno on failure.
 */
int tracing_update_buffers(void)
{
	int ret = 0;

	mutex_lock(&trace_types_lock);
	if (!ring_buffer_expanded)
		ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
	mutex_unlock(&trace_types_lock);

	return ret;
}
4335
Steven Rostedt577b7852009-02-26 23:43:05 -05004336struct trace_option_dentry;
4337
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004338static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004339create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004340
/*
 * Used to clear out the tracer before deletion of an instance.
 * Must have trace_types_lock held.
 */
static void tracing_set_nop(struct trace_array *tr)
{
	if (tr->current_trace == &nop_trace)
		return;

	/* Drop this array's reference on the outgoing tracer. */
	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	tr->current_trace = &nop_trace;
}
4357
/* Create tracefs option files for tracer @t under instance @tr. */
static void add_tracer_options(struct trace_array *tr, struct tracer *t)
{
	/* Only enable if the directory has been created already. */
	if (!tr->dir)
		return;

	create_trace_option_files(tr, t);
}
4366
/*
 * Switch instance @tr to the tracer named @buf.
 *
 * Expands the ring buffer on first use, looks the tracer up in the
 * registered trace_types list, tears down the current tracer, manages the
 * snapshot (max) buffer lifetime, and installs the new tracer.  Fails with
 * -EINVAL for an unknown/ineligible tracer and -EBUSY while trace_pipe
 * readers hold a reference.  Serialized by trace_types_lock.
 */
static int tracing_set_tracer(struct trace_array *tr, const char *buf)
{
	struct tracer *t;
#ifdef CONFIG_TRACER_MAX_TRACE
	bool had_max_tr;
#endif
	int ret = 0;

	mutex_lock(&trace_types_lock);

	if (!ring_buffer_expanded) {
		ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
						RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			goto out;
		ret = 0;
	}

	for (t = trace_types; t; t = t->next) {
		if (strcmp(t->name, buf) == 0)
			break;
	}
	if (!t) {
		ret = -EINVAL;
		goto out;
	}
	if (t == tr->current_trace)
		goto out;

	/* Some tracers are only allowed for the top level buffer */
	if (!trace_ok_for_array(t, tr)) {
		ret = -EINVAL;
		goto out;
	}

	/* If trace pipe files are being read, we can't change the tracer */
	if (tr->current_trace->ref) {
		ret = -EBUSY;
		goto out;
	}

	trace_branch_disable();

	tr->current_trace->enabled--;

	if (tr->current_trace->reset)
		tr->current_trace->reset(tr);

	/* Current trace needs to be nop_trace before synchronize_sched */
	tr->current_trace = &nop_trace;

#ifdef CONFIG_TRACER_MAX_TRACE
	had_max_tr = tr->allocated_snapshot;

	if (had_max_tr && !t->use_max_tr) {
		/*
		 * We need to make sure that the update_max_tr sees that
		 * current_trace changed to nop_trace to keep it from
		 * swapping the buffers after we resize it.
		 * The update_max_tr is called from interrupts disabled
		 * so a synchronized_sched() is sufficient.
		 */
		synchronize_sched();
		free_snapshot(tr);
	}
#endif

#ifdef CONFIG_TRACER_MAX_TRACE
	if (t->use_max_tr && !had_max_tr) {
		ret = alloc_snapshot(tr);
		if (ret < 0)
			goto out;
	}
#endif

	if (t->init) {
		ret = tracer_init(t, tr);
		if (ret)
			goto out;
	}

	tr->current_trace = t;
	tr->current_trace->enabled++;
	trace_branch_enable(tr);
 out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4456
/*
 * Write handler for "current_tracer": copy the tracer name from userspace,
 * strip trailing whitespace, and switch to it.  Returns bytes consumed.
 */
static ssize_t
tracing_set_trace_write(struct file *filp, const char __user *ubuf,
			size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+1];
	int i;
	size_t ret;
	int err;

	ret = cnt;

	/* Silently truncate over-long names; buf keeps room for the NUL. */
	if (cnt > MAX_TRACER_SIZE)
		cnt = MAX_TRACER_SIZE;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	/* strip ending whitespace. */
	for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
		buf[i] = 0;

	err = tracing_set_tracer(tr, buf);
	if (err)
		return err;

	/* Consume the full original count, including what was truncated. */
	*ppos += ret;

	return ret;
}
4489
static ssize_t
tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
		   size_t cnt, loff_t *ppos)
{
	/*
	 * Report a nanosecond-stored value (*ptr) to userspace in
	 * microseconds; (unsigned long)-1 passes through as "-1" (unset).
	 */
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n",
		     *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
4503
4504static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004505tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4506 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004507{
Hannes Eder5e398412009-02-10 19:44:34 +01004508 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004509 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004510
Peter Huewe22fe9b52011-06-07 21:58:27 +02004511 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4512 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004513 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004514
4515 *ptr = val * 1000;
4516
4517 return cnt;
4518}
4519
/* Read handler for "tracing_thresh": report the threshold in usecs. */
static ssize_t
tracing_thresh_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
}
4526
/*
 * Write handler for "tracing_thresh": update the global threshold and let
 * the current tracer react via its optional update_thresh callback.
 */
static ssize_t
tracing_thresh_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	int ret;

	mutex_lock(&trace_types_lock);
	ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
	if (ret < 0)
		goto out;

	if (tr->current_trace->update_thresh) {
		ret = tr->current_trace->update_thresh(tr);
		if (ret < 0)
			goto out;
	}

	ret = cnt;
out:
	mutex_unlock(&trace_types_lock);

	return ret;
}
4551
Chen Gange428abb2015-11-10 05:15:15 +08004552#ifdef CONFIG_TRACER_MAX_TRACE
4553
/* Read handler for "tracing_max_latency" (usecs; file holds nsecs). */
static ssize_t
tracing_max_lat_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
}
4560
/* Write handler for "tracing_max_latency" (usecs in, stored as nsecs). */
static ssize_t
tracing_max_lat_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
}
4567
Chen Gange428abb2015-11-10 05:15:15 +08004568#endif
4569
Steven Rostedtb3806b42008-05-12 21:20:46 +02004570static int tracing_open_pipe(struct inode *inode, struct file *filp)
4571{
Oleg Nesterov15544202013-07-23 17:25:57 +02004572 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004573 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004574 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004575
4576 if (tracing_disabled)
4577 return -ENODEV;
4578
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004579 if (trace_array_get(tr) < 0)
4580 return -ENODEV;
4581
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004582 mutex_lock(&trace_types_lock);
4583
Steven Rostedtb3806b42008-05-12 21:20:46 +02004584 /* create a buffer to store the information to pass to userspace */
4585 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004586 if (!iter) {
4587 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004588 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004589 goto out;
4590 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004591
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004592 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004593 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004594
4595 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4596 ret = -ENOMEM;
4597 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304598 }
4599
Steven Rostedta3097202008-11-07 22:36:02 -05004600 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304601 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004602
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004603 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04004604 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4605
David Sharp8be07092012-11-13 12:18:22 -08004606 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004607 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004608 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4609
Oleg Nesterov15544202013-07-23 17:25:57 +02004610 iter->tr = tr;
4611 iter->trace_buffer = &tr->trace_buffer;
4612 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004613 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004614 filp->private_data = iter;
4615
Steven Rostedt107bad82008-05-12 21:21:01 +02004616 if (iter->trace->pipe_open)
4617 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004618
Arnd Bergmannb4447862010-07-07 23:40:11 +02004619 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004620
4621 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004622out:
4623 mutex_unlock(&trace_types_lock);
4624 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004625
4626fail:
4627 kfree(iter->trace);
4628 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004629 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004630 mutex_unlock(&trace_types_lock);
4631 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004632}
4633
/*
 * Release handler for "trace_pipe": drop the tracer pin taken at open,
 * notify the tracer, and free the iterator and its cpumask.
 */
static int tracing_release_pipe(struct inode *inode, struct file *file)
{
	struct trace_iterator *iter = file->private_data;
	struct trace_array *tr = inode->i_private;

	mutex_lock(&trace_types_lock);

	/* Pairs with the ref++ in tracing_open_pipe(). */
	tr->current_trace->ref--;

	if (iter->trace->pipe_close)
		iter->trace->pipe_close(iter);

	mutex_unlock(&trace_types_lock);

	free_cpumask_var(iter->started);
	mutex_destroy(&iter->mutex);
	kfree(iter);

	trace_array_put(tr);

	return 0;
}
4656
/*
 * Common poll logic for trace files: readable immediately when a static
 * iterator is in use or TRACE_ITER_BLOCK is set, otherwise defer to the
 * ring buffer's poll_wait.
 */
static unsigned int
trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
{
	struct trace_array *tr = iter->tr;

	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return POLLIN | POLLRDNORM;

	if (tr->trace_flags & TRACE_ITER_BLOCK)
		/*
		 * Always select as readable when in blocking mode
		 */
		return POLLIN | POLLRDNORM;
	else
		return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
					     filp, poll_table);
}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004675
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004676static unsigned int
4677tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4678{
4679 struct trace_iterator *iter = filp->private_data;
4680
4681 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004682}
4683
/* Must be called with iter->mutex held. */
/*
 * Block until the trace buffer has data for this iterator.  Returns 1 when
 * data is (likely) available, 0 via the break path when tracing stopped
 * after something was read, -EAGAIN for O_NONBLOCK, or a negative errno
 * from the wait.  The mutex is dropped around the actual sleep.
 */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracer_tracing_is_on(iter->tr) && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}
4720
/*
 * Consumer reader.
 *
 * Read handler for trace_pipe: formats trace entries into iter->seq and
 * copies them to userspace, consuming them from the ring buffer as it
 * goes.  Blocks (via tracing_wait_pipe()) until data is available.
 * iter->mutex serializes concurrent readers on the same file descriptor.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	/* A tracer may provide its own read implementation. */
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	/* iter->seq is page sized; leave room for the terminating byte. */
	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		/* remember the seq length so a partial line can be rolled back */
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
4818
/* splice_pipe_desc release callback: free the page allocated for slot @idx. */
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}
4824
/*
 * Pipe buffer operations used by tracing_splice_read_pipe(): generic
 * confirm/release/steal/get behavior, with buffer merging disabled.
 */
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};
4832
/*
 * tracing_fill_pipe_page - format trace entries into iter->seq for splice
 * @rem:  number of bytes the caller still wants
 * @iter: trace iterator positioned at the next entry
 *
 * Fills the (page sized) iter->seq with formatted, consumed entries until
 * the seq buffer overflows, @rem would be exceeded, or the trace runs dry.
 * Returns the number of bytes still wanted (0 tells the caller to stop).
 */
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		/* remember the length so an oversized line can be rolled back */
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		/* bytes this entry added to the seq buffer */
		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			/* entry would exceed the request: drop it and stop */
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			/* no more entries */
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
4879
/*
 * Splice handler for trace_pipe: formats trace entries into freshly
 * allocated pages and feeds them to @pipe.  Serialized against other
 * readers by iter->mutex and against buffer manipulation by
 * trace_access_lock()/trace_event_read_lock().
 */
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	/* A tracer may provide its own splice implementation. */
	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	/* only splice if at least one page was filled */
	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
4966
Steven Rostedta98a3c32008-05-12 21:20:59 +02004967static ssize_t
4968tracing_entries_read(struct file *filp, char __user *ubuf,
4969 size_t cnt, loff_t *ppos)
4970{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004971 struct inode *inode = file_inode(filp);
4972 struct trace_array *tr = inode->i_private;
4973 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004974 char buf[64];
4975 int r = 0;
4976 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02004977
Steven Rostedtdb526ca2009-03-12 13:53:25 -04004978 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004979
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02004980 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004981 int cpu, buf_size_same;
4982 unsigned long size;
4983
4984 size = 0;
4985 buf_size_same = 1;
4986 /* check if all cpu sizes are same */
4987 for_each_tracing_cpu(cpu) {
4988 /* fill in the size from first enabled cpu */
4989 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004990 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
4991 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004992 buf_size_same = 0;
4993 break;
4994 }
4995 }
4996
4997 if (buf_size_same) {
4998 if (!ring_buffer_expanded)
4999 r = sprintf(buf, "%lu (expanded: %lu)\n",
5000 size >> 10,
5001 trace_buf_size >> 10);
5002 else
5003 r = sprintf(buf, "%lu\n", size >> 10);
5004 } else
5005 r = sprintf(buf, "X\n");
5006 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005007 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005008
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005009 mutex_unlock(&trace_types_lock);
5010
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005011 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5012 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005013}
5014
5015static ssize_t
5016tracing_entries_write(struct file *filp, const char __user *ubuf,
5017 size_t cnt, loff_t *ppos)
5018{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005019 struct inode *inode = file_inode(filp);
5020 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005021 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005022 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005023
Peter Huewe22fe9b52011-06-07 21:58:27 +02005024 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5025 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005026 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005027
5028 /* must have at least 1 entry */
5029 if (!val)
5030 return -EINVAL;
5031
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005032 /* value is in KB */
5033 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005034 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005035 if (ret < 0)
5036 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005037
Jiri Olsacf8517c2009-10-23 19:36:16 -04005038 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005039
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005040 return cnt;
5041}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005042
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005043static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005044tracing_total_entries_read(struct file *filp, char __user *ubuf,
5045 size_t cnt, loff_t *ppos)
5046{
5047 struct trace_array *tr = filp->private_data;
5048 char buf[64];
5049 int r, cpu;
5050 unsigned long size = 0, expanded_size = 0;
5051
5052 mutex_lock(&trace_types_lock);
5053 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005054 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005055 if (!ring_buffer_expanded)
5056 expanded_size += trace_buf_size >> 10;
5057 }
5058 if (ring_buffer_expanded)
5059 r = sprintf(buf, "%lu\n", size);
5060 else
5061 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5062 mutex_unlock(&trace_types_lock);
5063
5064 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5065}
5066
5067static ssize_t
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005068tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
5069 size_t cnt, loff_t *ppos)
5070{
5071 /*
5072 * There is no need to read what the user has written, this function
5073 * is just to make sure that there is no error when "echo" is used
5074 */
5075
5076 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005077
5078 return cnt;
5079}
5080
/*
 * Release handler for free_buffer: optionally stop tracing (when the
 * stop-on-free trace flag is set), then shrink the ring buffer to zero
 * and drop the reference taken at open time.
 */
static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	/* balances the trace_array_get() done at open */
	trace_array_put(tr);

	return 0;
}
5096
/*
 * Write handler for trace_marker: inject a userspace string into the
 * trace buffer as a TRACE_PRINT entry.
 *
 * To stay as non-intrusive as possible, the user pages are pinned and
 * mapped with kmap_atomic() and copied straight into a reserved ring
 * buffer event -- no intermediate kernel buffer, no extra locks.
 */
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which, most likely it is, because it just referenced it.
	 * But there's no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		/* drop the pages that were pinned before the failure */
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		/* the write straddles two user pages: copy both halves */
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	/* ensure the entry ends with a newline and is NUL terminated */
	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	/* unmap in reverse order of the kmap_atomic() calls above */
	for (i = nr_pages - 1; i >= 0; i--) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
5201
Li Zefan13f16d22009-12-08 11:16:11 +08005202static int tracing_clock_show(struct seq_file *m, void *v)
Zhaolei5079f322009-08-25 16:12:56 +08005203{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005204 struct trace_array *tr = m->private;
Zhaolei5079f322009-08-25 16:12:56 +08005205 int i;
5206
5207 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
Li Zefan13f16d22009-12-08 11:16:11 +08005208 seq_printf(m,
Zhaolei5079f322009-08-25 16:12:56 +08005209 "%s%s%s%s", i ? " " : "",
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005210 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
5211 i == tr->clock_id ? "]" : "");
Li Zefan13f16d22009-12-08 11:16:11 +08005212 seq_putc(m, '\n');
Zhaolei5079f322009-08-25 16:12:56 +08005213
Li Zefan13f16d22009-12-08 11:16:11 +08005214 return 0;
Zhaolei5079f322009-08-25 16:12:56 +08005215}
5216
/*
 * tracing_set_clock - switch the trace clock of @tr to @clockstr
 *
 * Looks @clockstr up in trace_clocks[] and installs the matching clock
 * function on the main buffer (and on the max buffer when configured).
 * Both buffers are reset afterwards because timestamps taken with
 * different clocks are not comparable.
 *
 * Returns 0 on success, -EINVAL for an unknown clock name.
 */
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* the max buffer may not be allocated yet */
	if (tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
5250
5251static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5252 size_t cnt, loff_t *fpos)
5253{
5254 struct seq_file *m = filp->private_data;
5255 struct trace_array *tr = m->private;
5256 char buf[64];
5257 const char *clockstr;
5258 int ret;
5259
5260 if (cnt >= sizeof(buf))
5261 return -EINVAL;
5262
5263 if (copy_from_user(&buf, ubuf, cnt))
5264 return -EFAULT;
5265
5266 buf[cnt] = 0;
5267
5268 clockstr = strstrip(buf);
5269
5270 ret = tracing_set_clock(tr, clockstr);
5271 if (ret)
5272 return ret;
5273
Zhaolei5079f322009-08-25 16:12:56 +08005274 *fpos += cnt;
5275
5276 return cnt;
5277}
5278
/*
 * Open handler for trace_clock: pin the trace array for the lifetime of
 * the file and set up the seq_file.  The reference is dropped again if
 * single_open() fails.
 */
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5296
/* Per-open state for the raw per-cpu buffer files. */
struct ftrace_buffer_info {
	struct trace_iterator	iter;	/* iterator over the ring buffer */
	void			*spare;	/* spare page used to read the ring buffer */
	unsigned int		read;	/* NOTE(review): appears to track read progress
					 * within @spare; confirm in tracing_buffers_read() */
};
5302
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005303#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open handler for the snapshot file.  Readers get a full iterator over
 * the max (snapshot) buffer via __tracing_open(); writers get a minimal
 * seq_file whose private data carries a stub iterator.  The trace array
 * reference taken here is dropped on any failure.
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5343
/*
 * Write handler for the snapshot file.  The written value selects the
 * action:
 *   0 - free the snapshot buffer (only valid on the all-cpus file),
 *   1 - allocate the snapshot buffer if needed and take a snapshot
 *       (swap the main and max buffers, per cpu or globally),
 *   * - clear the snapshot buffer contents.
 * Not allowed while the current tracer itself uses the max buffer.
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* latency tracers own the max buffer; user swaps would corrupt it */
	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		/* irqs off so the swap is not preempted mid-way */
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		/* any other value just clears the snapshot contents */
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005417
/*
 * Release handler for the snapshot file.  tracing_release() tears down a
 * reader's full iterator; for write-only opens the stub iterator and its
 * seq_file (allocated in tracing_snapshot_open()) are freed here.
 */
static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}
5435
/*
 * Forward declarations: snapshot_raw_open() below builds on the buffer
 * file operations, which are defined later in this file.
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
					   struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5442
/*
 * Open handler for the per-cpu snapshot_raw file: reuse
 * tracing_buffers_open(), then point the iterator at the max (snapshot)
 * buffer.  Refused with -EBUSY while the current tracer itself uses the
 * max buffer.
 */
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}
5464
#endif /* CONFIG_TRACER_SNAPSHOT */


/* Operations for the tracing threshold control (tracing_thresh_*). */
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_TRACER_MAX_TRACE
/* Operations for the max-latency value (latency tracers only). */
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

/* Read or switch the currently active tracer. */
static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

/* Consuming, pollable reader of formatted trace events (trace_pipe). */
static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

/* Per-cpu ring buffer size control (tracing_entries_*). */
static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* Read-only total of ring buffer entries across all cpus. */
static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* Write-only control that releases ring buffer memory. */
static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

/* User-space writes are injected into the trace buffer as markers. */
static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

/* Select/show the clock used to timestamp trace events. */
static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
/* Formatted view and control of the snapshot (max) buffer. */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

/* Raw, page-at-a-time binary reader of the snapshot buffer. */
static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
5554
/*
 * Open handler for the raw per-cpu buffer files (trace_pipe_raw and,
 * via snapshot_raw_open(), snapshot_raw).  Allocates the
 * ftrace_buffer_info that carries iterator state for later read/splice
 * calls.
 *
 * Takes a reference on the trace_array and bumps the current tracer's
 * ref count so neither can go away while the file is open; both are
 * dropped in tracing_buffers_release().
 */
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	/* trace_types_lock guards tr->current_trace and its ref count */
	mutex_lock(&trace_types_lock);

	info->iter.tr		= tr;
	info->iter.cpu_file	= tracing_get_cpu(inode);
	info->iter.trace	= tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare		= NULL;
	/* Force reading ring buffer for first read */
	info->read		= (unsigned int)-1;

	filp->private_data = info;

	/* Pin the current tracer while this file remains open */
	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5595
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005596static unsigned int
5597tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5598{
5599 struct ftrace_buffer_info *info = filp->private_data;
5600 struct trace_iterator *iter = &info->iter;
5601
5602 return trace_poll(iter, filp, poll_table);
5603}
5604
/*
 * read() handler for trace_pipe_raw: hands whole ring-buffer pages to
 * user space without parsing events.
 *
 * A "spare" page borrowed from the ring buffer is (re)filled via
 * ring_buffer_read_page(); info->read tracks how much of that page has
 * already been copied out, so a partial read resumes where it left off
 * on the next call.
 */
static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The snapshot buffer is off limits while a max_tr tracer owns it */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		/* Nothing read: block (unless O_NONBLOCK) while empty */
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	/* Copy out the unread remainder of the spare page */
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)	/* nothing at all was copied */
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}
5671
/*
 * Release for the raw buffer files: undo tracing_buffers_open() — drop
 * the pinned tracer ref, return the spare page to the ring buffer, free
 * the info and put the trace_array reference.
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	/* trace_types_lock guards current_trace->ref (see open) */
	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
5691
/*
 * Tracks one ring-buffer page lent to a pipe via splice.  The page
 * stays owned by @buffer and is handed back to it when the last
 * reference is dropped (buffer_pipe_buf_release()/buffer_spd_release()).
 */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* ring buffer the page came from */
	void			*page;		/* the read page in the pipe */
	int			ref;		/* reference count */
};
5697
5698static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5699 struct pipe_buffer *buf)
5700{
5701 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5702
5703 if (--ref->ref)
5704 return;
5705
5706 ring_buffer_free_read_page(ref->buffer, ref->page);
5707 kfree(ref);
5708 buf->private = 0;
5709}
5710
/*
 * The pipe duplicated a buffer that points at one of our ring-buffer
 * pages (e.g. tee): take another reference on the shared buffer_ref.
 *
 * NOTE(review): the plain int increment has no overflow protection;
 * confirm whether a bound/refcount_t is needed here as in later
 * kernels.
 */
static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}
5718
5719/* Pipe buffer operations for a buffer. */
Alexey Dobriyan28dfef82009-12-15 16:46:48 -08005720static const struct pipe_buf_operations buffer_pipe_buf_ops = {
Steven Rostedt2cadf912008-12-01 22:20:19 -05005721 .can_merge = 0,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005722 .confirm = generic_pipe_buf_confirm,
5723 .release = buffer_pipe_buf_release,
Masami Hiramatsud55cb6c2012-08-09 21:31:10 +09005724 .steal = generic_pipe_buf_steal,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005725 .get = buffer_pipe_buf_get,
5726};
5727
5728/*
5729 * Callback from splice_to_pipe(), if we need to release some pages
5730 * at the end of the spd in case we error'ed out in filling the pipe.
5731 */
5732static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5733{
5734 struct buffer_ref *ref =
5735 (struct buffer_ref *)spd->partial[i].private;
5736
5737 if (--ref->ref)
5738 return;
5739
5740 ring_buffer_free_read_page(ref->buffer, ref->page);
5741 kfree(ref);
5742 spd->partial[i].private = 0;
5743}
5744
/*
 * splice_read() for trace_pipe_raw: move ring-buffer pages into a pipe
 * without copying.  Each spliced page is wrapped in a buffer_ref so it
 * can be handed back to the ring buffer when the pipe consumer releases
 * it (see buffer_pipe_buf_release()).
 */
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The snapshot buffer is off limits while a max_tr tracer owns it */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	/* Only whole, page-aligned transfers make sense for raw pages */
	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	/* Fill the spd one page per iteration while data and room remain */
	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		/* ref travels with the pipe buffer; released by its ops */
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		/* A mid-loop allocation error takes precedence */
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		/* Blocking splice: wait for data, then retry the fill */
		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
5851
/* Raw, per-cpu ring buffer page reader (trace_pipe_raw). */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
5860
/*
 * Read handler for the per-cpu "stats" file: formats one cpu's ring
 * buffer statistics (entries, overruns, bytes, timestamps, dropped and
 * read event counts) into a trace_seq and copies it to user space.
 */
static ssize_t
tracing_stats_read(struct file *filp, char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	int cpu = tracing_get_cpu(inode);
	struct trace_seq *s;
	unsigned long cnt;
	unsigned long long t;
	unsigned long usec_rem;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	trace_seq_init(s);

	cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "entries: %ld\n", cnt);

	cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "overrun: %ld\n", cnt);

	cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "commit overrun: %ld\n", cnt);

	cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "bytes: %ld\n", cnt);

	/* Timestamps only convert to usecs for nanosecond-based clocks */
	if (trace_clocks[tr->clock_id].in_ns) {
		/* local or global for trace_clock */
		t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
								t, usec_rem);

		t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
		usec_rem = do_div(t, USEC_PER_SEC);
		trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
	} else {
		/* counter or tsc mode for trace_clock */
		trace_seq_printf(s, "oldest event ts: %llu\n",
				ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));

		trace_seq_printf(s, "now ts: %llu\n",
				ring_buffer_time_stamp(trace_buf->buffer, cpu));
	}

	cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "dropped events: %ld\n", cnt);

	cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
	trace_seq_printf(s, "read events: %ld\n", cnt);

	/* trace_seq_used() bounds the copy to what was actually formatted */
	count = simple_read_from_buffer(ubuf, count, ppos,
					s->buffer, trace_seq_used(s));

	kfree(s);

	return count;
}
5924
/* Per-cpu ring buffer statistics file (see tracing_stats_read()). */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
5931
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005932#ifdef CONFIG_DYNAMIC_FTRACE
5933
/*
 * Arch hook to append extra dynamic-ftrace info to the dyn info file;
 * this weak default contributes nothing.
 */
int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}
5938
/*
 * Read handler for the dynamic ftrace info file: reports the count
 * pointed to by the file's private data, followed by optional
 * arch-specific text.  The static format buffer is serialized by
 * dyn_info_mutex.
 */
static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	/* Leave room for the trailing newline appended below */
	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}
5962
/* Read-only file exposing the dynamic ftrace function count. */
static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005968#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005969
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04005970#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
/*
 * Probe callback for the bare "snapshot" function command: take a
 * snapshot every time the probed function is hit.
 */
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005976
/*
 * Probe callback for "snapshot:count": take a snapshot on each hit of
 * the probed function, at most *count times.  The probe's data pointer
 * itself serves as the counter (see ftrace_trace_snapshot_callback());
 * a value of -1 (i.e. ULONG_MAX) means unlimited.
 *
 * Fix: the counter was cast via (long *) but assigned to an
 * unsigned long *, an incompatible-pointer-type conversion; cast to the
 * variable's actual type.
 */
static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return;

	/* -1 compares as ULONG_MAX here, matching the "unlimited" marker */
	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}
5990
/*
 * seq_file "print" hook for the snapshot probes: show the probed
 * function and whether its remaining trigger count is bounded.
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long remaining = (long)data;

	seq_printf(m, "%ps:", (void *)ip);
	seq_puts(m, "snapshot");

	if (remaining != -1)
		seq_printf(m, ":count=%ld\n", remaining);
	else
		seq_puts(m, ":unlimited\n");

	return 0;
}
6008
/* Probe ops for "snapshot" with no count: snapshot on every hit. */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

/* Probe ops for "snapshot:N": snapshot on at most N hits. */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
6018
/*
 * Parser/registrar for "func:snapshot[:count]" written to
 * set_ftrace_filter.  A leading '!' in @glob unregisters the probe.
 * The snapshot buffer is allocated (if needed) before the probe can
 * fire.
 */
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;	/* default: unlimited */
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	/* "func:snapshot:" with an empty count behaves like no count */
	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	/* Make sure there is a snapshot buffer for the probe to fill */
	ret = alloc_snapshot(&global_trace);
	if (ret < 0)
		goto out;

	ret = register_ftrace_function_probe(glob, ops, count);

 out:
	return ret < 0 ? ret : 0;
}
6065
/* The "snapshot" command available in set_ftrace_filter. */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

/* Register the "snapshot" ftrace command at boot. */
static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
/* Without snapshot + dynamic ftrace there is nothing to register. */
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006078
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006079static struct dentry *tracing_get_dentry(struct trace_array *tr)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006080{
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006081 if (WARN_ON(!tr->dir))
6082 return ERR_PTR(-ENODEV);
6083
6084 /* Top directory uses NULL as the parent */
6085 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
6086 return NULL;
6087
6088 /* All sub buffers have a descriptor */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006089 return tr->dir;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006090}
6091
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006092static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
6093{
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006094 struct dentry *d_tracer;
6095
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006096 if (tr->percpu_dir)
6097 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006098
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006099 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006100 if (IS_ERR(d_tracer))
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006101 return NULL;
6102
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006103 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006104
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006105 WARN_ONCE(!tr->percpu_dir,
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006106 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006107
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006108 return tr->percpu_dir;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01006109}
6110
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006111static struct dentry *
6112trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
6113 void *data, long cpu, const struct file_operations *fops)
6114{
6115 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
6116
6117 if (ret) /* See tracing_get_cpu() */
David Howells7682c912015-03-17 22:26:16 +00006118 d_inode(ret)->i_cdev = (void *)(cpu + 1);
Oleg Nesterov649e9c72013-07-23 17:25:54 +02006119 return ret;
6120}
6121
/*
 * tracing_init_tracefs_percpu - create the per-cpu file set for one cpu
 * @tr:  the trace array the files belong to
 * @cpu: the cpu to create "per_cpu/cpuN/..." files for
 *
 * Creates per_cpu/cpu%ld/ with the per-cpu variants of the trace,
 * pipe, stats, buffer-size and (optionally) snapshot files.  Failures
 * are logged but not propagated.
 */
static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	/* raw binary reads of this cpu's ring buffer pages */
	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
6164
Steven Rostedt60a11772008-05-12 21:20:44 +02006165#ifdef CONFIG_FTRACE_SELFTEST
6166/* Let selftest have access to static functions in this file */
6167#include "trace_selftest.c"
6168#endif
6169
Steven Rostedt577b7852009-02-26 23:43:05 -05006170static ssize_t
6171trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6172 loff_t *ppos)
6173{
6174 struct trace_option_dentry *topt = filp->private_data;
6175 char *buf;
6176
6177 if (topt->flags->val & topt->opt->bit)
6178 buf = "1\n";
6179 else
6180 buf = "0\n";
6181
6182 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6183}
6184
6185static ssize_t
6186trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
6187 loff_t *ppos)
6188{
6189 struct trace_option_dentry *topt = filp->private_data;
6190 unsigned long val;
Steven Rostedt577b7852009-02-26 23:43:05 -05006191 int ret;
6192
Peter Huewe22fe9b52011-06-07 21:58:27 +02006193 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6194 if (ret)
Steven Rostedt577b7852009-02-26 23:43:05 -05006195 return ret;
6196
Li Zefan8d18eaa2009-12-08 11:17:06 +08006197 if (val != 0 && val != 1)
Steven Rostedt577b7852009-02-26 23:43:05 -05006198 return -EINVAL;
Li Zefan8d18eaa2009-12-08 11:17:06 +08006199
6200 if (!!(topt->flags->val & topt->opt->bit) != val) {
6201 mutex_lock(&trace_types_lock);
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05006202 ret = __set_tracer_option(topt->tr, topt->flags,
Steven Rostedtc757bea2009-12-21 22:35:16 -05006203 topt->opt, !val);
Li Zefan8d18eaa2009-12-08 11:17:06 +08006204 mutex_unlock(&trace_types_lock);
6205 if (ret)
6206 return ret;
Steven Rostedt577b7852009-02-26 23:43:05 -05006207 }
6208
6209 *ppos += cnt;
6210
6211 return cnt;
6212}
6213
6214
/* File operations for the tracer-specific files under options/ */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
6221
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 * idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 * ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	/* @data points at tr->trace_flags_index[index], whose value IS index */
	*pindex = *(unsigned char *)data;

	/* Back up to &trace_flags_index[0], then recover the trace_array */
	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
6254
Steven Rostedta8259072009-02-26 22:19:12 -05006255static ssize_t
6256trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6257 loff_t *ppos)
6258{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006259 void *tr_index = filp->private_data;
6260 struct trace_array *tr;
6261 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006262 char *buf;
6263
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006264 get_tr_index(tr_index, &tr, &index);
6265
6266 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05006267 buf = "1\n";
6268 else
6269 buf = "0\n";
6270
6271 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6272}
6273
6274static ssize_t
6275trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
6276 loff_t *ppos)
6277{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006278 void *tr_index = filp->private_data;
6279 struct trace_array *tr;
6280 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006281 unsigned long val;
6282 int ret;
6283
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006284 get_tr_index(tr_index, &tr, &index);
6285
Peter Huewe22fe9b52011-06-07 21:58:27 +02006286 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6287 if (ret)
Steven Rostedta8259072009-02-26 22:19:12 -05006288 return ret;
6289
Zhaoleif2d84b62009-08-07 18:55:48 +08006290 if (val != 0 && val != 1)
Steven Rostedta8259072009-02-26 22:19:12 -05006291 return -EINVAL;
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006292
6293 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006294 ret = set_tracer_flag(tr, 1 << index, val);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04006295 mutex_unlock(&trace_types_lock);
Steven Rostedta8259072009-02-26 22:19:12 -05006296
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04006297 if (ret < 0)
6298 return ret;
6299
Steven Rostedta8259072009-02-26 22:19:12 -05006300 *ppos += cnt;
6301
6302 return cnt;
6303}
6304
/* File operations for the core (tracer-independent) files under options/ */
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
6311
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006312struct dentry *trace_create_file(const char *name,
Al Virof4ae40a2011-07-24 04:33:43 -04006313 umode_t mode,
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006314 struct dentry *parent,
6315 void *data,
6316 const struct file_operations *fops)
6317{
6318 struct dentry *ret;
6319
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006320 ret = tracefs_create_file(name, mode, parent, data, fops);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006321 if (!ret)
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006322 pr_warning("Could not create tracefs '%s' entry\n", name);
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006323
6324 return ret;
6325}
6326
6327
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006328static struct dentry *trace_options_init_dentry(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006329{
6330 struct dentry *d_tracer;
Steven Rostedta8259072009-02-26 22:19:12 -05006331
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006332 if (tr->options)
6333 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006334
Steven Rostedt (Red Hat)7eeafbc2015-01-26 21:00:48 -05006335 d_tracer = tracing_get_dentry(tr);
Steven Rostedt (Red Hat)14a5ae42015-01-20 11:14:16 -05006336 if (IS_ERR(d_tracer))
Steven Rostedta8259072009-02-26 22:19:12 -05006337 return NULL;
6338
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006339 tr->options = tracefs_create_dir("options", d_tracer);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006340 if (!tr->options) {
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05006341 pr_warning("Could not create tracefs directory 'options'\n");
Steven Rostedta8259072009-02-26 22:19:12 -05006342 return NULL;
6343 }
6344
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006345 return tr->options;
Steven Rostedta8259072009-02-26 22:19:12 -05006346}
6347
Steven Rostedt577b7852009-02-26 23:43:05 -05006348static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006349create_trace_option_file(struct trace_array *tr,
6350 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006351 struct tracer_flags *flags,
6352 struct tracer_opt *opt)
6353{
6354 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006355
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006356 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006357 if (!t_options)
6358 return;
6359
6360 topt->flags = flags;
6361 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006362 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006363
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006364 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006365 &trace_options_fops);
6366
Steven Rostedt577b7852009-02-26 23:43:05 -05006367}
6368
/*
 * create_trace_option_files - create option files for one tracer's flags
 * @tr:     the trace array to add the files to
 * @tracer: the tracer whose flags should be exposed
 *
 * Allocates a trace_option_dentry array for the tracer's options,
 * records it in tr->topts (so instance_rmdir() can free it), and
 * creates one tracefs file per option.  Does nothing if the tracer
 * has no flags, is not usable in this instance, or its flags block
 * was already added (tracers may share a flags block).
 */
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/*
		 * Check if these flags have already been added.
		 * Some tracers share flags.
		 */
		if (tr->topts[i].tracer->flags == tracer->flags)
			return;
	}

	opts = flags->opts;

	/* Count the options (the array is NULL-name terminated) */
	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	/* Grow the per-instance bookkeeping array by one entry */
	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
6432
Steven Rostedta8259072009-02-26 22:19:12 -05006433static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006434create_trace_option_core_file(struct trace_array *tr,
6435 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006436{
6437 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006438
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006439 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006440 if (!t_options)
6441 return NULL;
6442
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006443 return trace_create_file(option, 0644, t_options,
6444 (void *)&tr->trace_flags_index[index],
6445 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006446}
6447
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006448static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006449{
6450 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006451 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05006452 int i;
6453
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006454 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006455 if (!t_options)
6456 return;
6457
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006458 for (i = 0; trace_options[i]; i++) {
6459 if (top_level ||
6460 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6461 create_trace_option_core_file(tr, trace_options[i], i);
6462 }
Steven Rostedta8259072009-02-26 22:19:12 -05006463}
6464
Steven Rostedt499e5472012-02-22 15:50:28 -05006465static ssize_t
6466rb_simple_read(struct file *filp, char __user *ubuf,
6467 size_t cnt, loff_t *ppos)
6468{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006469 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006470 char buf[64];
6471 int r;
6472
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006473 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006474 r = sprintf(buf, "%d\n", r);
6475
6476 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6477}
6478
/*
 * rb_simple_write - "tracing_on" write handler
 *
 * A non-zero value turns the trace array's ring buffer on, zero turns
 * it off.  The current tracer's start()/stop() callbacks (if any) are
 * invoked under trace_types_lock so the tracer can react to the state
 * change.
 */
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		/* Serialize against tracer changes */
		mutex_lock(&trace_types_lock);
		if (val) {
			/* Turn the buffer on before notifying the tracer */
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
6510
/* File operations for the "tracing_on" file */
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
6518
/* The "instances" directory; mkdir/rmdir in it create/destroy trace arrays */
struct dentry *trace_instance_dir;

/* Forward declaration: defined below, needed by instance_mkdir() */
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04006523
/*
 * allocate_trace_buffer - allocate the ring buffer and per-cpu data for @buf
 * @tr:   trace array that @buf belongs to
 * @buf:  the trace buffer to populate
 * @size: requested ring buffer size
 *
 * Returns 0 on success or -ENOMEM; on failure nothing is left allocated.
 */
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	/* Honor the instance's "overwrite" trace flag for the ring buffer */
	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		/* Unwind the ring buffer allocation; NULL against reuse */
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	/*
	 * NOTE(review): this operates on tr->trace_buffer even when @buf
	 * is tr->max_buffer -- looks intentional, but worth confirming.
	 */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
6550
/*
 * allocate_trace_buffers - set up the main (and max) buffers for @tr
 * @tr:   the trace array to allocate buffers for
 * @size: requested ring buffer size
 *
 * Allocates the primary trace buffer and, when CONFIG_TRACER_MAX_TRACE
 * is enabled, the max/snapshot buffer.  The max buffer only gets the
 * full @size if a snapshot was requested on the kernel command line;
 * otherwise it is allocated at minimal size (1).
 *
 * Returns 0 on success or -ENOMEM with everything freed on failure.
 */
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		/* Undo the main buffer allocation; NULL pointers against reuse */
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
6579
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006580static void free_trace_buffer(struct trace_buffer *buf)
6581{
6582 if (buf->buffer) {
6583 ring_buffer_free(buf->buffer);
6584 buf->buffer = NULL;
6585 free_percpu(buf->data);
6586 buf->data = NULL;
6587 }
6588}
6589
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006590static void free_trace_buffers(struct trace_array *tr)
6591{
6592 if (!tr)
6593 return;
6594
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006595 free_trace_buffer(&tr->trace_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006596
6597#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006598 free_trace_buffer(&tr->max_buffer);
Steven Rostedt (Red Hat)23aaa3c2014-06-06 00:01:46 -04006599#endif
6600}
6601
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006602static void init_trace_flags_index(struct trace_array *tr)
6603{
6604 int i;
6605
6606 /* Used by the trace options files */
6607 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6608 tr->trace_flags_index[i] = i;
6609}
6610
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04006611static void __update_tracer_options(struct trace_array *tr)
6612{
6613 struct tracer *t;
6614
6615 for (t = trace_types; t; t = t->next)
6616 add_tracer_options(tr, t);
6617}
6618
/*
 * update_tracer_options - add option files for all registered tracers
 *
 * Locked wrapper around __update_tracer_options().
 */
static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
6625
/*
 * instance_mkdir - create a new trace array instance
 * @name: directory name given to mkdir under instances/
 *
 * Called by tracefs when a directory is created inside "instances".
 * Allocates a new trace_array, its buffers and tracefs files, and
 * links it into ftrace_trace_arrays.
 *
 * Returns 0 on success, -EEXIST if the name is already taken, or a
 * negative errno on allocation/setup failure.
 */
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	/* Reject duplicate instance names */
	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	/* New instances inherit the global instance's current flags */
	tr->trace_flags = global_trace.trace_flags;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	/* Instances start with the no-op tracer */
	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		/* Tear down the directory we just created */
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	/* Make the new instance visible */
	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	/* Unwind whatever was set up before the failure */
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
6699
/*
 * instance_rmdir - destroy a trace array instance
 * @name: directory name given to rmdir under instances/
 *
 * Called by tracefs when a directory inside "instances" is removed.
 * Returns -ENODEV if no such instance exists, -EBUSY if the instance
 * (or its current tracer) still has references, 0 on success.
 */
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	/* Refuse removal while the instance is still in use */
	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	/* Unlink first so no new users can find the instance */
	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	/* Free the per-tracer option bookkeeping added by
	 * create_trace_option_files() */
	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
6747
Steven Rostedt277ba042012-08-03 16:10:49 -04006748static __init void create_trace_instances(struct dentry *d_tracer)
6749{
Steven Rostedt (Red Hat)eae47352015-01-21 10:01:39 -05006750 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
6751 instance_mkdir,
6752 instance_rmdir);
Steven Rostedt277ba042012-08-03 16:10:49 -04006753 if (WARN_ON(!trace_instance_dir))
6754 return;
Steven Rostedt277ba042012-08-03 16:10:49 -04006755}
6756
/*
 * init_tracer_tracefs - populate a trace array's tracefs directory
 * @tr:       the trace array to create files for
 * @d_tracer: the directory to create the files in
 *
 * Creates the standard control and output files ("trace",
 * "trace_pipe", "tracing_on", ...), the options directory, the
 * function filter files, the optional max-latency and snapshot
 * files, and the per-cpu subdirectories.
 */
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	/* Per-cpu variants live under per_cpu/cpuN/ */
	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

}
6817
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05006818static struct vfsmount *trace_automount(void *ingore)
6819{
6820 struct vfsmount *mnt;
6821 struct file_system_type *type;
6822
6823 /*
6824 * To maintain backward compatibility for tools that mount
6825 * debugfs to get to the tracing facility, tracefs is automatically
6826 * mounted to the debugfs/tracing directory.
6827 */
6828 type = get_fs_type("tracefs");
6829 if (!type)
6830 return NULL;
6831 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6832 put_filesystem(type);
6833 if (IS_ERR(mnt))
6834 return NULL;
6835 mntget(mnt);
6836
6837 return mnt;
6838}
6839
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	/*
	 * Creating files needs tracefs up and running, and — when debugfs
	 * is built in — debugfs too, for the automount point made below.
	 */
	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * NULL means success here: callers create their files relative
	 * to the tracefs root (a NULL parent), not this dentry.
	 */
	return NULL;
}
6875
/* Linker-provided bounds of the built-in enum map section. */
extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

/* Register the enum maps compiled into the kernel image itself. */
static void __init trace_enum_init(void)
{
	int len;

	/* Entry count is the distance between the section markers. */
	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	/* NULL module means these maps belong to the core kernel. */
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}
6886
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04006887#ifdef CONFIG_MODULES
6888static void trace_module_add_enums(struct module *mod)
6889{
6890 if (!mod->num_trace_enums)
6891 return;
6892
6893 /*
6894 * Modules with bad taint do not have events created, do
6895 * not bother with enums either.
6896 */
6897 if (trace_module_has_bad_taint(mod))
6898 return;
6899
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006900 trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04006901}
6902
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/*
 * Remove a module's enum maps from the global trace_enum_maps list
 * when the module is unloaded. Each module's maps form a contiguous
 * run on the list, identified by a head item whose ->head.mod points
 * at the owning module and terminated by a tail item (reached via
 * trace_enum_jmp_to_tail()).
 */
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	/*
	 * Walk the list one module-run at a time; @last tracks the
	 * link that points at the current run, for unlinking below.
	 */
	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	/*
	 * Unlink the run and free it. NOTE(review): the single kfree()
	 * presumably covers the whole head..tail run, i.e. the insert
	 * path allocates it as one block — confirm against
	 * trace_insert_enum_map().
	 */
	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
6934
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04006935static int trace_module_notify(struct notifier_block *self,
6936 unsigned long val, void *data)
6937{
6938 struct module *mod = data;
6939
6940 switch (val) {
6941 case MODULE_STATE_COMING:
6942 trace_module_add_enums(mod);
6943 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006944 case MODULE_STATE_GOING:
6945 trace_module_remove_enums(mod);
6946 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04006947 }
6948
6949 return 0;
6950}
6951
6952static struct notifier_block trace_module_nb = {
6953 .notifier_call = trace_module_notify,
6954 .priority = 0,
6955};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006956#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04006957
/*
 * Create the top-level tracefs files for the global trace array.
 * Runs as an fs_initcall() (registered at the bottom of this file).
 */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	/*
	 * On error, return 0 anyway: there is no way to recover,
	 * and failing the initcall would not help.
	 */
	if (IS_ERR(d_tracer))
		return 0;

	/* Per-trace-array files (trace, trace_pipe, options, ...). */
	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	/* Built-in enum maps first, then the file exposing them. */
	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	/* Directory for creating sub-buffer instances. */
	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
7001
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007002static int trace_panic_handler(struct notifier_block *this,
7003 unsigned long event, void *unused)
7004{
Steven Rostedt944ac422008-10-23 19:26:08 -04007005 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007006 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007007 return NOTIFY_OK;
7008}
7009
7010static struct notifier_block trace_panic_notifier = {
7011 .notifier_call = trace_panic_handler,
7012 .next = NULL,
7013 .priority = 150 /* priority: INT_MAX >= x >= 0 */
7014};
7015
7016static int trace_die_handler(struct notifier_block *self,
7017 unsigned long val,
7018 void *data)
7019{
7020 switch (val) {
7021 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04007022 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007023 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007024 break;
7025 default:
7026 break;
7027 }
7028 return NOTIFY_OK;
7029}
7030
7031static struct notifier_block trace_die_notifier = {
7032 .notifier_call = trace_die_handler,
7033 .priority = 200
7034};
7035
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007048
/*
 * Print the accumulated contents of @s to the console at KERN_TRACE
 * level, clamping the length defensively, then reset the sequence
 * for reuse. Used by ftrace_dump() to emit one line at a time.
 */
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
7071
/*
 * Initialize @iter to read the global trace buffer across all CPUs,
 * using the current tracer. Called by ftrace_dump() below.
 */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	/* Let the tracer set up any private iterator state. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
7090
/*
 * ftrace_dump - dump the trace buffers to the console with printk()
 * @oops_dump_mode: DUMP_ALL dumps every CPU's buffer, DUMP_ORIG only
 *	the current CPU's, DUMP_NONE does nothing.
 *
 * Called from the panic/die notifiers above (and exported for other
 * dump paths). Runs with interrupts disabled for its whole duration.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	/* Keep new writes out of the per-cpu buffers while we read. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	/* Remember the SYM_USEROBJ bit so it can be restored at the end. */
	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read the
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		/* This loop can run long with IRQs off; pet the NMI watchdog. */
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Restore the saved SYM_USEROBJ bit. */
	tr->trace_flags |= old_userobj;

	/* Let writers back into the per-cpu buffers. */
	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007201
/*
 * Allocate and initialize the global trace array and its ring
 * buffers at boot (called from trace_init() below). Uses goto-based
 * cleanup: each failure label frees everything acquired before it.
 *
 * Returns 0 on success, negative errno on allocation failure.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidently add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	/* Honor a trace_clock= boot parameter, if one was given. */
	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007306
/*
 * Boot-time tracing initialization: allocate the trace buffers and
 * bring up the trace event subsystem.
 */
void __init trace_init(void)
{
	if (tracepoint_printk) {
		/*
		 * tracepoint_printk needs its own iterator to format
		 * events for the console; disable the feature if the
		 * allocation fails.
		 */
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}
7318
/* Drop a stale reference to an unregistered boot-time tracer name. */
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}
7337
/* Create tracefs files once filesystems are up; clear stale boot tracer last. */
fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);