/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/sched/rt.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk(), could
 * occur at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;

/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

cpumask_var_t __read_mostly tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console. This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;
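/*
 * Illustrative usage (assuming the usual sysctl and boot-parameter
 * setup described above):
 *
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops	(dump all CPUs)
 *	echo 2 > /proc/sys/kernel/ftrace_dump_on_oops	(dump the oops CPU)
 *
 * or boot with "ftrace_dump_on_oops" / "ftrace_dump_on_oops=orig_cpu".
 */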

/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * from "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
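/*
 * Illustrative layout of one saved array (a sketch of the scheme
 * described above, not a structure defined here):
 *
 *	item[0]            head (length = N, mod = module or NULL)
 *	item[1] .. item[N] map  (the saved enum/value pairs)
 *	item[N+1]          tail (next = the next saved array, or NULL)
 */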
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;

static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
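/*
 * Example (illustrative): booting with "ftrace=function" selects the
 * function tracer at boot; the name must match a tracer registered
 * with register_tracer() below, and the ring buffer is expanded early.
 */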

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);

static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);

static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);


static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);

static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);

static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 1;
}
__setup("tp_printk", set_tracepoint_printk);

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
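/*
 * The "nsec += 500" rounds to the nearest microsecond instead of
 * truncating: for example, ns2usecs(1499) == 1 while ns2usecs(1500) == 2.
 */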

/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)


/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptor of the pages in the memory is used to hold
 * the linked list by linking the lru item in the page descriptor
 * to each of the pages in the buffer per CPU.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);

int trace_array_get(struct trace_array *this_tr)
{
	struct trace_array *tr;
	int ret = -ENODEV;

	mutex_lock(&trace_types_lock);
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr == this_tr) {
			tr->ref++;
			ret = 0;
			break;
		}
	}
	mutex_unlock(&trace_types_lock);

	return ret;
}

static void __trace_array_put(struct trace_array *this_tr)
{
	WARN_ON(!this_tr->ref);
	this_tr->ref--;
}

void trace_array_put(struct trace_array *this_tr)
{
	mutex_lock(&trace_types_lock);
	__trace_array_put(this_tr);
	mutex_unlock(&trace_types_lock);
}

int filter_check_discard(struct trace_event_file *file, void *rec,
			 struct ring_buffer *buffer,
			 struct ring_buffer_event *event)
{
	if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
	    !filter_match_preds(file->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(filter_check_discard);

int call_filter_check_discard(struct trace_event_call *call, void *rec,
			      struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
	    !filter_match_preds(call->filter, rec)) {
		ring_buffer_discard_commit(buffer, event);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(call_filter_check_discard);

static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!buf->buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(buf->buffer, cpu);
	ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);

	return ts;
}

cycle_t ftrace_now(int cpu)
{
	return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
}

/**
 * tracing_is_enabled - Show if global_trace has been disabled
 *
 * Shows if the global trace has been enabled or not. It uses the
 * mirror flag "buffer_disabled" to be used in fast paths such as for
 * the irqsoff tracer. But it may be inaccurate due to races. If you
 * need to know the accurate state, use tracing_is_on() which is a little
 * slower, but accurate.
 */
int tracing_is_enabled(void)
{
	/*
	 * For quick access (irqsoff uses this in fast path), just
	 * return the mirror variable of the state of the ring buffer.
	 * It's a little racy, but we don't really care.
	 */
	smp_rmb();
	return !global_trace.buffer_disabled;
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() ..etc)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes
 * to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * concurrently.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == RING_BUFFER_ALL_CPUS) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
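/*
 * Illustrative use of the primitives above (a sketch, not a caller in
 * this file): a reader of one cpu's buffer brackets its accesses with
 *
 *	trace_access_lock(cpu);
 *	... consume events from that per-cpu buffer ...
 *	trace_access_unlock(cpu);
 *
 * while a reader of all buffers passes RING_BUFFER_ALL_CPUS, taking
 * all_cpu_access_lock for write and excluding every per-cpu reader.
 */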

#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif

static void tracer_tracing_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_on(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 0;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	tracer_tracing_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * __trace_puts - write a constant string into the trace buffer.
 * @ip:	   The address of the caller
 * @str:   The constant string to write
 * @size:  The size of the string.
 */
int __trace_puts(unsigned long ip, const char *str, int size)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	int alloc;
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	alloc = sizeof(*entry) + size + 2; /* possible \n added */

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, str, size);

	/* Add a newline if necessary */
	if (entry->buf[size - 1] != '\n') {
		entry->buf[size] = '\n';
		entry->buf[size + 1] = '\0';
	} else
		entry->buf[size] = '\0';

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return size;
}
EXPORT_SYMBOL_GPL(__trace_puts);
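/*
 * Illustrative caller (a sketch): in-tree users normally go through
 * the trace_puts() macro, which computes the length at compile time
 * and picks __trace_puts() or __trace_bputs(); a direct call would
 * look like:
 *
 *	__trace_puts(_THIS_IP_, "hello from foo\n", 15);
 */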

/**
 * __trace_bputs - write the pointer to a constant string into trace buffer
 * @ip:	   The address of the caller
 * @str:   The constant string to write to the buffer
 */
int __trace_bputs(unsigned long ip, const char *str)
{
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct bputs_entry *entry;
	unsigned long irq_flags;
	int size = sizeof(struct bputs_entry);
	int pc;

	if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
		return 0;

	pc = preempt_count();

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	local_save_flags(irq_flags);
	buffer = global_trace.trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
					  irq_flags, pc);
	if (!event)
		return 0;

	entry = ring_buffer_event_data(event);
	entry->ip  = ip;
	entry->str = str;

	__buffer_unlock_commit(buffer, event);
	ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);

	return 1;
}
EXPORT_SYMBOL_GPL(__trace_bputs);

#ifdef CONFIG_TRACER_SNAPSHOT
/**
 * tracing_snapshot - take a snapshot of the current buffer.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 *
 * Note, make sure to allocate the snapshot with either
 * a tracing_snapshot_alloc(), or by doing it manually
 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
 *
 * If the snapshot buffer is not allocated, it will stop tracing.
 * Basically making a permanent snapshot.
 */
void tracing_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	struct tracer *tracer = tr->current_trace;
	unsigned long flags;

	if (in_nmi()) {
		internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
		internal_trace_puts("*** snapshot is being ignored        ***\n");
		return;
	}

	if (!tr->allocated_snapshot) {
		internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
		internal_trace_puts("*** stopping trace here!   ***\n");
		tracing_off();
		return;
	}

	/* Note, snapshot can not be used when the tracer uses it */
	if (tracer->use_max_tr) {
		internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
		internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
		return;
	}

	local_irq_save(flags);
	update_max_tr(tr, current, smp_processor_id());
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
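/*
 * Illustrative use (a sketch): once the snapshot buffer is allocated,
 * e.g. via tracing_snapshot_alloc() or "echo 1 > .../snapshot", a
 * caller can freeze the interesting data while tracing continues:
 *
 *	if (suspicious_condition)
 *		tracing_snapshot();
 */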

static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
					struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);

static int alloc_snapshot(struct trace_array *tr)
{
	int ret;

	if (!tr->allocated_snapshot) {

		/* allocate spare buffer */
		ret = resize_buffer_duplicate_size(&tr->max_buffer,
				   &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
		if (ret < 0)
			return ret;

		tr->allocated_snapshot = true;
	}

	return 0;
}

static void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	set_buffer_entries(&tr->max_buffer, 1);
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}

/**
 * tracing_alloc_snapshot - allocate snapshot buffer.
 *
 * This only allocates the snapshot buffer if it isn't already
 * allocated - it doesn't also take a snapshot.
 *
 * This is meant to be used in cases where the snapshot buffer needs
 * to be set up for events that can't sleep but need to be able to
 * trigger a snapshot.
 */
int tracing_alloc_snapshot(void)
{
	struct trace_array *tr = &global_trace;
	int ret;

	ret = alloc_snapshot(tr);
	WARN_ON(ret < 0);

	return ret;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);

/**
 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to tracing_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */

static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);

void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}

/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	return !tr->buffer_disabled;
}

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
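/*
 * Example (illustrative): "trace_buf_size=1441792" requests the default
 * size explicitly, and since memparse() accepts K/M/G suffixes,
 * "trace_buf_size=16M" requests a 16 MiB buffer (rounded to page size
 * as noted above).
 */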

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
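/*
 * For illustration: assuming trace.h carries an entry such as
 * C(PRINT_PARENT, "print-parent"), the C(a, b) definition above
 * expands it to just "print-parent", making trace_options[] a
 * NULL-terminated table of option names indexed by bit position.
 */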

static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns;		/* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
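/*
 * Illustrative behavior (a sketch of the contract above): if userspace
 * writes "foo bar\n", the first call copies "foo" into parser->buffer
 * and returns 4 (the token plus its delimiting space); a later call at
 * the advanced *ppos yields "bar". A token cut off at the end of a
 * write sets parser->cont so the next call continues the same token.
 */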
1004
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001005/* TODO add a seq_buf_to_buffer() */
Dmitri Vorobievb8b94262009-03-22 19:11:11 +02001006static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001007{
1008 int len;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001009
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001010 if (trace_seq_used(s) <= s->seq.readpos)
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001011 return -EBUSY;
1012
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05001013 len = trace_seq_used(s) - s->seq.readpos;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001014 if (cnt > len)
1015 cnt = len;
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001016 memcpy(buf, s->buffer + s->seq.readpos, cnt);
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001017
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04001018 s->seq.readpos += cnt;
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02001019 return cnt;
1020}
1021
Tim Bird0e950172010-02-25 15:36:43 -08001022unsigned long __read_mostly tracing_thresh;
1023
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001024#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001025/*
1026 * Copy the new maximum trace into the separate maximum-trace
1027 * structure. (this way the maximum trace is permanently saved,
1028 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
1029 */
1030static void
1031__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1032{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001033 struct trace_buffer *trace_buf = &tr->trace_buffer;
1034 struct trace_buffer *max_buf = &tr->max_buffer;
1035 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1036 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001037
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001038 max_buf->cpu = cpu;
1039 max_buf->time_start = data->preempt_timestamp;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001040
Steven Rostedt (Red Hat)6d9b3fa2014-01-14 11:28:38 -05001041 max_data->saved_latency = tr->max_latency;
Steven Rostedt8248ac02009-09-02 12:27:41 -04001042 max_data->critical_start = data->critical_start;
1043 max_data->critical_end = data->critical_end;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001044
Arnaldo Carvalho de Melo1acaa1b2010-03-05 18:23:50 -03001045 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
Steven Rostedt8248ac02009-09-02 12:27:41 -04001046 max_data->pid = tsk->pid;
Steven Rostedt (Red Hat)f17a5192013-05-30 21:10:37 -04001047 /*
1048 * If tsk == current, then use current_uid(), as that does not use
1049 * RCU. The irq tracer can be called out of RCU scope.
1050 */
1051 if (tsk == current)
1052 max_data->uid = current_uid();
1053 else
1054 max_data->uid = task_uid(tsk);
1055
Steven Rostedt8248ac02009-09-02 12:27:41 -04001056 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1057 max_data->policy = tsk->policy;
1058 max_data->rt_priority = tsk->rt_priority;
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001059
1060 /* record this tasks comm */
1061 tracing_record_cmdline(tsk);
1062}
1063
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001064/**
1065 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1066 * @tr: tracer
1067 * @tsk: the task with the latency
1068 * @cpu: The cpu that initiated the trace.
1069 *
1070 * Flip the buffers between the @tr and the max_tr and record information
1071 * about which task was the cause of this latency.
1072 */
Ingo Molnare309b412008-05-12 21:20:51 +02001073void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001074update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1075{
Steven Rostedt (Red Hat)2721e722013-03-12 11:32:32 -04001076 struct ring_buffer *buf;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001077
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001078 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001079 return;
1080
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001081 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt34600f02013-01-22 13:35:11 -05001082
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05001083 if (!tr->allocated_snapshot) {
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001084 /* Only the nop tracer should hit this when disabling */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001085 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001086 return;
Hiraku Toyookadebdd572012-12-26 11:53:00 +09001087 }
Steven Rostedt34600f02013-01-22 13:35:11 -05001088
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001089 arch_spin_lock(&tr->max_lock);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001090
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001091 buf = tr->trace_buffer.buffer;
1092 tr->trace_buffer.buffer = tr->max_buffer.buffer;
1093 tr->max_buffer.buffer = buf;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001094
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001095 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001096 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001097}
1098
1099/**
1100 * update_max_tr_single - only copy one trace over, and reset the rest
1101 * @tr - tracer
1102 * @tsk - task with the latency
1103 * @cpu - the cpu of the buffer to copy.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001104 *
1105 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001106 */
Ingo Molnare309b412008-05-12 21:20:51 +02001107void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001108update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1109{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001110 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001111
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001112 if (tr->stop_count)
Steven Rostedtb8de7bd12009-08-31 22:32:27 -04001113 return;
1114
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02001115 WARN_ON_ONCE(!irqs_disabled());
Steven Rostedt6c244992013-04-29 20:08:14 -04001116 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001117 /* Only the nop tracer should hit this when disabling */
Linus Torvalds9e8529a2013-04-29 13:55:38 -07001118 WARN_ON_ONCE(tr->current_trace != &nop_trace);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001119 return;
Steven Rostedt (Red Hat)2930e042013-03-26 17:33:00 -04001120 }
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09001121
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001122 arch_spin_lock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001123
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001124 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001125
Steven Rostedte8165db2009-09-03 19:13:05 -04001126 if (ret == -EBUSY) {
1127 /*
1128 * We failed to swap the buffer due to a commit taking
1129 * place on this CPU. We fail to record, but we reset
1130 * the max trace buffer (no one writes directly to it)
1131 * and flag that it failed.
1132 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001133 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
Steven Rostedte8165db2009-09-03 19:13:05 -04001134 "Failed to swap buffers due to commit in progress\n");
1135 }
1136
Steven Rostedte8165db2009-09-03 19:13:05 -04001137 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001138
1139 __update_max_tr(tr, tsk, cpu);
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001140 arch_spin_unlock(&tr->max_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001141}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001142#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001143
Rabin Vincente30f53a2014-11-10 19:46:34 +01001144static int wait_on_pipe(struct trace_iterator *iter, bool full)
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001145{
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05001146 /* Iterators are static, they should be filled or empty */
1147 if (trace_buffer_iter(iter, iter->cpu_file))
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04001148 return 0;
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001149
Rabin Vincente30f53a2014-11-10 19:46:34 +01001150 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1151 full);
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001152}
1153
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001154#ifdef CONFIG_FTRACE_STARTUP_TEST
1155static int run_tracer_selftest(struct tracer *type)
1156{
1157 struct trace_array *tr = &global_trace;
1158 struct tracer *saved_tracer = tr->current_trace;
1159 int ret;
1160
1161 if (!type->selftest || tracing_selftest_disabled)
1162 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001163
1164 /*
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001165 * Run a selftest on this tracer.
1166 * Here we reset the trace buffer, and set the current
1167 * tracer to be this tracer. The tracer can then run some
1168 * internal tracing to verify that everything is in order.
1169 * If we fail, we do not register this tracer.
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001170 */
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001171 tracing_reset_online_cpus(&tr->trace_buffer);
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001172
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001173 tr->current_trace = type;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001174
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001175#ifdef CONFIG_TRACER_MAX_TRACE
1176 if (type->use_max_tr) {
1177 /* If we expanded the buffers, make sure the max is expanded too */
1178 if (ring_buffer_expanded)
1179 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1180 RING_BUFFER_ALL_CPUS);
1181 tr->allocated_snapshot = true;
1182 }
1183#endif
1184
1185 /* the test is responsible for initializing and enabling */
1186 pr_info("Testing tracer %s: ", type->name);
1187 ret = type->selftest(type, tr);
1188 /* the test is responsible for resetting too */
1189 tr->current_trace = saved_tracer;
1190 if (ret) {
1191 printk(KERN_CONT "FAILED!\n");
1192 /* Add the warning after printing 'FAILED' */
1193 WARN_ON(1);
1194 return -1;
1195 }
1196 /* Only reset on passing, to avoid touching corrupted buffers */
1197 tracing_reset_online_cpus(&tr->trace_buffer);
1198
1199#ifdef CONFIG_TRACER_MAX_TRACE
1200 if (type->use_max_tr) {
1201 tr->allocated_snapshot = false;
1202
1203 /* Shrink the max buffer again */
1204 if (ring_buffer_expanded)
1205 ring_buffer_resize(tr->max_buffer.buffer, 1,
1206 RING_BUFFER_ALL_CPUS);
1207 }
1208#endif
1209
1210 printk(KERN_CONT "PASSED\n");
1211 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001212}
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001213#else
1214static inline int run_tracer_selftest(struct tracer *type)
1215{
1216 return 0;
1217}
1218#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001219
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001220static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1221
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001222static void __init apply_trace_boot_options(void);
1223
Steven Rostedt4fcdae82008-05-12 21:21:00 +02001224/**
1225 * register_tracer - register a tracer with the ftrace system.
1226 * @type - the plugin for the tracer
1227 *
1228 * Register a new plugin tracer.
1229 */
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001230int __init register_tracer(struct tracer *type)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001231{
1232 struct tracer *t;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001233 int ret = 0;
1234
1235 if (!type->name) {
1236 pr_info("Tracer must have a name\n");
1237 return -1;
1238 }
1239
Dan Carpenter24a461d2010-07-10 12:06:44 +02001240 if (strlen(type->name) >= MAX_TRACER_SIZE) {
Li Zefanee6c2c12009-09-18 14:06:47 +08001241 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1242 return -1;
1243 }
1244
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001245 mutex_lock(&trace_types_lock);
Ingo Molnar86fa2f62008-11-19 10:00:15 +01001246
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001247 tracing_selftest_running = true;
1248
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001249 for (t = trace_types; t; t = t->next) {
1250 if (strcmp(type->name, t->name) == 0) {
1251 /* already found */
Li Zefanee6c2c12009-09-18 14:06:47 +08001252 pr_info("Tracer %s already registered\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001253 type->name);
1254 ret = -1;
1255 goto out;
1256 }
1257 }
1258
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01001259 if (!type->set_flag)
1260 type->set_flag = &dummy_set_flag;
1261 if (!type->flags)
1262 type->flags = &dummy_tracer_flags;
1263 else
1264 if (!type->flags->opts)
1265 type->flags->opts = dummy_tracer_opt;
Frederic Weisbecker6eaaa5d2009-02-11 02:25:00 +01001266
Steven Rostedt (Red Hat)f4e781c2013-03-07 11:10:56 -05001267 ret = run_tracer_selftest(type);
1268 if (ret < 0)
1269 goto out;
Steven Rostedt60a11772008-05-12 21:20:44 +02001270
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001271 type->next = trace_types;
1272 trace_types = type;
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001273 add_tracer_options(&global_trace, type);
Steven Rostedt60a11772008-05-12 21:20:44 +02001274
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001275 out:
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +01001276 tracing_selftest_running = false;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001277 mutex_unlock(&trace_types_lock);
1278
Steven Rostedtdac74942009-02-05 01:13:38 -05001279 if (ret || !default_bootup_tracer)
1280 goto out_unlock;
Steven Rostedtb2821ae2009-02-02 21:38:32 -05001281
Li Zefanee6c2c12009-09-18 14:06:47 +08001282 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
Steven Rostedtdac74942009-02-05 01:13:38 -05001283 goto out_unlock;
1284
1285 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1286 /* Do we want this tracer to start on bootup? */
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05001287 tracing_set_tracer(&global_trace, type->name);
Steven Rostedtdac74942009-02-05 01:13:38 -05001288 default_bootup_tracer = NULL;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001289
1290 apply_trace_boot_options();
1291
Steven Rostedtdac74942009-02-05 01:13:38 -05001292 /* disable other selftests, since this will break it. */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05001293 tracing_selftest_disabled = true;
Steven Rostedtdac74942009-02-05 01:13:38 -05001294#ifdef CONFIG_FTRACE_STARTUP_TEST
1295 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1296 type->name);
1297#endif
1298
1299 out_unlock:
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001300 return ret;
1301}
1302
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001303void tracing_reset(struct trace_buffer *buf, int cpu)
Steven Rostedtf6339032009-09-04 12:35:16 -04001304{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001305 struct ring_buffer *buffer = buf->buffer;
Steven Rostedtf6339032009-09-04 12:35:16 -04001306
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001307 if (!buffer)
1308 return;
1309
Steven Rostedtf6339032009-09-04 12:35:16 -04001310 ring_buffer_record_disable(buffer);
1311
1312 /* Make sure all commits have finished */
1313 synchronize_sched();
Steven Rostedt68179682012-05-08 20:57:53 -04001314 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedtf6339032009-09-04 12:35:16 -04001315
1316 ring_buffer_record_enable(buffer);
1317}
1318
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001319void tracing_reset_online_cpus(struct trace_buffer *buf)
Pekka J Enberg213cc062008-12-19 12:08:39 +02001320{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001321 struct ring_buffer *buffer = buf->buffer;
Pekka J Enberg213cc062008-12-19 12:08:39 +02001322 int cpu;
1323
Hiraku Toyookaa5416412012-12-19 16:02:34 +09001324 if (!buffer)
1325 return;
1326
Steven Rostedt621968c2009-09-04 12:02:35 -04001327 ring_buffer_record_disable(buffer);
1328
1329 /* Make sure all commits have finished */
1330 synchronize_sched();
1331
Alexander Z Lam94571582013-08-02 18:36:16 -07001332 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001333
1334 for_each_online_cpu(cpu)
Steven Rostedt68179682012-05-08 20:57:53 -04001335 ring_buffer_reset_cpu(buffer, cpu);
Steven Rostedt621968c2009-09-04 12:02:35 -04001336
1337 ring_buffer_record_enable(buffer);
Pekka J Enberg213cc062008-12-19 12:08:39 +02001338}
1339
Steven Rostedt (Red Hat)09d80912013-07-23 22:21:59 -04001340/* Must have trace_types_lock held */
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001341void tracing_reset_all_online_cpus(void)
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001342{
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001343 struct trace_array *tr;
1344
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001345 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001346 tracing_reset_online_cpus(&tr->trace_buffer);
1347#ifdef CONFIG_TRACER_MAX_TRACE
1348 tracing_reset_online_cpus(&tr->max_buffer);
1349#endif
Steven Rostedt (Red Hat)873c6422013-03-04 23:26:06 -05001350 }
Steven Rostedt9456f0f2009-05-06 21:54:09 -04001351}
1352
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001353#define SAVED_CMDLINES_DEFAULT 128
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001354#define NO_CMDLINE_MAP UINT_MAX
Jamie Gennis6019e592012-11-21 15:04:25 -08001355static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
Thomas Gleixneredc35bd2009-12-03 12:38:57 +01001356static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001357struct saved_cmdlines_buffer {
1358 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1359 unsigned *map_cmdline_to_pid;
1360 unsigned cmdline_num;
1361 int cmdline_idx;
1362 char *saved_cmdlines;
1363};
1364static struct saved_cmdlines_buffer *savedcmd;
Steven Rostedt25b0b442008-05-12 21:21:00 +02001365
Steven Rostedt25b0b442008-05-12 21:21:00 +02001366/* temporarily disable recording */
Hannes Eder4fd27352009-02-10 19:44:12 +01001367static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001368
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001369static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001370{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001371 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1372}
1373
1374static inline void set_cmdline(int idx, const char *cmdline)
1375{
1376 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1377}
1378
1379static int allocate_cmdlines_buffer(unsigned int val,
1380 struct saved_cmdlines_buffer *s)
1381{
1382 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1383 GFP_KERNEL);
1384 if (!s->map_cmdline_to_pid)
1385 return -ENOMEM;
1386
1387 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1388 if (!s->saved_cmdlines) {
1389 kfree(s->map_cmdline_to_pid);
1390 return -ENOMEM;
1391 }
1392
1393 s->cmdline_idx = 0;
1394 s->cmdline_num = val;
1395 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1396 sizeof(s->map_pid_to_cmdline));
1397 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1398 val * sizeof(*s->map_cmdline_to_pid));
1399
1400 return 0;
1401}
1402
1403static int trace_create_savedcmd(void)
1404{
1405 int ret;
1406
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001407 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001408 if (!savedcmd)
1409 return -ENOMEM;
1410
1411 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1412 if (ret < 0) {
1413 kfree(savedcmd);
1414 savedcmd = NULL;
1415 return -ENOMEM;
1416 }
1417
1418 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001419}
1420
Carsten Emdeb5130b12009-09-13 01:43:07 +02001421int is_tracing_stopped(void)
1422{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001423 return global_trace.stop_count;
Carsten Emdeb5130b12009-09-13 01:43:07 +02001424}
1425
Steven Rostedt0f048702008-11-05 16:05:44 -05001426/**
1427 * tracing_start - quick start of the tracer
1428 *
1429 * If tracing is enabled but was stopped by tracing_stop,
1430 * this will start the tracer back up.
1431 */
1432void tracing_start(void)
1433{
1434 struct ring_buffer *buffer;
1435 unsigned long flags;
1436
1437 if (tracing_disabled)
1438 return;
1439
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001440 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1441 if (--global_trace.stop_count) {
1442 if (global_trace.stop_count < 0) {
Steven Rostedtb06a8302009-01-22 14:26:15 -05001443 /* Someone screwed up their debugging */
1444 WARN_ON_ONCE(1);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001445 global_trace.stop_count = 0;
Steven Rostedtb06a8302009-01-22 14:26:15 -05001446 }
Steven Rostedt0f048702008-11-05 16:05:44 -05001447 goto out;
1448 }
1449
Steven Rostedta2f80712010-03-12 19:56:00 -05001450 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001451 arch_spin_lock(&global_trace.max_lock);
Steven Rostedt0f048702008-11-05 16:05:44 -05001452
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001453 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001454 if (buffer)
1455 ring_buffer_record_enable(buffer);
1456
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001457#ifdef CONFIG_TRACER_MAX_TRACE
1458 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001459 if (buffer)
1460 ring_buffer_record_enable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001461#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001462
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001463 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001464
Steven Rostedt0f048702008-11-05 16:05:44 -05001465 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001466 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1467}
1468
1469static void tracing_start_tr(struct trace_array *tr)
1470{
1471 struct ring_buffer *buffer;
1472 unsigned long flags;
1473
1474 if (tracing_disabled)
1475 return;
1476
1477 /* If global, we need to also start the max tracer */
1478 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1479 return tracing_start();
1480
1481 raw_spin_lock_irqsave(&tr->start_lock, flags);
1482
1483 if (--tr->stop_count) {
1484 if (tr->stop_count < 0) {
1485 /* Someone screwed up their debugging */
1486 WARN_ON_ONCE(1);
1487 tr->stop_count = 0;
1488 }
1489 goto out;
1490 }
1491
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001492 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001493 if (buffer)
1494 ring_buffer_record_enable(buffer);
1495
1496 out:
1497 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001498}
1499
1500/**
1501 * tracing_stop - quick stop of the tracer
1502 *
 1503 * Lightweight way to stop tracing. Use in conjunction with
1504 * tracing_start.
1505 */
1506void tracing_stop(void)
1507{
1508 struct ring_buffer *buffer;
1509 unsigned long flags;
1510
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001511 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1512 if (global_trace.stop_count++)
Steven Rostedt0f048702008-11-05 16:05:44 -05001513 goto out;
1514
Steven Rostedta2f80712010-03-12 19:56:00 -05001515 /* Prevent the buffers from switching */
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001516 arch_spin_lock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001517
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001518 buffer = global_trace.trace_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001519 if (buffer)
1520 ring_buffer_record_disable(buffer);
1521
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001522#ifdef CONFIG_TRACER_MAX_TRACE
1523 buffer = global_trace.max_buffer.buffer;
Steven Rostedt0f048702008-11-05 16:05:44 -05001524 if (buffer)
1525 ring_buffer_record_disable(buffer);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001526#endif
Steven Rostedt0f048702008-11-05 16:05:44 -05001527
Steven Rostedt (Red Hat)0b9b12c2014-01-14 10:04:59 -05001528 arch_spin_unlock(&global_trace.max_lock);
Steven Rostedta2f80712010-03-12 19:56:00 -05001529
Steven Rostedt0f048702008-11-05 16:05:44 -05001530 out:
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001531 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1532}
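
/*
 * Illustrative sketch only: because stop_count nests, stop/start pairs
 * may be used from nested callers; recording only resumes once the
 * count drops back to zero.  The helper name is hypothetical.
 */
#if 0
static void example_quiesce_buffers(void)
{
	tracing_stop();		/* bumps stop_count, disables recording */
	/* ... examine the ring buffers without new entries racing in ... */
	tracing_start();	/* recording resumes when count hits zero */
}
#endif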
1533
1534static void tracing_stop_tr(struct trace_array *tr)
1535{
1536 struct ring_buffer *buffer;
1537 unsigned long flags;
1538
1539 /* If global, we need to also stop the max tracer */
1540 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1541 return tracing_stop();
1542
1543 raw_spin_lock_irqsave(&tr->start_lock, flags);
1544 if (tr->stop_count++)
1545 goto out;
1546
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001547 buffer = tr->trace_buffer.buffer;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04001548 if (buffer)
1549 ring_buffer_record_disable(buffer);
1550
1551 out:
1552 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
Steven Rostedt0f048702008-11-05 16:05:44 -05001553}
1554
Ingo Molnare309b412008-05-12 21:20:51 +02001555void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001556
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001557static int trace_save_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001558{
Carsten Emdea635cf02009-03-18 09:00:41 +01001559 unsigned pid, idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001560
1561 if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001562 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001563
1564 /*
1565 * It's not the end of the world if we don't get
1566 * the lock, but we also don't want to spin
1567 * nor do we want to disable interrupts,
1568 * so if we miss here, then better luck next time.
1569 */
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001570 if (!arch_spin_trylock(&trace_cmdline_lock))
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001571 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001572
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001573 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
Thomas Gleixner2c7eea42009-03-18 09:03:19 +01001574 if (idx == NO_CMDLINE_MAP) {
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001575 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001576
Carsten Emdea635cf02009-03-18 09:00:41 +01001577 /*
1578 * Check whether the cmdline buffer at idx has a pid
1579 * mapped. We are going to overwrite that entry so we
1580 * need to clear the map_pid_to_cmdline. Otherwise we
1581 * would read the new comm for the old pid.
1582 */
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001583 pid = savedcmd->map_cmdline_to_pid[idx];
Carsten Emdea635cf02009-03-18 09:00:41 +01001584 if (pid != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001585 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001586
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001587 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1588 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001589
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001590 savedcmd->cmdline_idx = idx;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001591 }
1592
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001593 set_cmdline(idx, tsk->comm);
Jamie Gennis6019e592012-11-21 15:04:25 -08001594 saved_tgids[idx] = tsk->tgid;
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001595 arch_spin_unlock(&trace_cmdline_lock);
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001596
1597 return 1;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001598}
1599
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001600static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001601{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602 unsigned map;
1603
Steven Rostedt4ca53082009-03-16 19:20:15 -04001604 if (!pid) {
1605 strcpy(comm, "<idle>");
1606 return;
1607 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001608
Steven Rostedt74bf4072010-01-25 15:11:53 -05001609 if (WARN_ON_ONCE(pid < 0)) {
1610 strcpy(comm, "<XXX>");
1611 return;
1612 }
1613
Steven Rostedt4ca53082009-03-16 19:20:15 -04001614 if (pid > PID_MAX_DEFAULT) {
1615 strcpy(comm, "<...>");
1616 return;
1617 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001618
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001619 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001620 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001621 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001622 else
1623 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001624}
1625
1626void trace_find_cmdline(int pid, char comm[])
1627{
1628 preempt_disable();
1629 arch_spin_lock(&trace_cmdline_lock);
1630
1631 __trace_find_cmdline(pid, comm);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001632
Thomas Gleixner0199c4e2009-12-02 20:01:25 +01001633 arch_spin_unlock(&trace_cmdline_lock);
Heiko Carstens5b6045a2009-05-26 17:28:02 +02001634 preempt_enable();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001635}
1636
Jamie Gennis6019e592012-11-21 15:04:25 -08001637int trace_find_tgid(int pid)
1638{
1639 unsigned map;
1640 int tgid;
1641
1642 preempt_disable();
1643 arch_spin_lock(&trace_cmdline_lock);
 1644	map = savedcmd->map_pid_to_cmdline[pid];
1645 if (map != NO_CMDLINE_MAP)
1646 tgid = saved_tgids[map];
1647 else
1648 tgid = -1;
1649
1650 arch_spin_unlock(&trace_cmdline_lock);
1651 preempt_enable();
1652
1653 return tgid;
1654}
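
/*
 * Illustrative sketch of a consumer of the saved-cmdline map.  The
 * helper is hypothetical; comm must hold TASK_COMM_LEN bytes, and the
 * lookups fall back to "<...>" and -1 for pids that were never saved.
 */
#if 0
static void example_report_task(int pid)
{
	char comm[TASK_COMM_LEN];

	trace_find_cmdline(pid, comm);
	pr_info("pid %d last ran as %s (tgid %d)\n",
		pid, comm, trace_find_tgid(pid));
}
#endif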
1655
Ingo Molnare309b412008-05-12 21:20:51 +02001656void tracing_record_cmdline(struct task_struct *tsk)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001657{
Steven Rostedt0fb96562012-05-11 14:25:30 -04001658 if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001659 return;
1660
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001661 if (!__this_cpu_read(trace_cmdline_save))
1662 return;
1663
Steven Rostedt (Red Hat)379cfda2014-05-30 09:42:39 -04001664 if (trace_save_cmdline(tsk))
1665 __this_cpu_write(trace_cmdline_save, false);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001666}
1667
Pekka Paalanen45dcd8b2008-09-16 21:56:41 +03001668void
Steven Rostedt38697052008-10-01 13:14:09 -04001669tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
1670 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001671{
1672 struct task_struct *tsk = current;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001673
Steven Rostedt777e2082008-09-29 23:02:42 -04001674 entry->preempt_count = pc & 0xff;
1675 entry->pid = (tsk) ? tsk->pid : 0;
1676 entry->flags =
Steven Rostedt92444892008-10-24 09:42:59 -04001677#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
Steven Rostedt2e2ca152008-08-01 12:26:40 -04001678 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
Steven Rostedt92444892008-10-24 09:42:59 -04001679#else
1680 TRACE_FLAG_IRQS_NOSUPPORT |
1681#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001682 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
1683 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
Peter Zijlstrae5137b52013-10-04 17:28:26 +02001684 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
1685 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001686}
Frederic Weisbeckerf413cdb2009-08-07 01:25:54 +02001687EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
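
/*
 * Illustrative sketch of decoding the flag word packed above.  The
 * helper is hypothetical; the flag bits are the ones set in
 * tracing_generic_entry_update().
 */
#if 0
static void example_decode_entry(struct trace_entry *entry)
{
	pr_info("pid=%d preempt=%d irqs-off=%d hardirq=%d softirq=%d\n",
		entry->pid, entry->preempt_count,
		!!(entry->flags & TRACE_FLAG_IRQS_OFF),
		!!(entry->flags & TRACE_FLAG_HARDIRQ),
		!!(entry->flags & TRACE_FLAG_SOFTIRQ));
}
#endif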
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001688
Steven Rostedte77405a2009-09-02 14:17:06 -04001689struct ring_buffer_event *
1690trace_buffer_lock_reserve(struct ring_buffer *buffer,
1691 int type,
1692 unsigned long len,
1693 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001694{
1695 struct ring_buffer_event *event;
1696
Steven Rostedte77405a2009-09-02 14:17:06 -04001697 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001698 if (event != NULL) {
1699 struct trace_entry *ent = ring_buffer_event_data(event);
1700
1701 tracing_generic_entry_update(ent, flags, pc);
1702 ent->type = type;
1703 }
1704
1705 return event;
1706}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001707
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001708void
1709__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
1710{
1711 __this_cpu_write(trace_cmdline_save, true);
1712 ring_buffer_unlock_commit(buffer, event);
1713}
1714
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04001715void trace_buffer_unlock_commit(struct trace_array *tr,
1716 struct ring_buffer *buffer,
1717 struct ring_buffer_event *event,
1718 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001719{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001720 __buffer_unlock_commit(buffer, event);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001721
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001722 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedte77405a2009-09-02 14:17:06 -04001723 ftrace_trace_userstack(buffer, flags, pc);
Frederic Weisbecker07edf712009-03-22 23:10:46 +01001724}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001725EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001726
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001727static struct ring_buffer *temp_buffer;
1728
Steven Rostedtef5580d2009-02-27 19:38:04 -05001729struct ring_buffer_event *
Steven Rostedtccb469a2012-08-02 10:32:10 -04001730trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001731 struct trace_event_file *trace_file,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001732 int type, unsigned long len,
1733 unsigned long flags, int pc)
1734{
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001735 struct ring_buffer_event *entry;
1736
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -04001737 *current_rb = trace_file->tr->trace_buffer.buffer;
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001738 entry = trace_buffer_lock_reserve(*current_rb,
Steven Rostedtccb469a2012-08-02 10:32:10 -04001739 type, len, flags, pc);
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001740 /*
1741 * If tracing is off, but we have triggers enabled
1742 * we still need to look at the event data. Use the temp_buffer
 1743	 * to store the trace event for the trigger to use. It is
 1744	 * recursion safe and will not be recorded anywhere.
1745 */
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -04001746 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04001747 *current_rb = temp_buffer;
1748 entry = trace_buffer_lock_reserve(*current_rb,
1749 type, len, flags, pc);
1750 }
1751 return entry;
Steven Rostedtccb469a2012-08-02 10:32:10 -04001752}
1753EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1754
1755struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001756trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1757 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001758 unsigned long flags, int pc)
1759{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001760 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001761 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001762 type, len, flags, pc);
1763}
Steven Rostedt94487d62009-05-05 19:22:53 -04001764EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001765
Steven Rostedt (Red Hat)b7f0c952015-09-25 17:38:44 -04001766void trace_buffer_unlock_commit_regs(struct trace_array *tr,
1767 struct ring_buffer *buffer,
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001768 struct ring_buffer_event *event,
1769 unsigned long flags, int pc,
1770 struct pt_regs *regs)
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001771{
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001772 __buffer_unlock_commit(buffer, event);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001773
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001774 ftrace_trace_stack(tr, buffer, flags, 6, pc, regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001775 ftrace_trace_userstack(buffer, flags, pc);
1776}
Steven Rostedt0d5c6e12012-11-01 20:54:21 -04001777EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001778
Steven Rostedte77405a2009-09-02 14:17:06 -04001779void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
1780 struct ring_buffer_event *event)
Steven Rostedt77d9f462009-04-02 01:16:59 -04001781{
Steven Rostedte77405a2009-09-02 14:17:06 -04001782 ring_buffer_discard_commit(buffer, event);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001783}
Steven Rostedt12acd472009-04-17 16:01:56 -04001784EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001785
Ingo Molnare309b412008-05-12 21:20:51 +02001786void
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001787trace_function(struct trace_array *tr,
Steven Rostedt38697052008-10-01 13:14:09 -04001788 unsigned long ip, unsigned long parent_ip, unsigned long flags,
1789 int pc)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001790{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001791 struct trace_event_call *call = &event_function;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001792 struct ring_buffer *buffer = tr->trace_buffer.buffer;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001793 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001794 struct ftrace_entry *entry;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001795
Steven Rostedte77405a2009-09-02 14:17:06 -04001796 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001797 flags, pc);
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001798 if (!event)
1799 return;
1800 entry = ring_buffer_event_data(event);
Steven Rostedt777e2082008-09-29 23:02:42 -04001801 entry->ip = ip;
1802 entry->parent_ip = parent_ip;
Tom Zanussie1112b42009-03-31 00:48:49 -05001803
Tom Zanussif306cc82013-10-24 08:34:17 -05001804 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001805 __buffer_unlock_commit(buffer, event);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001806}
1807
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001808#ifdef CONFIG_STACKTRACE
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001809
1810#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
1811struct ftrace_stack {
1812 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
1813};
1814
1815static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
1816static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1817
Steven Rostedte77405a2009-09-02 14:17:06 -04001818static void __ftrace_trace_stack(struct ring_buffer *buffer,
Steven Rostedt53614992009-01-15 19:12:40 -05001819 unsigned long flags,
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001820 int skip, int pc, struct pt_regs *regs)
Ingo Molnar86387f72008-05-12 21:20:51 +02001821{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001822 struct trace_event_call *call = &event_kernel_stack;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04001823 struct ring_buffer_event *event;
Steven Rostedt777e2082008-09-29 23:02:42 -04001824 struct stack_entry *entry;
Ingo Molnar86387f72008-05-12 21:20:51 +02001825 struct stack_trace trace;
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001826 int use_stack;
1827 int size = FTRACE_STACK_ENTRIES;
Ingo Molnar86387f72008-05-12 21:20:51 +02001828
1829 trace.nr_entries = 0;
Ingo Molnar86387f72008-05-12 21:20:51 +02001830 trace.skip = skip;
Ingo Molnar86387f72008-05-12 21:20:51 +02001831
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001832 /*
1833 * Since events can happen in NMIs there's no safe way to
1834 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
1835 * or NMI comes in, it will just have to use the default
 1836	 * FTRACE_STACK_ENTRIES.
1837 */
1838 preempt_disable_notrace();
1839
Shan Wei82146522012-11-19 13:21:01 +08001840 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001841 /*
1842 * We don't need any atomic variables, just a barrier.
1843 * If an interrupt comes in, we don't care, because it would
1844 * have exited and put the counter back to what we want.
1845 * We just need a barrier to keep gcc from moving things
1846 * around.
1847 */
1848 barrier();
1849 if (use_stack == 1) {
Christoph Lameterbdffd892014-04-29 14:17:40 -05001850 trace.entries = this_cpu_ptr(ftrace_stack.calls);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001851 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
1852
1853 if (regs)
1854 save_stack_trace_regs(regs, &trace);
1855 else
1856 save_stack_trace(&trace);
1857
1858 if (trace.nr_entries > size)
1859 size = trace.nr_entries;
1860 } else
1861 /* From now on, use_stack is a boolean */
1862 use_stack = 0;
1863
1864 size *= sizeof(unsigned long);
1865
1866 event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
1867 sizeof(*entry) + size, flags, pc);
1868 if (!event)
1869 goto out;
1870 entry = ring_buffer_event_data(event);
1871
1872 memset(&entry->caller, 0, size);
1873
1874 if (use_stack)
1875 memcpy(&entry->caller, trace.entries,
1876 trace.nr_entries * sizeof(unsigned long));
1877 else {
1878 trace.max_entries = FTRACE_STACK_ENTRIES;
1879 trace.entries = entry->caller;
1880 if (regs)
1881 save_stack_trace_regs(regs, &trace);
1882 else
1883 save_stack_trace(&trace);
1884 }
1885
1886 entry->size = trace.nr_entries;
1887
Tom Zanussif306cc82013-10-24 08:34:17 -05001888 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001889 __buffer_unlock_commit(buffer, event);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001890
1891 out:
1892 /* Again, don't let gcc optimize things here */
1893 barrier();
Shan Wei82146522012-11-19 13:21:01 +08001894 __this_cpu_dec(ftrace_stack_reserve);
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04001895 preempt_enable_notrace();
1896
Ingo Molnarf0a920d2008-05-12 21:20:47 +02001897}
1898
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001899static inline void ftrace_trace_stack(struct trace_array *tr,
1900 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001901 unsigned long flags,
1902 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05001903{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001904 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05001905 return;
1906
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001907 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05001908}
1909
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001910void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
1911 int pc)
Steven Rostedt38697052008-10-01 13:14:09 -04001912{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001913 __ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
Steven Rostedt38697052008-10-01 13:14:09 -04001914}
1915
Steven Rostedt03889382009-12-11 09:48:22 -05001916/**
1917 * trace_dump_stack - record a stack back trace in the trace buffer
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001918 * @skip: Number of functions to skip (helper handlers)
Steven Rostedt03889382009-12-11 09:48:22 -05001919 */
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001920void trace_dump_stack(int skip)
Steven Rostedt03889382009-12-11 09:48:22 -05001921{
1922 unsigned long flags;
1923
1924 if (tracing_disabled || tracing_selftest_running)
Steven Rostedte36c5452009-12-14 15:58:33 -05001925 return;
Steven Rostedt03889382009-12-11 09:48:22 -05001926
1927 local_save_flags(flags);
1928
Steven Rostedt (Red Hat)c142be82013-03-13 09:55:57 -04001929 /*
 1930	 * Skip 3 more; that lands us at the caller of
1931 * this function.
1932 */
1933 skip += 3;
1934 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
1935 flags, skip, preempt_count(), NULL);
Steven Rostedt03889382009-12-11 09:48:22 -05001936}
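
/*
 * Illustrative sketch only: dropping a backtrace of the current
 * context into the trace buffer from a suspicious code path.  The
 * caller is hypothetical.
 */
#if 0
static void example_suspicious_path(void)
{
	trace_dump_stack(0);	/* 0: do not skip any caller frames */
}
#endif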
1937
Steven Rostedt91e86e52010-11-10 12:56:12 +01001938static DEFINE_PER_CPU(int, user_stack_count);
1939
Steven Rostedte77405a2009-09-02 14:17:06 -04001940void
1941ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
Török Edwin02b67512008-11-22 13:28:47 +02001942{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04001943 struct trace_event_call *call = &event_user_stack;
Török Edwin8d7c6a92008-11-23 12:39:06 +02001944 struct ring_buffer_event *event;
Török Edwin02b67512008-11-22 13:28:47 +02001945 struct userstack_entry *entry;
1946 struct stack_trace trace;
Török Edwin02b67512008-11-22 13:28:47 +02001947
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04001948 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
Török Edwin02b67512008-11-22 13:28:47 +02001949 return;
1950
Steven Rostedtb6345872010-03-12 20:03:30 -05001951 /*
 1952	 * NMIs cannot handle page faults, even with fixups.
 1953	 * Saving the user stack can (and often does) fault.
1954 */
1955 if (unlikely(in_nmi()))
1956 return;
1957
Steven Rostedt91e86e52010-11-10 12:56:12 +01001958 /*
1959 * prevent recursion, since the user stack tracing may
1960 * trigger other kernel events.
1961 */
1962 preempt_disable();
1963 if (__this_cpu_read(user_stack_count))
1964 goto out;
1965
1966 __this_cpu_inc(user_stack_count);
1967
Steven Rostedte77405a2009-09-02 14:17:06 -04001968 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001969 sizeof(*entry), flags, pc);
Török Edwin02b67512008-11-22 13:28:47 +02001970 if (!event)
Li Zefan1dbd1952010-12-09 15:47:56 +08001971 goto out_drop_count;
Török Edwin02b67512008-11-22 13:28:47 +02001972 entry = ring_buffer_event_data(event);
Török Edwin02b67512008-11-22 13:28:47 +02001973
Steven Rostedt48659d32009-09-11 11:36:23 -04001974 entry->tgid = current->tgid;
Török Edwin02b67512008-11-22 13:28:47 +02001975 memset(&entry->caller, 0, sizeof(entry->caller));
1976
1977 trace.nr_entries = 0;
1978 trace.max_entries = FTRACE_STACK_ENTRIES;
1979 trace.skip = 0;
1980 trace.entries = entry->caller;
1981
1982 save_stack_trace_user(&trace);
Tom Zanussif306cc82013-10-24 08:34:17 -05001983 if (!call_filter_check_discard(call, entry, buffer, event))
Steven Rostedt7ffbd482012-10-11 12:14:25 -04001984 __buffer_unlock_commit(buffer, event);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001985
Li Zefan1dbd1952010-12-09 15:47:56 +08001986 out_drop_count:
Steven Rostedt91e86e52010-11-10 12:56:12 +01001987 __this_cpu_dec(user_stack_count);
Steven Rostedt91e86e52010-11-10 12:56:12 +01001988 out:
1989 preempt_enable();
Török Edwin02b67512008-11-22 13:28:47 +02001990}
1991
Hannes Eder4fd27352009-02-10 19:44:12 +01001992#ifdef UNUSED
1993static void __trace_userstack(struct trace_array *tr, unsigned long flags)
Török Edwin02b67512008-11-22 13:28:47 +02001994{
Arnaldo Carvalho de Melo7be42152009-02-05 01:13:37 -05001995 ftrace_trace_userstack(tr, flags, preempt_count());
Török Edwin02b67512008-11-22 13:28:47 +02001996}
Hannes Eder4fd27352009-02-10 19:44:12 +01001997#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001998
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001999#endif /* CONFIG_STACKTRACE */
2000
Steven Rostedt07d777f2011-09-22 14:01:55 -04002001/* created for use with alloc_percpu */
2002struct trace_buffer_struct {
2003 char buffer[TRACE_BUF_SIZE];
2004};
2005
2006static struct trace_buffer_struct *trace_percpu_buffer;
2007static struct trace_buffer_struct *trace_percpu_sirq_buffer;
2008static struct trace_buffer_struct *trace_percpu_irq_buffer;
2009static struct trace_buffer_struct *trace_percpu_nmi_buffer;
2010
2011/*
2012 * The buffer used is dependent on the context. There is a per cpu
 2013 * buffer for normal context, softirq context, hard irq context and
 2014 * for NMI context. This allows for lockless recording.
2015 *
2016 * Note, if the buffers failed to be allocated, then this returns NULL
2017 */
2018static char *get_trace_buf(void)
2019{
2020 struct trace_buffer_struct *percpu_buffer;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002021
2022 /*
2023 * If we have allocated per cpu buffers, then we do not
2024 * need to do any locking.
2025 */
2026 if (in_nmi())
2027 percpu_buffer = trace_percpu_nmi_buffer;
2028 else if (in_irq())
2029 percpu_buffer = trace_percpu_irq_buffer;
2030 else if (in_softirq())
2031 percpu_buffer = trace_percpu_sirq_buffer;
2032 else
2033 percpu_buffer = trace_percpu_buffer;
2034
2035 if (!percpu_buffer)
2036 return NULL;
2037
Shan Weid8a03492012-11-13 09:53:04 +08002038 return this_cpu_ptr(&percpu_buffer->buffer[0]);
Steven Rostedt07d777f2011-09-22 14:01:55 -04002039}
2040
2041static int alloc_percpu_trace_buffer(void)
2042{
2043 struct trace_buffer_struct *buffers;
2044 struct trace_buffer_struct *sirq_buffers;
2045 struct trace_buffer_struct *irq_buffers;
2046 struct trace_buffer_struct *nmi_buffers;
2047
2048 buffers = alloc_percpu(struct trace_buffer_struct);
2049 if (!buffers)
2050 goto err_warn;
2051
2052 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2053 if (!sirq_buffers)
2054 goto err_sirq;
2055
2056 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2057 if (!irq_buffers)
2058 goto err_irq;
2059
2060 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2061 if (!nmi_buffers)
2062 goto err_nmi;
2063
2064 trace_percpu_buffer = buffers;
2065 trace_percpu_sirq_buffer = sirq_buffers;
2066 trace_percpu_irq_buffer = irq_buffers;
2067 trace_percpu_nmi_buffer = nmi_buffers;
2068
2069 return 0;
2070
2071 err_nmi:
2072 free_percpu(irq_buffers);
2073 err_irq:
2074 free_percpu(sirq_buffers);
2075 err_sirq:
2076 free_percpu(buffers);
2077 err_warn:
2078 WARN(1, "Could not allocate percpu trace_printk buffer");
2079 return -ENOMEM;
2080}
2081
Steven Rostedt81698832012-10-11 10:15:05 -04002082static int buffers_allocated;
2083
Steven Rostedt07d777f2011-09-22 14:01:55 -04002084void trace_printk_init_buffers(void)
2085{
Steven Rostedt07d777f2011-09-22 14:01:55 -04002086 if (buffers_allocated)
2087 return;
2088
2089 if (alloc_percpu_trace_buffer())
2090 return;
2091
Steven Rostedt2184db42014-05-28 13:14:40 -04002092 /* trace_printk() is for debug use only. Don't use it in production. */
2093
Borislav Petkov69a1c992015-01-27 17:17:20 +01002094 pr_warning("\n");
2095 pr_warning("**********************************************************\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002096 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2097 pr_warning("** **\n");
2098 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2099 pr_warning("** **\n");
2100 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
Frans Klavereff264e2014-11-07 15:53:44 +01002101 pr_warning("** unsafe for production use. **\n");
Steven Rostedt2184db42014-05-28 13:14:40 -04002102 pr_warning("** **\n");
2103 pr_warning("** If you see this message and you are not debugging **\n");
2104 pr_warning("** the kernel, report this immediately to your vendor! **\n");
2105 pr_warning("** **\n");
2106 pr_warning("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2107 pr_warning("**********************************************************\n");
Steven Rostedt07d777f2011-09-22 14:01:55 -04002108
Steven Rostedtb382ede62012-10-10 21:44:34 -04002109 /* Expand the buffers to set size */
2110 tracing_update_buffers();
2111
Steven Rostedt07d777f2011-09-22 14:01:55 -04002112 buffers_allocated = 1;
Steven Rostedt81698832012-10-11 10:15:05 -04002113
2114 /*
2115 * trace_printk_init_buffers() can be called by modules.
2116 * If that happens, then we need to start cmdline recording
2117 * directly here. If the global_trace.buffer is already
2118 * allocated here, then this was called by module code.
2119 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002120 if (global_trace.trace_buffer.buffer)
Steven Rostedt81698832012-10-11 10:15:05 -04002121 tracing_start_cmdline_record();
2122}
2123
2124void trace_printk_start_comm(void)
2125{
2126 /* Start tracing comms if trace printk is set */
2127 if (!buffers_allocated)
2128 return;
2129 tracing_start_cmdline_record();
2130}
2131
2132static void trace_printk_start_stop_comm(int enabled)
2133{
2134 if (!buffers_allocated)
2135 return;
2136
2137 if (enabled)
2138 tracing_start_cmdline_record();
2139 else
2140 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002141}
2142
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002143/**
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002144 * trace_vbprintk - write binary msg to tracing buffer
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002145 *
2146 */
Steven Rostedt40ce74f2009-03-19 14:03:53 -04002147int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002148{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002149 struct trace_event_call *call = &event_bprint;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002150 struct ring_buffer_event *event;
Steven Rostedte77405a2009-09-02 14:17:06 -04002151 struct ring_buffer *buffer;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002152 struct trace_array *tr = &global_trace;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002153 struct bprint_entry *entry;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002154 unsigned long flags;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002155 char *tbuffer;
2156 int len = 0, size, pc;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002157
2158 if (unlikely(tracing_selftest_running || tracing_disabled))
2159 return 0;
2160
2161 /* Don't pollute graph traces with trace_vprintk internals */
2162 pause_graph_tracing();
2163
2164 pc = preempt_count();
Steven Rostedt5168ae52010-06-03 09:36:50 -04002165 preempt_disable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002166
Steven Rostedt07d777f2011-09-22 14:01:55 -04002167 tbuffer = get_trace_buf();
2168 if (!tbuffer) {
2169 len = 0;
2170 goto out;
2171 }
2172
2173 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2174
2175 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002176 goto out;
2177
Steven Rostedt07d777f2011-09-22 14:01:55 -04002178 local_save_flags(flags);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002179 size = sizeof(*entry) + sizeof(u32) * len;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002180 buffer = tr->trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04002181 event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2182 flags, pc);
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002183 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002184 goto out;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002185 entry = ring_buffer_event_data(event);
2186 entry->ip = ip;
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002187 entry->fmt = fmt;
2188
Steven Rostedt07d777f2011-09-22 14:01:55 -04002189 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
Tom Zanussif306cc82013-10-24 08:34:17 -05002190 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002191 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002192 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002193 }
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002194
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002195out:
Steven Rostedt5168ae52010-06-03 09:36:50 -04002196 preempt_enable_notrace();
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002197 unpause_graph_tracing();
2198
2199 return len;
2200}
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002201EXPORT_SYMBOL_GPL(trace_vbprintk);
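
/*
 * Illustrative sketch only: trace_printk() lands here.  Only the
 * format pointer and the binary argument words are stored at trace
 * time; the string is rendered when the buffer is read, which keeps
 * the recording path cheap.  The caller below is hypothetical.
 */
#if 0
static void example_fast_debug(int x)
{
	trace_printk("example: x=%d\n", x);	/* -> trace_vbprintk() */
}
#endif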
2202
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002203static int
2204__trace_array_vprintk(struct ring_buffer *buffer,
2205 unsigned long ip, const char *fmt, va_list args)
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002206{
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -04002207 struct trace_event_call *call = &event_print;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002208 struct ring_buffer_event *event;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002209 int len = 0, size, pc;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002210 struct print_entry *entry;
Steven Rostedt07d777f2011-09-22 14:01:55 -04002211 unsigned long flags;
2212 char *tbuffer;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002213
2214 if (tracing_disabled || tracing_selftest_running)
2215 return 0;
2216
Steven Rostedt07d777f2011-09-22 14:01:55 -04002217 /* Don't pollute graph traces with trace_vprintk internals */
2218 pause_graph_tracing();
2219
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002220 pc = preempt_count();
2221 preempt_disable_notrace();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002222
Steven Rostedt07d777f2011-09-22 14:01:55 -04002223
2224 tbuffer = get_trace_buf();
2225 if (!tbuffer) {
2226 len = 0;
2227 goto out;
2228 }
2229
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002230 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002231
Steven Rostedt07d777f2011-09-22 14:01:55 -04002232 local_save_flags(flags);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002233 size = sizeof(*entry) + len + 1;
Steven Rostedte77405a2009-09-02 14:17:06 -04002234 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
Steven Rostedt07d777f2011-09-22 14:01:55 -04002235 flags, pc);
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002236 if (!event)
Steven Rostedt07d777f2011-09-22 14:01:55 -04002237 goto out;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002238 entry = ring_buffer_event_data(event);
Carsten Emdec13d2f72009-11-16 20:56:13 +01002239 entry->ip = ip;
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002240
Dan Carpenter3558a5a2014-11-27 18:57:52 +03002241 memcpy(&entry->buf, tbuffer, len + 1);
Tom Zanussif306cc82013-10-24 08:34:17 -05002242 if (!call_filter_check_discard(call, entry, buffer, event)) {
Steven Rostedt7ffbd482012-10-11 12:14:25 -04002243 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04002244 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
Steven Rostedtd9313692010-01-06 17:27:11 -05002245 }
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002246 out:
2247 preempt_enable_notrace();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002248 unpause_graph_tracing();
Frederic Weisbecker48ead022009-03-12 18:24:49 +01002249
2250 return len;
2251}
Steven Rostedt659372d2009-09-03 19:11:07 -04002252
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002253int trace_array_vprintk(struct trace_array *tr,
2254 unsigned long ip, const char *fmt, va_list args)
2255{
2256 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2257}
2258
2259int trace_array_printk(struct trace_array *tr,
2260 unsigned long ip, const char *fmt, ...)
2261{
2262 int ret;
2263 va_list ap;
2264
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002265 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002266 return 0;
2267
2268 va_start(ap, fmt);
2269 ret = trace_array_vprintk(tr, ip, fmt, ap);
2270 va_end(ap);
2271 return ret;
2272}
2273
2274int trace_array_printk_buf(struct ring_buffer *buffer,
2275 unsigned long ip, const char *fmt, ...)
2276{
2277 int ret;
2278 va_list ap;
2279
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002280 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002281 return 0;
2282
2283 va_start(ap, fmt);
2284 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2285 va_end(ap);
2286 return ret;
2287}
2288
Steven Rostedt659372d2009-09-03 19:11:07 -04002289int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2290{
Steven Rostedta813a152009-10-09 01:41:35 -04002291 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002292}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002293EXPORT_SYMBOL_GPL(trace_vprintk);
2294
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002295static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002296{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002297 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2298
Steven Rostedt5a90f572008-09-03 17:42:51 -04002299 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002300 if (buf_iter)
2301 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002302}
2303
Ingo Molnare309b412008-05-12 21:20:51 +02002304static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002305peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2306 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002307{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002308 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002309 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002310
Steven Rostedtd7690412008-10-01 00:29:53 -04002311 if (buf_iter)
2312 event = ring_buffer_iter_peek(buf_iter, ts);
2313 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002314 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002315 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002316
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002317 if (event) {
2318 iter->ent_size = ring_buffer_event_length(event);
2319 return ring_buffer_event_data(event);
2320 }
2321 iter->ent_size = 0;
2322 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002323}
Steven Rostedtd7690412008-10-01 00:29:53 -04002324
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002325static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002326__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
2327 unsigned long *missing_events, u64 *ent_ts)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002328{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002329 struct ring_buffer *buffer = iter->trace_buffer->buffer;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002330 struct trace_entry *ent, *next = NULL;
Lai Jiangshanaa274972010-04-05 17:11:05 +08002331 unsigned long lost_events = 0, next_lost = 0;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002332 int cpu_file = iter->cpu_file;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002333 u64 next_ts = 0, ts;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002334 int next_cpu = -1;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002335 int next_size = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002336 int cpu;
2337
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002338 /*
 2339	 * If we are in a per_cpu trace file, don't bother iterating over
 2340	 * all cpus; just peek directly.
2341 */
Steven Rostedtae3b5092013-01-23 15:22:59 -05002342 if (cpu_file > RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002343 if (ring_buffer_empty_cpu(buffer, cpu_file))
2344 return NULL;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002345 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002346 if (ent_cpu)
2347 *ent_cpu = cpu_file;
2348
2349 return ent;
2350 }
2351
Steven Rostedtab464282008-05-12 21:21:00 +02002352 for_each_tracing_cpu(cpu) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002353
2354 if (ring_buffer_empty_cpu(buffer, cpu))
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002355 continue;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002356
Steven Rostedtbc21b472010-03-31 19:49:26 -04002357 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002358
Ingo Molnarcdd31cd2008-05-12 21:20:46 +02002359 /*
2360 * Pick the entry with the smallest timestamp:
2361 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002362 if (ent && (!next || ts < next_ts)) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002363 next = ent;
2364 next_cpu = cpu;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002365 next_ts = ts;
Steven Rostedtbc21b472010-03-31 19:49:26 -04002366 next_lost = lost_events;
Steven Rostedt12b5da32012-03-27 10:43:28 -04002367 next_size = iter->ent_size;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002368 }
2369 }
2370
Steven Rostedt12b5da32012-03-27 10:43:28 -04002371 iter->ent_size = next_size;
2372
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002373 if (ent_cpu)
2374 *ent_cpu = next_cpu;
2375
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002376 if (ent_ts)
2377 *ent_ts = next_ts;
2378
Steven Rostedtbc21b472010-03-31 19:49:26 -04002379 if (missing_events)
2380 *missing_events = next_lost;
2381
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002382 return next;
2383}
2384
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002385/* Find the next real entry, without updating the iterator itself */
Frederic Weisbeckerc4a8e8b2009-02-02 20:29:21 -02002386struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
2387 int *ent_cpu, u64 *ent_ts)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002388{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002389 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002390}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002391
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002392/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002393void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002394{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002395 iter->ent = __find_next_entry(iter, &iter->cpu,
2396 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002397
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002398 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002399 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002400
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002401 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002402}
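
/*
 * Illustrative sketch only: walking the merged, time-ordered stream
 * that __find_next_entry() produces across all per-cpu buffers.  The
 * helper is hypothetical.
 */
#if 0
static void example_walk_entries(struct trace_iterator *iter)
{
	while (trace_find_next_entry_inc(iter))
		pr_info("cpu=%d ts=%llu type=%d\n", iter->cpu,
			(unsigned long long)iter->ts, iter->ent->type);
}
#endif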
2403
Ingo Molnare309b412008-05-12 21:20:51 +02002404static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002405{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002406 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002407 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002408}
2409
Ingo Molnare309b412008-05-12 21:20:51 +02002410static void *s_next(struct seq_file *m, void *v, loff_t *pos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002411{
2412 struct trace_iterator *iter = m->private;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002413 int i = (int)*pos;
Ingo Molnar4e3c3332008-05-12 21:20:45 +02002414 void *ent;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002415
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002416 WARN_ON_ONCE(iter->leftover);
2417
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002418 (*pos)++;
2419
2420 /* can't go backwards */
2421 if (iter->idx > i)
2422 return NULL;
2423
2424 if (iter->idx < 0)
Jason Wessel955b61e2010-08-05 09:22:23 -05002425 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002426 else
2427 ent = iter;
2428
2429 while (ent && iter->idx < i)
Jason Wessel955b61e2010-08-05 09:22:23 -05002430 ent = trace_find_next_entry_inc(iter);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002431
2432 iter->pos = *pos;
2433
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002434 return ent;
2435}
2436
Jason Wessel955b61e2010-08-05 09:22:23 -05002437void tracing_iter_reset(struct trace_iterator *iter, int cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002438{
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002439 struct ring_buffer_event *event;
2440 struct ring_buffer_iter *buf_iter;
2441 unsigned long entries = 0;
2442 u64 ts;
2443
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002444 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002445
Steven Rostedt6d158a82012-06-27 20:46:14 -04002446 buf_iter = trace_buffer_iter(iter, cpu);
2447 if (!buf_iter)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002448 return;
2449
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002450 ring_buffer_iter_reset(buf_iter);
2451
2452 /*
 2453	 * With the max latency tracers, a reset may never have taken
 2454	 * place on a cpu. This is evident when the timestamp is
 2455	 * before the start of the buffer.
2456 */
2457 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002458 if (ts >= iter->trace_buffer->time_start)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002459 break;
2460 entries++;
2461 ring_buffer_read(buf_iter, NULL);
2462 }
2463
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002464 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002465}
2466
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002467/*
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002468 * The current tracer is copied to avoid global locking
2469 * all around.
2470 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002471static void *s_start(struct seq_file *m, loff_t *pos)
2472{
2473 struct trace_iterator *iter = m->private;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002474 struct trace_array *tr = iter->tr;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002475 int cpu_file = iter->cpu_file;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002476 void *p = NULL;
2477 loff_t l = 0;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002478 int cpu;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002479
Hiraku Toyooka2fd196e2012-12-26 11:52:52 +09002480 /*
2481 * copy the tracer to avoid using a global lock all around.
 2482	 * iter->trace is a copy of current_trace; the pointer to the
2483 * name may be used instead of a strcmp(), as iter->trace->name
2484 * will point to the same string as current_trace->name.
2485 */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002486 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002487 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
2488 *iter->trace = *tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01002489 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002490
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002491#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002492 if (iter->snapshot && iter->trace->use_max_tr)
2493 return ERR_PTR(-EBUSY);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002494#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002495
2496 if (!iter->snapshot)
2497 atomic_inc(&trace_record_cmdline_disabled);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002498
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002499 if (*pos != iter->pos) {
2500 iter->ent = NULL;
2501 iter->cpu = 0;
2502 iter->idx = -1;
2503
Steven Rostedtae3b5092013-01-23 15:22:59 -05002504 if (cpu_file == RING_BUFFER_ALL_CPUS) {
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002505 for_each_tracing_cpu(cpu)
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002506 tracing_iter_reset(iter, cpu);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01002507 } else
Steven Rostedt2f26ebd2009-09-01 11:06:29 -04002508 tracing_iter_reset(iter, cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002509
Lai Jiangshanac91d852010-03-02 17:54:50 +08002510 iter->leftover = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002511 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
2512 ;
2513
2514 } else {
Steven Rostedta63ce5b2009-12-07 09:11:39 -05002515 /*
2516 * If we overflowed the seq_file before, then we want
2517 * to just reuse the trace_seq buffer again.
2518 */
2519 if (iter->leftover)
2520 p = iter;
2521 else {
2522 l = *pos - 1;
2523 p = s_next(m, p, &l);
2524 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002525 }
2526
Lai Jiangshan4f535962009-05-18 19:35:34 +08002527 trace_event_read_lock();
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002528 trace_access_lock(cpu_file);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002529 return p;
2530}
2531
2532static void s_stop(struct seq_file *m, void *p)
2533{
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002534 struct trace_iterator *iter = m->private;
2535
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002536#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002537 if (iter->snapshot && iter->trace->use_max_tr)
2538 return;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002539#endif
Hiraku Toyookadebdd572012-12-26 11:53:00 +09002540
2541 if (!iter->snapshot)
2542 atomic_dec(&trace_record_cmdline_disabled);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002543
Lai Jiangshan7e53bd42010-01-06 20:08:50 +08002544 trace_access_unlock(iter->cpu_file);
Lai Jiangshan4f535962009-05-18 19:35:34 +08002545 trace_event_read_unlock();
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002546}
2547
static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long count;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		count = ring_buffer_entries_cpu(buf->buffer, cpu);
		/*
		 * If this buffer has skipped entries, then we hold all
		 * entries for the trace and we need to ignore the
		 * ones before the time stamp.
		 */
		if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
			count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
			/* total is the same as the entries */
			*total += count;
		} else
			*total += count +
				ring_buffer_overrun_cpu(buf->buffer, cpu);
		*entries += count;
	}
}

static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "#                  _------=> CPU#            \n"
		    "#                 / _-----=> irqs-off        \n"
		    "#                | / _----=> need-resched    \n"
		    "#                || / _---=> hardirq/softirq \n"
		    "#                ||| / _--=> preempt-depth   \n"
		    "#                |||| /     delay            \n"
		    "#  cmd     pid   ||||| time  |   caller      \n"
		    "#     \\   /      |||||  \\    |   /         \n");
}

static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total;
	unsigned long entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
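
/*
 * Illustrative sketch, not part of this file: print_event_info() above
 * emits a "# entries-in-buffer/entries-written: N/M   #P:K" line at the
 * top of the 'trace' file. The user-space program below, kept under #if 0
 * so it never enters the kernel build, shows one way to read those
 * counters back. The tracefs path is an assumption; it may instead be
 * mounted under /sys/kernel/debug/tracing.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* Assumed mount point; adjust to your system. */
	FILE *f = fopen("/sys/kernel/tracing/trace", "r");
	unsigned long entries, total;
	int cpus;
	char line[256];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f)) {
		/* Whitespace in the scanf format absorbs the padding. */
		if (sscanf(line, "# entries-in-buffer/entries-written: %lu/%lu #P:%d",
			   &entries, &total, &cpus) == 3) {
			printf("%lu of %lu events still buffered (%d CPUs)\n",
			       entries, total, cpus);
			break;
		}
	}
	fclose(f);
	return 0;
}
#endif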

static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n"
		    "#              | |       |          |         |\n");
}

static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#    TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#       | |        |      |          |         |\n");
}

static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                              _-----=> irqs-off\n"
		    "#                             / _----=> need-resched\n"
		    "#                            | / _---=> hardirq/softirq\n"
		    "#                            || / _--=> preempt-depth\n"
		    "#                            ||| /     delay\n"
		    "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n"
		    "#              | |       |   ||||       |         |\n");
}

static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "#                                      _-----=> irqs-off\n");
	seq_puts(m, "#                                     / _----=> need-resched\n");
	seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
	seq_puts(m, "#                                    || / _--=> preempt-depth\n");
	seq_puts(m, "#                                    ||| /     delay\n");
	seq_puts(m, "#    TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
	seq_puts(m, "#      | |        |      |   ||||       |         |\n");
}

void
print_trace_header(struct seq_file *m, struct trace_iterator *iter)
{
	unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_buffer *buf = iter->trace_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
	struct tracer *type = iter->trace;
	unsigned long entries;
	unsigned long total;
	const char *name = "preemption";

	name = type->name;

	get_total_entries(buf, &total, &entries);

	seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
		   name, UTS_RELEASE);
	seq_puts(m, "# -----------------------------------"
		 "---------------------------------\n");
	seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
		   " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
		   nsecs_to_usecs(data->saved_latency),
		   entries,
		   total,
		   buf->cpu,
#if defined(CONFIG_PREEMPT_NONE)
		   "server",
#elif defined(CONFIG_PREEMPT_VOLUNTARY)
		   "desktop",
#elif defined(CONFIG_PREEMPT)
		   "preempt",
#else
		   "unknown",
#endif
		   /* These are reserved for later use */
		   0, 0, 0, 0);
#ifdef CONFIG_SMP
	seq_printf(m, " #P:%d)\n", num_online_cpus());
#else
	seq_puts(m, ")\n");
#endif
	seq_puts(m, "#    -----------------\n");
	seq_printf(m, "#    | task: %.16s-%d "
		   "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
		   data->comm, data->pid,
		   from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
		   data->policy, data->rt_priority);
	seq_puts(m, "#    -----------------\n");

	if (data->critical_start) {
		seq_puts(m, "#  => started at: ");
		seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#  => ended at:   ");
		seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
		trace_print_seq(m, &iter->seq);
		seq_puts(m, "\n#\n");
	}

	seq_puts(m, "#\n");
}

static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}

static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	if (event)
		return event->funcs->raw(iter, 0, event);

	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}

static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}

static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}

int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}

/* Called with trace_event_read_lock() held. */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
	    trace_flags & TRACE_ITER_PRINTK &&
	    trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
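
/*
 * Illustrative sketch, not part of this file: the format cascade in
 * print_trace_line() above is driven by the "bin", "hex" and "raw"
 * entries in the trace_options file. The user-space sketch below, kept
 * under #if 0, flips the output to raw mode and back; the tracefs path
 * is an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int set_opt(const char *opt)
{
	int fd = open("/sys/kernel/tracing/trace_options", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;
	ret = write(fd, opt, strlen(opt));	/* e.g. "raw" or "noraw" */
	close(fd);
	return ret < 0 ? -1 : 0;
}

int main(void)
{
	set_opt("raw");		/* print_raw_fmt() now handles each entry */
	/* ... read the trace file here ... */
	set_opt("noraw");	/* back to the default pretty printer */
	return 0;
}
#endif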

void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}

void trace_default_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;

	if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;
		print_trace_header(m, iter);
		if (!(trace_flags & TRACE_ITER_VERBOSE))
			print_lat_help_header(m);
	} else {
		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
			if (trace_flags & TRACE_ITER_IRQ_INFO)
				if (trace_flags & TRACE_ITER_TGID)
					print_func_help_header_irq_tgid(iter->trace_buffer, m);
				else
					print_func_help_header_irq(iter->trace_buffer, m);
			else
				if (trace_flags & TRACE_ITER_TGID)
					print_func_help_header_tgid(iter->trace_buffer, m);
				else
					print_func_help_header(iter->trace_buffer, m);
		}
	}
}

static void test_ftrace_alive(struct seq_file *m)
{
	if (!ftrace_is_dead())
		return;
	seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
		    "#          MAY BE MISSING FUNCTION EVENTS\n");
}

#ifdef CONFIG_TRACER_MAX_TRACE
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}

static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
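
/*
 * Illustrative sketch, not part of this file: the help text printed by
 * show_snapshot_main_help() describes an echo 0/1/2 protocol. The same
 * sequence from C, kept under #if 0; the tracefs path is an assumption
 * and CONFIG_TRACER_MAX_TRACE must be set for the file to exist.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "1", 1);	/* allocate (if needed) and take a snapshot */
	write(fd, "2", 1);	/* clear the snapshot buffer, keep it allocated */
	write(fd, "0", 1);	/* free the snapshot buffer */
	close(fd);
	return 0;
}
#endif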

static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}

/*
 * Should be used after trace_array_get(); trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
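
/*
 * Illustrative sketch, not part of this file: tracing_get_cpu() relies on
 * trace_create_cpu_file() having stashed "cpu + 1" in i_cdev, so that a
 * NULL i_cdev (the top-level files) decodes to RING_BUFFER_ALL_CPUS. A
 * stand-alone model of that encode/decode pair, kept under #if 0:
 */
#if 0
#include <assert.h>
#include <stddef.h>

#define ALL_CPUS (-1L)	/* stand-in for RING_BUFFER_ALL_CPUS */

static void *encode_cpu(long cpu)
{
	return (void *)(cpu + 1);	/* cpu 0 becomes 1, never NULL */
}

static long decode_cpu(void *cookie)
{
	return cookie ? (long)cookie - 1 : ALL_CPUS;
}

int main(void)
{
	assert(decode_cpu(encode_cpu(0)) == 0);
	assert(decode_cpu(NULL) == ALL_CPUS);	/* no cookie: all CPUs */
	return 0;
}
#endif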

static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
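
/*
 * Illustrative sketch, not part of this file: tracer_seq_ops is what a
 * read() of the 'trace' file ultimately walks (s_start, s_next, s_show,
 * s_stop). From user space the iteration is invisible; the file reads
 * like any other. A minimal cat-style reader, kept under #if 0; the
 * tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/tracing/trace", O_RDONLY);

	if (fd < 0)
		return 1;
	/* Each read() round trip drives one s_start()..s_stop() cycle. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		write(STDOUT_FILENO, buf, n);
	close(fd);
	return 0;
}
#endif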

static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}

int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}

bool tracing_is_disabled(void)
{
	return tracing_disabled ? true : false;
}

/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}

static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}

static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}

static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}

static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
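
/*
 * Illustrative sketch, not part of this file: tracing_open() above resets
 * the ring buffer when the file is opened for writing with O_TRUNC, which
 * is exactly what a shell's "> trace" redirection does. The same reset
 * from C, kept under #if 0; the tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Opening with O_TRUNC is enough; nothing needs to be written. */
	int fd = open("/sys/kernel/tracing/trace", O_WRONLY | O_TRUNC);

	if (fd < 0)
		return 1;
	close(fd);	/* buffer contents were cleared at open time */
	return 0;
}
#endif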

/*
 * Some tracers are not suitable for instance buffers.
 * A tracer is always available for the global array (toplevel)
 * or if it explicitly states that it is.
 */
static bool
trace_ok_for_array(struct tracer *t, struct trace_array *tr)
{
	return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
}

/* Find the next tracer that this trace array may use */
static struct tracer *
get_tracer_for_array(struct trace_array *tr, struct tracer *t)
{
	while (t && !trace_ok_for_array(t, tr))
		t = t->next;

	return t;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t = v;

	(*pos)++;

	if (t)
		t = get_tracer_for_array(tr, t->next);

	return t;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}

static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}

static int t_show(struct seq_file *m, void *v)
{
	struct tracer *t = v;

	if (!t)
		return 0;

	seq_puts(m, t->name);
	if (t->next)
		seq_putc(m, ' ');
	else
		seq_putc(m, '\n');

	return 0;
}

static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};

static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}
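
/*
 * Illustrative sketch, not part of this file: the t_start/t_next/t_show
 * iterator above lists the registered tracers one per token; in mainline
 * this backs the 'available_tracers' file (an inference from context, not
 * stated here). A reader for that list, kept under #if 0; the tracefs
 * path is an assumption.
 */
#if 0
#include <stdio.h>

int main(void)
{
	char name[64];
	FILE *f = fopen("/sys/kernel/tracing/available_tracers", "r");

	if (!f)
		return 1;
	while (fscanf(f, "%63s", name) == 1)	/* names are space separated */
		printf("tracer: %s\n", name);
	fclose(f);
	return 0;
}
#endif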

static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}

loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
{
	int ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, whence);
	else
		file->f_pos = ret = 0;

	return ret;
}

static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};

static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};

/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 */
static char mask_str[NR_CPUS + 1];

static ssize_t
tracing_cpumask_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	int len;

	mutex_lock(&tracing_cpumask_update_lock);

	len = snprintf(mask_str, count, "%*pb\n",
		       cpumask_pr_args(tr->tracing_cpumask));
	if (len >= count) {
		count = -EINVAL;
		goto out_err;
	}
	count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);

out_err:
	mutex_unlock(&tracing_cpumask_update_lock);

	return count;
}

static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;

	mutex_lock(&tracing_cpumask_update_lock);

	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
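
/*
 * Illustrative sketch, not part of this file: tracing_cpumask_write()
 * parses the user string with cpumask_parse_user(), i.e. the hex-group
 * format that the "%*pb" read side prints. A sketch restricting tracing
 * to CPUs 0 and 1, kept under #if 0; the tracefs path is an assumption.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/tracing/tracing_cpumask", O_WRONLY);
	const char *mask = "3";	/* hex mask: bits 0 and 1 => CPUs 0 and 1 */

	if (fd < 0)
		return 1;
	if (write(fd, mask, strlen(mask)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
#endif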

static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};

static int tracing_trace_options_show(struct seq_file *m, void *v)
{
	struct tracer_opt *trace_opts;
	struct trace_array *tr = m->private;
	u32 tracer_flags;
	int i;

	mutex_lock(&trace_types_lock);
	tracer_flags = tr->current_trace->flags->val;
	trace_opts = tr->current_trace->flags->opts;

	for (i = 0; trace_options[i]; i++) {
		if (tr->trace_flags & (1 << i))
			seq_printf(m, "%s\n", trace_options[i]);
		else
			seq_printf(m, "no%s\n", trace_options[i]);
	}

	for (i = 0; trace_opts[i].name; i++) {
		if (tracer_flags & trace_opts[i].bit)
			seq_printf(m, "%s\n", trace_opts[i].name);
		else
			seq_printf(m, "no%s\n", trace_opts[i].name);
	}
	mutex_unlock(&trace_types_lock);

	return 0;
}

static int __set_tracer_option(struct trace_array *tr,
			       struct tracer_flags *tracer_flags,
			       struct tracer_opt *opts, int neg)
{
	struct tracer *trace = tr->current_trace;
	int ret;

	ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
	if (ret)
		return ret;

	if (neg)
		tracer_flags->val &= ~opts->bit;
	else
		tracer_flags->val |= opts->bit;
	return 0;
}

/* Try to assign a tracer specific option */
static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
{
	struct tracer *trace = tr->current_trace;
	struct tracer_flags *tracer_flags = trace->flags;
	struct tracer_opt *opts = NULL;
	int i;

	for (i = 0; tracer_flags->opts[i].name; i++) {
		opts = &tracer_flags->opts[i];

		if (strcmp(cmp, opts->name) == 0)
			return __set_tracer_option(tr, trace->flags, opts, neg);
	}

	return -EINVAL;
}

/* Some tracers require overwrite to stay enabled */
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
{
	if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
		return -1;

	return 0;
}

int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
3627
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003628static int trace_set_options(struct trace_array *tr, char *option)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003629{
Li Zefan8d18eaa2009-12-08 11:17:06 +08003630 char *cmp;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003631 int neg = 0;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003632 int ret = -ENODEV;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003633 int i;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003634 size_t orig_len = strlen(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003635
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003636 cmp = strstrip(option);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003637
Li Zefan8d18eaa2009-12-08 11:17:06 +08003638 if (strncmp(cmp, "no", 2) == 0) {
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003639 neg = 1;
3640 cmp += 2;
3641 }
3642
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003643 mutex_lock(&trace_types_lock);
3644
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003645 for (i = 0; trace_options[i]; i++) {
Li Zefan8d18eaa2009-12-08 11:17:06 +08003646 if (strcmp(cmp, trace_options[i]) == 0) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003647 ret = set_tracer_flag(tr, 1 << i, !neg);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003648 break;
3649 }
3650 }
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003651
3652 /* If no option could be set, test the specific tracer options */
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003653 if (!trace_options[i])
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003654 ret = set_tracer_option(tr, cmp, neg);
Steven Rostedt (Red Hat)69d34da2013-03-14 13:50:56 -04003655
3656 mutex_unlock(&trace_types_lock);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003657
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003658 /*
3659 * If the first trailing whitespace is replaced with '\0' by strstrip,
3660 * turn it back into a space.
3661 */
3662 if (orig_len > strlen(option))
3663 option[strlen(option)] = ' ';
3664
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003665 return ret;
3666}
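/*
 * Usage sketch (illustrative): the global flags handled by the loop
 * above are tried first; the tracer specific path only runs when no
 * global flag name matched. Assuming tracefs is mounted under
 * /sys/kernel/debug/tracing:
 *
 *   # echo sym-offset > trace_options      (set a global flag)
 *   # echo nosym-offset > trace_options    (the "no" prefix clears it)
 */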
3667
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003668static void __init apply_trace_boot_options(void)
3669{
3670 char *buf = trace_boot_options_buf;
3671 char *option;
3672
3673 while (true) {
3674 option = strsep(&buf, ",");
3675
3676 if (!option)
3677 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003678
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05003679 if (*option)
3680 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003681
3682 /* Put back the comma to allow this to be called again */
3683 if (buf)
3684 *(buf - 1) = ',';
3685 }
3686}
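/*
 * Example (illustrative): trace_boot_options_buf is filled from the
 * "trace_options=" kernel command line parameter, e.g.
 *
 *   trace_options=sym-offset,noprint-parent
 *
 * Each comma separated token is handed to trace_set_options() as if
 * it had been written to the trace_options file.
 */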
3687
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003688static ssize_t
3689tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3690 size_t cnt, loff_t *ppos)
3691{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003692 struct seq_file *m = filp->private_data;
3693 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003694 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003695 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003696
3697 if (cnt >= sizeof(buf))
3698 return -EINVAL;
3699
3700 if (copy_from_user(&buf, ubuf, cnt))
3701 return -EFAULT;
3702
Steven Rostedta8dd2172013-01-09 20:54:17 -05003703 buf[cnt] = 0;
3704
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003705 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003706 if (ret < 0)
3707 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003708
Jiri Olsacf8517c2009-10-23 19:36:16 -04003709 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003710
3711 return cnt;
3712}
3713
Li Zefanfdb372e2009-12-08 11:15:59 +08003714static int tracing_trace_options_open(struct inode *inode, struct file *file)
3715{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003716 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003717 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003718
Li Zefanfdb372e2009-12-08 11:15:59 +08003719 if (tracing_disabled)
3720 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003721
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003722 if (trace_array_get(tr) < 0)
3723 return -ENODEV;
3724
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003725 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3726 if (ret < 0)
3727 trace_array_put(tr);
3728
3729 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003730}
3731
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003732static const struct file_operations tracing_iter_fops = {
Li Zefanfdb372e2009-12-08 11:15:59 +08003733 .open = tracing_trace_options_open,
3734 .read = seq_read,
3735 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003736 .release = tracing_single_release_tr,
Steven Rostedtee6bce52008-11-12 17:52:37 -05003737 .write = tracing_trace_options_write,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003738};
3739
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003740static const char readme_msg[] =
3741 "tracing mini-HOWTO:\n\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003742 "# echo 0 > tracing_on : quick way to disable tracing\n"
3743 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
3744 " Important files:\n"
3745 " trace\t\t\t- The static contents of the buffer\n"
3746 "\t\t\t To clear the buffer write into this file: echo > trace\n"
3747 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
3748 " current_tracer\t- function and latency tracers\n"
3749 " available_tracers\t- list of configured tracers for current_tracer\n"
3750 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
3751 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
3752 " trace_clock\t\t-change the clock used to order events\n"
3753 " local: Per cpu clock but may not be synced across CPUs\n"
3754 " global: Synced across CPUs but slows tracing down.\n"
3755 " counter: Not a clock, but just an increment\n"
3756 " uptime: Jiffy counter from time of boot\n"
3757 " perf: Same clock that perf events use\n"
3758#ifdef CONFIG_X86_64
3759 " x86-tsc: TSC cycle counter\n"
3760#endif
3761 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
3762 " tracing_cpumask\t- Limit which CPUs to trace\n"
3763 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
3764 "\t\t\t Remove sub-buffer with rmdir\n"
3765 " trace_options\t\t- Set format or modify how tracing happens\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003766 "\t\t\t Disable an option by prefixing 'no' to the\n"
3767 "\t\t\t option name\n"
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003768 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003769#ifdef CONFIG_DYNAMIC_FTRACE
3770 "\n available_filter_functions - list of functions that can be filtered on\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003771 " set_ftrace_filter\t- echo function name in here to only trace these\n"
3772 "\t\t\t functions\n"
3773 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3774 "\t modules: Can select a group via module\n"
3775 "\t Format: :mod:<module-name>\n"
3776 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
3777 "\t triggers: a command to perform when function is hit\n"
3778 "\t Format: <function>:<trigger>[:count]\n"
3779 "\t trigger: traceon, traceoff\n"
3780 "\t\t enable_event:<system>:<event>\n"
3781 "\t\t disable_event:<system>:<event>\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003782#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003783 "\t\t stacktrace\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003784#endif
3785#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003786 "\t\t snapshot\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003787#endif
Steven Rostedt (Red Hat)17a280e2014-04-10 22:43:37 -04003788 "\t\t dump\n"
3789 "\t\t cpudump\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003790 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
3791 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
3792 "\t The first one will disable tracing every time do_fault is hit\n"
3793 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
3794 "\t The first time do trap is hit and it disables tracing, the\n"
3795 "\t counter will decrement to 2. If tracing is already disabled,\n"
3796 "\t the counter will not decrement. It only decrements when the\n"
3797 "\t trigger did work\n"
3798 "\t To remove trigger without count:\n"
3799 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
3800 "\t To remove trigger with a count:\n"
3801 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003802 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003803 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
3804 "\t modules: Can select a group via module command :mod:\n"
3805 "\t Does not accept triggers\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003806#endif /* CONFIG_DYNAMIC_FTRACE */
3807#ifdef CONFIG_FUNCTION_TRACER
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003808 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
3809 "\t\t (function)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003810#endif
3811#ifdef CONFIG_FUNCTION_GRAPH_TRACER
3812 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
Namhyung Kimd048a8c72014-06-13 01:23:53 +09003813 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003814 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
3815#endif
3816#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003817 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
3818 "\t\t\t snapshot buffer. Read the contents for more\n"
3819 "\t\t\t information\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003820#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003821#ifdef CONFIG_STACK_TRACER
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003822 " stack_trace\t\t- Shows the max stack trace when active\n"
3823 " stack_max_size\t- Shows current max stack size that was traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003824 "\t\t\t Write into this file to reset the max size (trigger a\n"
3825 "\t\t\t new trace)\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003826#ifdef CONFIG_DYNAMIC_FTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003827 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
3828 "\t\t\t traces\n"
Steven Rostedt (Red Hat)22f45642013-03-15 17:23:20 -04003829#endif
zhangwei(Jovi)991821c2013-07-15 16:32:34 +08003830#endif /* CONFIG_STACK_TRACER */
Tom Zanussi26f25562014-01-17 15:11:44 -06003831 " events/\t\t- Directory containing all trace event subsystems:\n"
3832 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
3833 " events/<system>/\t- Directory containing all trace events for <system>:\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003834 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
3835 "\t\t\t events\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003836 " filter\t\t- If set, only events passing filter are traced\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003837 " events/<system>/<event>/\t- Directory containing control files for\n"
3838 "\t\t\t <event>:\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003839 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
3840 " filter\t\t- If set, only events passing filter are traced\n"
3841 " trigger\t\t- If set, a command to perform when event is hit\n"
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003842 "\t Format: <trigger>[:count][if <filter>]\n"
3843 "\t trigger: traceon, traceoff\n"
3844 "\t enable_event:<system>:<event>\n"
3845 "\t disable_event:<system>:<event>\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003846#ifdef CONFIG_STACKTRACE
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003847 "\t\t stacktrace\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003848#endif
3849#ifdef CONFIG_TRACER_SNAPSHOT
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003850 "\t\t snapshot\n"
Tom Zanussi26f25562014-01-17 15:11:44 -06003851#endif
Steven Rostedt (Red Hat)71485c42014-01-23 00:10:04 -05003852 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
3853 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
3854 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
3855 "\t events/block/block_unplug/trigger\n"
3856 "\t The first disables tracing every time block_unplug is hit.\n"
3857 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
3858 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
3859 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
3860 "\t Like function triggers, the counter is only decremented if it\n"
3861 "\t enabled or disabled tracing.\n"
3862 "\t To remove a trigger without a count:\n"
3863 "\t echo '!<trigger> > <system>/<event>/trigger\n"
3864 "\t To remove a trigger with a count:\n"
3865 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
3866 "\t Filters can be ignored when removing a trigger.\n"
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003867;
3868
3869static ssize_t
3870tracing_readme_read(struct file *filp, char __user *ubuf,
3871 size_t cnt, loff_t *ppos)
3872{
3873 return simple_read_from_buffer(ubuf, cnt, ppos,
3874 readme_msg, strlen(readme_msg));
3875}
3876
Steven Rostedt5e2336a2009-03-05 21:44:55 -05003877static const struct file_operations tracing_readme_fops = {
Ingo Molnarc7078de2008-05-12 21:20:52 +02003878 .open = tracing_open_generic,
3879 .read = tracing_readme_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02003880 .llseek = generic_file_llseek,
Ingo Molnar7bd2f242008-05-12 21:20:45 +02003881};
3882
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003883static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003884{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003885 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003886
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003887 if (*pos || m->count)
3888 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003889
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003890 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003891
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003892 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3893 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003894 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003895 continue;
3896
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003897 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003898 }
3899
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003900 return NULL;
3901}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003902
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003903static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3904{
3905 void *v;
3906 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003907
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003908 preempt_disable();
3909 arch_spin_lock(&trace_cmdline_lock);
3910
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003911 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003912 while (l <= *pos) {
3913 v = saved_cmdlines_next(m, v, &l);
3914 if (!v)
3915 return NULL;
3916 }
3917
3918 return v;
3919}
3920
3921static void saved_cmdlines_stop(struct seq_file *m, void *v)
3922{
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003923 arch_spin_unlock(&trace_cmdline_lock);
3924 preempt_enable();
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003925}
3926
3927static int saved_cmdlines_show(struct seq_file *m, void *v)
3928{
3929 char buf[TASK_COMM_LEN];
3930 unsigned int *pid = v;
3931
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003932 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003933 seq_printf(m, "%d %s\n", *pid, buf);
3934 return 0;
3935}
3936
3937static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
3938 .start = saved_cmdlines_start,
3939 .next = saved_cmdlines_next,
3940 .stop = saved_cmdlines_stop,
3941 .show = saved_cmdlines_show,
3942};
3943
3944static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3945{
3946 if (tracing_disabled)
3947 return -ENODEV;
3948
3949 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003950}
3951
3952static const struct file_operations tracing_saved_cmdlines_fops = {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003953 .open = tracing_saved_cmdlines_open,
3954 .read = seq_read,
3955 .llseek = seq_lseek,
3956 .release = seq_release,
Avadh Patel69abe6a2009-04-10 16:04:48 -04003957};
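/*
 * Illustrative output of the saved_cmdlines file (values will differ):
 *
 *   # cat saved_cmdlines
 *   1 init
 *   1234 bash
 *
 * Each line is "<pid> <comm>", as printed by saved_cmdlines_show().
 */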
3958
3959static ssize_t
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003960tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
3961 size_t cnt, loff_t *ppos)
3962{
3963 char buf[64];
3964 int r;
3965
3966 arch_spin_lock(&trace_cmdline_lock);
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003967 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003968 arch_spin_unlock(&trace_cmdline_lock);
3969
3970 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3971}
3972
3973static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3974{
3975 kfree(s->saved_cmdlines);
3976 kfree(s->map_cmdline_to_pid);
3977 kfree(s);
3978}
3979
3980static int tracing_resize_saved_cmdlines(unsigned int val)
3981{
3982 struct saved_cmdlines_buffer *s, *savedcmd_temp;
3983
Namhyung Kima6af8fb2014-06-10 16:11:35 +09003984 s = kmalloc(sizeof(*s), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003985 if (!s)
3986 return -ENOMEM;
3987
3988 if (allocate_cmdlines_buffer(val, s) < 0) {
3989 kfree(s);
3990 return -ENOMEM;
3991 }
3992
3993 arch_spin_lock(&trace_cmdline_lock);
3994 savedcmd_temp = savedcmd;
3995 savedcmd = s;
3996 arch_spin_unlock(&trace_cmdline_lock);
3997 free_saved_cmdlines_buffer(savedcmd_temp);
3998
3999 return 0;
4000}
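/*
 * Design note: tracing_resize_saved_cmdlines() allocates the new
 * buffer first, swaps the savedcmd pointer under trace_cmdline_lock,
 * and frees the old buffer only after the unlock, so lookups done
 * under the lock never see a half-initialized buffer.
 */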
4001
4002static ssize_t
4003tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4004 size_t cnt, loff_t *ppos)
4005{
4006 unsigned long val;
4007 int ret;
4008
4009 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4010 if (ret)
4011 return ret;
4012
4013 /* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
4014 if (!val || val > PID_MAX_DEFAULT)
4015 return -EINVAL;
4016
4017 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4018 if (ret < 0)
4019 return ret;
4020
4021 *ppos += cnt;
4022
4023 return cnt;
4024}
4025
4026static const struct file_operations tracing_saved_cmdlines_size_fops = {
4027 .open = tracing_open_generic,
4028 .read = tracing_saved_cmdlines_size_read,
4029 .write = tracing_saved_cmdlines_size_write,
4030};
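/*
 * Usage sketch (illustrative): resizing the cmdline cache from user
 * space; values outside 1..PID_MAX_DEFAULT are rejected by
 * tracing_saved_cmdlines_size_write():
 *
 *   # echo 1024 > saved_cmdlines_size
 *   # cat saved_cmdlines_size
 *   1024
 */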
4031
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004032#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4033static union trace_enum_map_item *
4034update_enum_map(union trace_enum_map_item *ptr)
4035{
4036 if (!ptr->map.enum_string) {
4037 if (ptr->tail.next) {
4038 ptr = ptr->tail.next;
4039 /* Set ptr to the next real item (skip head) */
4040 ptr++;
4041 } else
4042 return NULL;
4043 }
4044 return ptr;
4045}
4046
4047static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4048{
4049 union trace_enum_map_item *ptr = v;
4050
4051 /*
4052 * Paranoid! If ptr points to end, we don't want to increment past it.
4053 * This really should never happen.
4054 */
4055 ptr = update_enum_map(ptr);
4056 if (WARN_ON_ONCE(!ptr))
4057 return NULL;
4058
4059 ptr++;
4060
4061 (*pos)++;
4062
4063 ptr = update_enum_map(ptr);
4064
4065 return ptr;
4066}
4067
4068static void *enum_map_start(struct seq_file *m, loff_t *pos)
4069{
4070 union trace_enum_map_item *v;
4071 loff_t l = 0;
4072
4073 mutex_lock(&trace_enum_mutex);
4074
4075 v = trace_enum_maps;
4076 if (v)
4077 v++;
4078
4079 while (v && l < *pos)
4080 v = enum_map_next(m, v, &l);
4082
4083 return v;
4084}
4085
4086static void enum_map_stop(struct seq_file *m, void *v)
4087{
4088 mutex_unlock(&trace_enum_mutex);
4089}
4090
4091static int enum_map_show(struct seq_file *m, void *v)
4092{
4093 union trace_enum_map_item *ptr = v;
4094
4095 seq_printf(m, "%s %ld (%s)\n",
4096 ptr->map.enum_string, ptr->map.enum_value,
4097 ptr->map.system);
4098
4099 return 0;
4100}
4101
4102static const struct seq_operations tracing_enum_map_seq_ops = {
4103 .start = enum_map_start,
4104 .next = enum_map_next,
4105 .stop = enum_map_stop,
4106 .show = enum_map_show,
4107};
4108
4109static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4110{
4111 if (tracing_disabled)
4112 return -ENODEV;
4113
4114 return seq_open(filp, &tracing_enum_map_seq_ops);
4115}
4116
4117static const struct file_operations tracing_enum_map_fops = {
4118 .open = tracing_enum_map_open,
4119 .read = seq_read,
4120 .llseek = seq_lseek,
4121 .release = seq_release,
4122};
4123
4124static inline union trace_enum_map_item *
4125trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4126{
4127 /* Return tail of array given the head */
4128 return ptr + ptr->head.length + 1;
4129}
4130
4131static void
4132trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4133 int len)
4134{
4135 struct trace_enum_map **stop;
4136 struct trace_enum_map **map;
4137 union trace_enum_map_item *map_array;
4138 union trace_enum_map_item *ptr;
4139
4140 stop = start + len;
4141
4142 /*
4143 * The trace_enum_maps contains the map plus a head and tail item,
4144 * where the head holds the module and length of array, and the
4145 * tail holds a pointer to the next list.
4146 */
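	/*
	 * Layout sketch of one map_array allocation (len + 2 items):
	 *
	 *   [ head | map 0 | map 1 | ... | map len-1 | tail ]
	 *
	 * head.mod and head.length describe the block, the maps are
	 * copied in below, and the zeroed item at the end acts as the
	 * tail (tail.next links to the next block, or is NULL).
	 */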
4147 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4148 if (!map_array) {
4149 pr_warning("Unable to allocate trace enum mapping\n");
4150 return;
4151 }
4152
4153 mutex_lock(&trace_enum_mutex);
4154
4155 if (!trace_enum_maps) {
4156 trace_enum_maps = map_array;
4157 } else {
4158 ptr = trace_enum_maps;
4159 for (;;) {
4160 ptr = trace_enum_jmp_to_tail(ptr);
4161 if (!ptr->tail.next)
4162 break;
4163 ptr = ptr->tail.next;
4165 }
4166 ptr->tail.next = map_array;
4167 }
4168 map_array->head.mod = mod;
4169 map_array->head.length = len;
4170 map_array++;
4171
4172 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4173 map_array->map = **map;
4174 map_array++;
4175 }
4176 memset(map_array, 0, sizeof(*map_array));
4177
4178 mutex_unlock(&trace_enum_mutex);
4179}
4180
4181static void trace_create_enum_file(struct dentry *d_tracer)
4182{
4183 trace_create_file("enum_map", 0444, d_tracer,
4184 NULL, &tracing_enum_map_fops);
4185}
4186
4187#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4188static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4189static inline void trace_insert_enum_map_file(struct module *mod,
4190 struct trace_enum_map **start, int len) { }
4191#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4192
4193static void trace_insert_enum_map(struct module *mod,
4194 struct trace_enum_map **start, int len)
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004195{
4196 struct trace_enum_map **map;
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004197
4198 if (len <= 0)
4199 return;
4200
4201 map = start;
4202
4203 trace_event_enum_update(map, len);
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004204
4205 trace_insert_enum_map_file(mod, start, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04004206}
4207
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004208static ssize_t
Jamie Gennis6019e592012-11-21 15:04:25 -08004209tracing_saved_tgids_read(struct file *file, char __user *ubuf,
4210 size_t cnt, loff_t *ppos)
4211{
4212 char *file_buf;
4213 char *buf;
4214 int len = 0;
4215 int pid;
4216 int i;
4217
4218 file_buf = kmalloc(savedcmd->cmdline_num*(16+1+16), GFP_KERNEL);
4219 if (!file_buf)
4220 return -ENOMEM;
4221
4222 buf = file_buf;
4223
4224 for (i = 0; i < savedcmd->cmdline_num; i++) {
4225 int tgid;
4226 int r;
4227
4228 pid = savedcmd->map_cmdline_to_pid[i];
4229 if (pid == -1 || pid == NO_CMDLINE_MAP)
4230 continue;
4231
4232 tgid = trace_find_tgid(pid);
4233 r = sprintf(buf, "%d %d\n", pid, tgid);
4234 buf += r;
4235 len += r;
4236 }
4237
4238 len = simple_read_from_buffer(ubuf, cnt, ppos,
4239 file_buf, len);
4240
4241 kfree(file_buf);
4242
4243 return len;
4244}
4245
4246static const struct file_operations tracing_saved_tgids_fops = {
4247 .open = tracing_open_generic,
4248 .read = tracing_saved_tgids_read,
4249 .llseek = generic_file_llseek,
4250};
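/*
 * Illustrative output of the saved_tgids file (values will differ):
 *
 *   # cat saved_tgids
 *   1 1
 *   1234 1230
 *
 * Each line is "<pid> <tgid>" for every cached cmdline entry.
 */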
4251
4252static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004253tracing_set_trace_read(struct file *filp, char __user *ubuf,
4254 size_t cnt, loff_t *ppos)
4255{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004256 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004257 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004258 int r;
4259
4260 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004261 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004262 mutex_unlock(&trace_types_lock);
4263
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004264 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004265}
4266
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004267int tracer_init(struct tracer *t, struct trace_array *tr)
4268{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004269 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004270 return t->init(tr);
4271}
4272
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004273static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004274{
4275 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004276
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004277 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004278 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004279}
4280
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004281#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004282/* resize @trace_buf's buffer to the size of @size_buf's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004283static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4284 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004285{
4286 int cpu, ret = 0;
4287
4288 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4289 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004290 ret = ring_buffer_resize(trace_buf->buffer,
4291 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004292 if (ret < 0)
4293 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004294 per_cpu_ptr(trace_buf->data, cpu)->entries =
4295 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004296 }
4297 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004298 ret = ring_buffer_resize(trace_buf->buffer,
4299 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004300 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004301 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4302 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004303 }
4304
4305 return ret;
4306}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004307#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004308
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004309static int __tracing_resize_ring_buffer(struct trace_array *tr,
4310 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004311{
4312 int ret;
4313
4314 /*
4315 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04004316 * we use the size that was given, and we can forget about
4317 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004318 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004319 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004320
Steven Rostedtb382ede62012-10-10 21:44:34 -04004321 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004322 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004323 return 0;
4324
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004325 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004326 if (ret < 0)
4327 return ret;
4328
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004329#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004330 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4331 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004332 goto out;
4333
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004334 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004335 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004336 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4337 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004338 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004339 /*
4340 * AARGH! We are left with different
4341 * size max buffer!!!!
4342 * The max buffer is our "snapshot" buffer.
4343 * When a tracer needs a snapshot (one of the
4344 * latency tracers), it swaps the max buffer
4345 * with the saved snap shot. We succeeded to
4346 * update the size of the main buffer, but failed to
4347 * update the size of the max buffer. But when we tried
4348 * to reset the main buffer to the original size, we
4349 * failed there too. This is very unlikely to
4350 * happen, but if it does, warn and kill all
4351 * tracing.
4352 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004353 WARN_ON(1);
4354 tracing_disabled = 1;
4355 }
4356 return ret;
4357 }
4358
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004359 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004360 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004361 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004362 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004363
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004364 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004365#endif /* CONFIG_TRACER_MAX_TRACE */
4366
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004367 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004368 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004369 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004370 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004371
4372 return ret;
4373}
4374
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004375static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4376 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004377{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004378 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004379
4380 mutex_lock(&trace_types_lock);
4381
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004382 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4383 /* make sure, this cpu is enabled in the mask */
4384 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4385 ret = -EINVAL;
4386 goto out;
4387 }
4388 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004389
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004390 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004391 if (ret < 0)
4392 ret = -ENOMEM;
4393
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004394out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004395 mutex_unlock(&trace_types_lock);
4396
4397 return ret;
4398}
4399
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004400
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004401/**
4402 * tracing_update_buffers - used by tracing facility to expand ring buffers
4403 *
4404 * To save memory when tracing is never used on a system that has it
4405 * configured in, the ring buffers start at a minimum size. Once
4406 * a user starts to use the tracing facility, they need to grow
4407 * to their default size.
4408 *
4409 * This function is to be called when a tracer is about to be used.
4410 */
4411int tracing_update_buffers(void)
4412{
4413 int ret = 0;
4414
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004415 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004416 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004417 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004418 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004419 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004420
4421 return ret;
4422}
4423
Steven Rostedt577b7852009-02-26 23:43:05 -05004424struct trace_option_dentry;
4425
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004426static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004427create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004428
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004429/*
4430 * Used to clear out the tracer before deletion of an instance.
4431 * Must have trace_types_lock held.
4432 */
4433static void tracing_set_nop(struct trace_array *tr)
4434{
4435 if (tr->current_trace == &nop_trace)
4436 return;
4437
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004438 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004439
4440 if (tr->current_trace->reset)
4441 tr->current_trace->reset(tr);
4442
4443 tr->current_trace = &nop_trace;
4444}
4445
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04004446static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004447{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004448 /* Only enable if the directory has been created already. */
4449 if (!tr->dir)
4450 return;
4451
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004452 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004453}
4454
4455static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4456{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004457 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004458#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004459 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004460#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004461 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004462
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004463 mutex_lock(&trace_types_lock);
4464
Steven Rostedt73c51622009-03-11 13:42:01 -04004465 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004466 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004467 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004468 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004469 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004470 ret = 0;
4471 }
4472
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004473 for (t = trace_types; t; t = t->next) {
4474 if (strcmp(t->name, buf) == 0)
4475 break;
4476 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004477 if (!t) {
4478 ret = -EINVAL;
4479 goto out;
4480 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004481 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004482 goto out;
4483
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004484 /* Some tracers are only allowed for the top level buffer */
4485 if (!trace_ok_for_array(t, tr)) {
4486 ret = -EINVAL;
4487 goto out;
4488 }
4489
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004490 /* If trace pipe files are being read, we can't change the tracer */
4491 if (tr->current_trace->ref) {
4492 ret = -EBUSY;
4493 goto out;
4494 }
4495
Steven Rostedt9f029e82008-11-12 15:24:24 -05004496 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004497
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004498 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004499
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004500 if (tr->current_trace->reset)
4501 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004502
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004503 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004504 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004505
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004506#ifdef CONFIG_TRACER_MAX_TRACE
4507 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004508
4509 if (had_max_tr && !t->use_max_tr) {
4510 /*
4511 * We need to make sure that the update_max_tr sees that
4512 * current_trace changed to nop_trace to keep it from
4513 * swapping the buffers after we resize it.
4514 * The update_max_tr is called with interrupts disabled,
4515 * so a synchronize_sched() is sufficient.
4516 */
4517 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004518 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004519 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004520#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004521
4522#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004523 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004524 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004525 if (ret < 0)
4526 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004527 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004528#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004529
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004530 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004531 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004532 if (ret)
4533 goto out;
4534 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004535
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004536 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004537 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004538 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004539 out:
4540 mutex_unlock(&trace_types_lock);
4541
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004542 return ret;
4543}
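/*
 * Usage sketch (illustrative): this is the path behind the
 * current_tracer file; the tracer list depends on the kernel
 * configuration:
 *
 *   # cat available_tracers
 *   blk function_graph function nop
 *   # echo function_graph > current_tracer
 */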
4544
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004545static ssize_t
4546tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4547 size_t cnt, loff_t *ppos)
4548{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004549 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004550 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004551 int i;
4552 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004553 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004554
Steven Rostedt60063a62008-10-28 10:44:24 -04004555 ret = cnt;
4556
Li Zefanee6c2c12009-09-18 14:06:47 +08004557 if (cnt > MAX_TRACER_SIZE)
4558 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004559
4560 if (copy_from_user(&buf, ubuf, cnt))
4561 return -EFAULT;
4562
4563 buf[cnt] = 0;
4564
4565 /* strip ending whitespace. */
4566 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4567 buf[i] = 0;
4568
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004569 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004570 if (err)
4571 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004572
Jiri Olsacf8517c2009-10-23 19:36:16 -04004573 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004574
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004575 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004576}
4577
4578static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004579tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4580 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004581{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004582 char buf[64];
4583 int r;
4584
Steven Rostedtcffae432008-05-12 21:21:00 +02004585 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004586 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004587 if (r > sizeof(buf))
4588 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004589 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004590}
4591
4592static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004593tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4594 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004595{
Hannes Eder5e398412009-02-10 19:44:34 +01004596 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004597 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004598
Peter Huewe22fe9b52011-06-07 21:58:27 +02004599 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4600 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004601 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004602
4603 *ptr = val * 1000;
4604
4605 return cnt;
4606}
4607
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004608static ssize_t
4609tracing_thresh_read(struct file *filp, char __user *ubuf,
4610 size_t cnt, loff_t *ppos)
4611{
4612 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4613}
4614
4615static ssize_t
4616tracing_thresh_write(struct file *filp, const char __user *ubuf,
4617 size_t cnt, loff_t *ppos)
4618{
4619 struct trace_array *tr = filp->private_data;
4620 int ret;
4621
4622 mutex_lock(&trace_types_lock);
4623 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4624 if (ret < 0)
4625 goto out;
4626
4627 if (tr->current_trace->update_thresh) {
4628 ret = tr->current_trace->update_thresh(tr);
4629 if (ret < 0)
4630 goto out;
4631 }
4632
4633 ret = cnt;
4634out:
4635 mutex_unlock(&trace_types_lock);
4636
4637 return ret;
4638}
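/*
 * Usage sketch (illustrative): tracing_thresh is written in
 * microseconds and stored in nanoseconds (tracing_nsecs_write()
 * multiplies by 1000):
 *
 *   # echo 100 > tracing_thresh     (trace latencies over 100 usecs)
 */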
4639
Chen Gange428abb2015-11-10 05:15:15 +08004640#ifdef CONFIG_TRACER_MAX_TRACE
4641
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004642static ssize_t
4643tracing_max_lat_read(struct file *filp, char __user *ubuf,
4644 size_t cnt, loff_t *ppos)
4645{
4646 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4647}
4648
4649static ssize_t
4650tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4651 size_t cnt, loff_t *ppos)
4652{
4653 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4654}
4655
Chen Gange428abb2015-11-10 05:15:15 +08004656#endif
4657
Steven Rostedtb3806b42008-05-12 21:20:46 +02004658static int tracing_open_pipe(struct inode *inode, struct file *filp)
4659{
Oleg Nesterov15544202013-07-23 17:25:57 +02004660 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004661 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004662 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004663
4664 if (tracing_disabled)
4665 return -ENODEV;
4666
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004667 if (trace_array_get(tr) < 0)
4668 return -ENODEV;
4669
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004670 mutex_lock(&trace_types_lock);
4671
Steven Rostedtb3806b42008-05-12 21:20:46 +02004672 /* create a buffer to store the information to pass to userspace */
4673 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004674 if (!iter) {
4675 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004676 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004677 goto out;
4678 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004679
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004680 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004681 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004682
4683 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4684 ret = -ENOMEM;
4685 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304686 }
4687
Steven Rostedta3097202008-11-07 22:36:02 -05004688 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304689 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004690
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004691 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04004692 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4693
David Sharp8be07092012-11-13 12:18:22 -08004694 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004695 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004696 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4697
Oleg Nesterov15544202013-07-23 17:25:57 +02004698 iter->tr = tr;
4699 iter->trace_buffer = &tr->trace_buffer;
4700 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004701 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004702 filp->private_data = iter;
4703
Steven Rostedt107bad82008-05-12 21:21:01 +02004704 if (iter->trace->pipe_open)
4705 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004706
Arnd Bergmannb4447862010-07-07 23:40:11 +02004707 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004708
4709 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004710out:
4711 mutex_unlock(&trace_types_lock);
4712 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004713
4714fail:
4715 /* iter->trace points at tr->current_trace; it must not be freed */
4716 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004717 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004718 mutex_unlock(&trace_types_lock);
4719 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004720}
4721
4722static int tracing_release_pipe(struct inode *inode, struct file *file)
4723{
4724 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004725 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004726
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004727 mutex_lock(&trace_types_lock);
4728
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004729 tr->current_trace->ref--;
4730
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004731 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004732 iter->trace->pipe_close(iter);
4733
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004734 mutex_unlock(&trace_types_lock);
4735
Rusty Russell44623442009-01-01 10:12:23 +10304736 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004737 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004738 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004739
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004740 trace_array_put(tr);
4741
Steven Rostedtb3806b42008-05-12 21:20:46 +02004742 return 0;
4743}
4744
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004745static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004746trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004747{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004748 struct trace_array *tr = iter->tr;
4749
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004750 /* Iterators are static, they should be filled or empty */
4751 if (trace_buffer_iter(iter, iter->cpu_file))
4752 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004753
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004754 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004755 /*
4756 * Always select as readable when in blocking mode
4757 */
4758 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004759 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004760 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004761 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004762}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004763
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004764static unsigned int
4765tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4766{
4767 struct trace_iterator *iter = filp->private_data;
4768
4769 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004770}
4771
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004772/* Must be called with iter->mutex held. */
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004773static int tracing_wait_pipe(struct file *filp)
4774{
4775 struct trace_iterator *iter = filp->private_data;
Steven Rostedt (Red Hat)8b8b3682014-06-10 09:46:00 -04004776 int ret;
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004777
4778 while (trace_empty(iter)) {
4779
4780 if ((filp->f_flags & O_NONBLOCK)) {
4781 return -EAGAIN;
4782 }
4783
Eduard - Gabriel Munteanuff987812009-02-09 08:15:55 +02004784 /*
Liu Bo250bfd32013-01-14 10:54:11 +08004785 * We stop blocking once we have read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		if (ret)
			return ret;
	}

	return 1;
}

/*
 * Consumer reader.
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		return sret;

	trace_seq_init(&iter->seq);

	/*
	 * Avoid more than one consumer on a single file descriptor.
	 * This is just a matter of trace coherency: the ring buffer
	 * itself is protected.
	 */
	mutex_lock(&iter->mutex);
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq
		 * buffer size and should have left via the partial-line
		 * condition above.  One of the trace_seq_* functions is
		 * not being used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to the user, in spite of consuming
	 * trace entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
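
/*
 * Illustrative usage, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	cat /sys/kernel/tracing/trace_pipe
 *
 * Unlike the "trace" file, reads here consume the entries they return,
 * and an empty buffer blocks the reader until more data arrives.
 */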

static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}

static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= generic_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= generic_pipe_buf_get,
};

static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because TRACE_TYPE_PARTIAL_LINE
		 * should only be returned when iter->seq has overflowed.
		 * But check it anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}

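/*
 * Splice support for trace_pipe: each spliced page carries formatted
 * trace text.  Compare tracing_buffers_splice_read() below, which
 * splices raw ring-buffer pages instead.
 */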
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages	= 0, /* This gets updated below. */
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &tracing_pipe_buf_ops,
		.spd_release	= tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	mutex_lock(&iter->mutex);

	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	spd.nr_pages = i;

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}

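/*
 * Read side of buffer_size_kb.  A per-CPU instance reports that CPU's
 * buffer size; the top-level file reports one size if all per-CPU
 * buffers agree and "X" if they differ.
 */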
static ssize_t
tracing_entries_read(struct file *filp, char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	int cpu = tracing_get_cpu(inode);
	char buf[64];
	int r = 0;
	ssize_t ret;

	mutex_lock(&trace_types_lock);

	if (cpu == RING_BUFFER_ALL_CPUS) {
		int cpu, buf_size_same;
		unsigned long size;

		size = 0;
		buf_size_same = 1;
		/* check if all cpu buffer sizes are the same */
		for_each_tracing_cpu(cpu) {
			/* fill in the size from the first enabled cpu */
			if (size == 0)
				size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
			if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
				buf_size_same = 0;
				break;
			}
		}

		if (buf_size_same) {
			if (!ring_buffer_expanded)
				r = sprintf(buf, "%lu (expanded: %lu)\n",
					    size >> 10,
					    trace_buf_size >> 10);
			else
				r = sprintf(buf, "%lu\n", size >> 10);
		} else
			r = sprintf(buf, "X\n");
	} else
		r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);

	mutex_unlock(&trace_types_lock);

	ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
	return ret;
}

static ssize_t
tracing_entries_write(struct file *filp, const char __user *ubuf,
		      size_t cnt, loff_t *ppos)
{
	struct inode *inode = file_inode(filp);
	struct trace_array *tr = inode->i_private;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry */
	if (!val)
		return -EINVAL;

	/* value is in KB */
	val <<= 10;
	ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
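
/*
 * Illustrative usage: "echo 4096 > buffer_size_kb" resizes each
 * per-CPU buffer to 4 MB, while writing the same value into a
 * per_cpu/cpuN/buffer_size_kb file resizes only that CPU's buffer.
 */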

static ssize_t
tracing_total_entries_read(struct file *filp, char __user *ubuf,
				size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r, cpu;
	unsigned long size = 0, expanded_size = 0;

	mutex_lock(&trace_types_lock);
	for_each_tracing_cpu(cpu) {
		size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
		if (!ring_buffer_expanded)
			expanded_size += trace_buf_size >> 10;
	}
	if (ring_buffer_expanded)
		r = sprintf(buf, "%lu\n", size);
	else
		r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

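/*
 * The "free_buffer" file shrinks the ring buffer to zero when released.
 * The intended pattern is for a task to hold the file open while it
 * traces, so the buffer is reclaimed even if the task is killed;
 * writes are accepted but ignored.
 */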
static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written; this
	 * function exists only so that "echo" into the file does not
	 * report an error.
	 */

	*ppos += cnt;

	return cnt;
}

static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	trace_array_put(tr);

	return 0;
}

static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non-intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory.
	 * It most likely already is, since userspace just referenced
	 * it, but there is no guarantee.  By using get_user_pages_fast()
	 * and kmap_atomic()/kunmap_atomic() we can get access to the
	 * pages directly and write the data straight into the ring
	 * buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	for (i = nr_pages - 1; i >= 0; i--) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
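
/*
 * Illustrative usage:
 *
 *	echo "hello from userspace" > /sys/kernel/tracing/trace_marker
 *
 * The text appears in the trace interleaved with kernel events, which
 * makes it useful for correlating application activity with a trace.
 */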

static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}

static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * The new clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
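
/*
 * Illustrative usage: "echo global > trace_clock" switches timestamps
 * to the cross-CPU comparable clock.  Note that the buffers are reset
 * on a clock change, since timestamps from different clocks cannot be
 * compared.
 */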

static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
				   size_t cnt, loff_t *fpos)
{
	struct seq_file *m = filp->private_data;
	struct trace_array *tr = m->private;
	char buf[64];
	const char *clockstr;
	int ret;

	if (cnt >= sizeof(buf))
		return -EINVAL;

	if (copy_from_user(&buf, ubuf, cnt))
		return -EFAULT;

	buf[cnt] = 0;

	clockstr = strstrip(buf);

	ret = tracing_set_clock(tr, clockstr);
	if (ret)
		return ret;

	*fpos += cnt;

	return cnt;
}

static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

struct ftrace_buffer_info {
	struct trace_iterator	iter;
	void			*spare;
	unsigned int		read;
};

#ifdef CONFIG_TRACER_SNAPSHOT
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

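/*
 * Writing to the "snapshot" file:
 *	0 - frees the snapshot buffer (top-level file only)
 *	1 - allocates the snapshot buffer if needed and swaps it with
 *	    the live buffer
 *	else - clears the snapshot buffer without freeing it
 */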
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}

static int tracing_snapshot_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	int ret;

	ret = tracing_release(inode, file);

	if (file->f_mode & FMODE_READ)
		return ret;

	/* If write only, the seq_file is just a stub */
	if (m)
		kfree(m->private);
	kfree(m);

	return 0;
}

static int tracing_buffers_open(struct inode *inode, struct file *filp);
static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
				    size_t count, loff_t *ppos);
static int tracing_buffers_release(struct inode *inode, struct file *file);
static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
		   struct pipe_inode_info *pipe, size_t len, unsigned int flags);

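/*
 * snapshot_raw reuses the trace_pipe_raw machinery but points the
 * iterator at the snapshot (max) buffer, so the swapped-out data can
 * be extracted page by page in binary form.
 */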
static int snapshot_raw_open(struct inode *inode, struct file *filp)
{
	struct ftrace_buffer_info *info;
	int ret;

	ret = tracing_buffers_open(inode, filp);
	if (ret < 0)
		return ret;

	info = filp->private_data;

	if (info->iter.trace->use_max_tr) {
		tracing_buffers_release(inode, filp);
		return -EBUSY;
	}

	info->iter.snapshot = true;
	info->iter.trace_buffer = &info->iter.tr->max_buffer;

	return ret;
}

#endif /* CONFIG_TRACER_SNAPSHOT */


static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_TRACER_MAX_TRACE
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */

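/*
 * Backing for per_cpu/cpuN/trace_pipe_raw: exposes ring-buffer pages
 * in their binary form.  Bumping the current tracer's ref count keeps
 * the tracer from being torn down while a raw reader is active.
 */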
static int tracing_buffers_open(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;
	struct ftrace_buffer_info *info;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		trace_array_put(tr);
		return -ENOMEM;
	}

	mutex_lock(&trace_types_lock);

	info->iter.tr = tr;
	info->iter.cpu_file = tracing_get_cpu(inode);
	info->iter.trace = tr->current_trace;
	info->iter.trace_buffer = &tr->trace_buffer;
	info->spare = NULL;
	/* Force reading ring buffer for first read */
	info->read = (unsigned int)-1;

	filp->private_data = info;

	tr->current_trace->ref++;

	mutex_unlock(&trace_types_lock);

	ret = nonseekable_open(inode, filp);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}

static unsigned int
tracing_buffers_poll(struct file *filp, poll_table *poll_table)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;

	return trace_poll(iter, filp, poll_table);
}

static ssize_t
tracing_buffers_read(struct file *filp, char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	struct ftrace_buffer_info *info = filp->private_data;
	struct trace_iterator *iter = &info->iter;
	ssize_t ret;
	ssize_t size;

	if (!count)
		return 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (!info->spare)
		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
							  iter->cpu_file);
	if (!info->spare)
		return -ENOMEM;

	/* Do we have previous read data to read? */
	if (info->read < PAGE_SIZE)
		goto read;

 again:
	trace_access_lock(iter->cpu_file);
	ret = ring_buffer_read_page(iter->trace_buffer->buffer,
				    &info->spare,
				    count,
				    iter->cpu_file, 0);
	trace_access_unlock(iter->cpu_file);

	if (ret < 0) {
		if (trace_empty(iter)) {
			if ((filp->f_flags & O_NONBLOCK))
				return -EAGAIN;

			ret = wait_on_pipe(iter, false);
			if (ret)
				return ret;

			goto again;
		}
		return 0;
	}

	info->read = 0;
 read:
	size = PAGE_SIZE - info->read;
	if (size > count)
		size = count;

	ret = copy_to_user(ubuf, info->spare + info->read, size);
	if (ret == size)
		return -EFAULT;

	size -= ret;

	*ppos += size;
	info->read += size;

	return size;
}

static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}

struct buffer_ref {
	struct ring_buffer	*buffer;
	void			*page;
	int			ref;
};

static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	buf->private = 0;
}

static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
				struct pipe_buffer *buf)
{
	struct buffer_ref *ref = (struct buffer_ref *)buf->private;

	ref->ref++;
}

/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};

/*
 * Callback from splice_to_pipe(): release the remaining pages of the
 * spd if we errored out while filling the pipe.
 */
static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
{
	struct buffer_ref *ref =
		(struct buffer_ref *)spd->partial[i].private;

	if (--ref->ref)
		return;

	ring_buffer_free_read_page(ref->buffer, ref->page);
	kfree(ref);
	spd->partial[i].private = 0;
}

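/*
 * Zero-copy path for trace_pipe_raw: whole ring-buffer pages are
 * handed to the pipe, and the buffer_ref counting above keeps each
 * page alive until every pipe buffer referencing it is released.
 */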
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * Zero out any leftover data; this page is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		if (ret)
			return ret;

		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			return -EAGAIN;

		ret = wait_on_pipe(iter, true);
		if (ret)
			return ret;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
	splice_shrink_spd(&spd);

	return ret;
}

static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

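/*
 * per_cpu/cpuN/stats: a textual summary of one CPU's ring buffer
 * (entries, overruns, bytes, timestamps, dropped and read events).
 */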
Steven Rostedtc8d77182009-04-29 18:03:45 -04005951static ssize_t
5952tracing_stats_read(struct file *filp, char __user *ubuf,
5953 size_t count, loff_t *ppos)
5954{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005955 struct inode *inode = file_inode(filp);
5956 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005957 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005958 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005959 struct trace_seq *s;
5960 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005961 unsigned long long t;
5962 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005963
Li Zefane4f2d102009-06-15 10:57:28 +08005964 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005965 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005966 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005967
5968 trace_seq_init(s);
5969
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005970 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005971 trace_seq_printf(s, "entries: %ld\n", cnt);
5972
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005973 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005974 trace_seq_printf(s, "overrun: %ld\n", cnt);
5975
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005976 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005977 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5978
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005979 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005980 trace_seq_printf(s, "bytes: %ld\n", cnt);
5981
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005982 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005983 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005984 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005985 usec_rem = do_div(t, USEC_PER_SEC);
5986 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5987 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005988
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005989 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005990 usec_rem = do_div(t, USEC_PER_SEC);
5991 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5992 } else {
5993 /* counter or tsc mode for trace_clock */
5994 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005995 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005996
5997 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005998 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005999 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006000
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006001 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07006002 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6003
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006004 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05006005 trace_seq_printf(s, "read events: %ld\n", cnt);
6006
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006007 count = simple_read_from_buffer(ubuf, count, ppos,
6008 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04006009
6010 kfree(s);
6011
6012 return count;
6013}
6014
6015static const struct file_operations tracing_stats_fops = {
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006016 .open = tracing_open_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006017 .read = tracing_stats_read,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006018 .llseek = generic_file_llseek,
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02006019 .release = tracing_release_generic_tr,
Steven Rostedtc8d77182009-04-29 18:03:45 -04006020};
6021
#ifdef CONFIG_DYNAMIC_FTRACE

int __weak ftrace_arch_read_dyn_info(char *buf, int size)
{
	return 0;
}

static ssize_t
tracing_read_dyn_info(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	static char ftrace_dyn_info_buffer[1024];
	static DEFINE_MUTEX(dyn_info_mutex);
	unsigned long *p = filp->private_data;
	char *buf = ftrace_dyn_info_buffer;
	int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
	int r;

	mutex_lock(&dyn_info_mutex);
	r = sprintf(buf, "%ld ", *p);

	r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
	buf[r++] = '\n';

	r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);

	mutex_unlock(&dyn_info_mutex);

	return r;
}

static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */

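/*
 * Sketch of a read through the handler above: the leading number is the
 * counter passed in as private_data (the dyn_ftrace_total_info file
 * created later hands in &ftrace_update_tot_cnt), followed by whatever
 * arch-specific text ftrace_arch_read_dyn_info() adds. The count shown
 * here is made up:
 *
 *   # cat /sys/kernel/tracing/dyn_ftrace_total_info
 *   45632
 */
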
#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
static void
ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	tracing_snapshot();
}

static void
ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
{
	unsigned long *count = (unsigned long *)data;

	if (!*count)
		return;

	if (*count != -1)
		(*count)--;

	tracing_snapshot();
}

static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}

static struct ftrace_probe_ops snapshot_probe_ops = {
	.func		= ftrace_snapshot,
	.print		= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func		= ftrace_count_snapshot,
	.print		= ftrace_snapshot_print,
};

static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}

static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};

static __init int register_snapshot_cmd(void)
{
	return register_ftrace_command(&ftrace_snapshot_cmd);
}
#else
static inline __init int register_snapshot_cmd(void) { return 0; }
#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */

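/*
 * Usage sketch for the "snapshot" command above, derived from the
 * callback logic (paths assume tracefs at /sys/kernel/tracing):
 *
 *   # echo 'kfree:snapshot'   > set_ftrace_filter    snapshot on every hit
 *   # echo 'kfree:snapshot:5' > set_ftrace_filter    only the first 5 hits
 *   # echo '!kfree:snapshot'  > set_ftrace_filter    remove the probe
 *
 * With no count, the probe registers with -1 and shows up as
 * ":unlimited" via ftrace_snapshot_print().
 */
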
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}

static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}

static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}
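
/*
 * The cpu number is stashed in i_cdev biased by one so that cpu 0 is
 * distinguishable from an unset (NULL) i_cdev. The reader side undoes
 * the bias, roughly (a sketch of what tracing_get_cpu() does):
 *
 *   cpu = (long)inode->i_cdev - 1;
 */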

static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}

#ifdef CONFIG_FTRACE_SELFTEST
/* Let selftest have access to static functions in this file */
#include "trace_selftest.c"
#endif

static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
		   loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
		    loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};

/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
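
/*
 * Worked example of the recovery above: suppose the file for flag 3 was
 * created with data == &tr->trace_flags_index[3]. init_trace_flags_index()
 * filled the array so that trace_flags_index[i] == i, hence:
 *
 *   *pindex = *(unsigned char *)data;        reads 3
 *   data - *pindex                           == &tr->trace_flags_index[0]
 *   container_of(..., trace_flags_index)     == tr
 */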

static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}

static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};

struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create tracefs '%s' entry\n", name);

	return ret;
}

static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}

static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}

static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/*
		 * Check if these flags have already been added.
		 * Some tracers share flags.
		 */
		if (tr->topts[i].tracer->flags == tracer->flags)
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}

static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}

static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
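
/*
 * The result is one 0644 file per core flag under options/, toggled by
 * writing 0 or 1. Illustrative only (which names exist depends on
 * trace_options[]):
 *
 *   # echo 0 > /sys/kernel/tracing/options/print-parent
 */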

static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}

static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
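
/*
 * These handlers back the tracing_on file: writing 0 stops recording
 * (and calls the tracer's stop() hook) without tearing anything down,
 * writing 1 resumes. Sketch, assuming the usual mount point:
 *
 *   # echo 0 > /sys/kernel/tracing/tracing_on
 *   # cat /sys/kernel/tracing/tracing_on
 *   0
 */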

struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);

static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}

static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}

static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}

static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}

static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}

static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}

static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}

static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
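
/*
 * Together with instance_rmdir() below, this backs the instances
 * directory: a new trace_array with its own buffers and control files
 * is a mkdir away. Sketch, assuming tracefs at /sys/kernel/tracing:
 *
 *   # mkdir /sys/kernel/tracing/instances/foo
 *   # echo function > /sys/kernel/tracing/instances/foo/current_tracer
 *   # rmdir /sys/kernel/tracing/instances/foo
 *
 * The rmdir fails with -EBUSY while the instance still has references.
 */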

static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++)
		kfree(tr->topts[i].topts);
	kfree(tr->topts);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}

static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			  tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			  tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  tr, &tracing_saved_tgids_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			  &tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);
}

static struct vfsmount *trace_automount(void *ignore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
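
/*
 * With the automount in place, both of these paths refer to the same
 * tracefs files (illustrative, assuming the usual mount points):
 *
 *   /sys/kernel/tracing/trace
 *   /sys/kernel/debug/tracing/trace
 */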

/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}

extern struct trace_enum_map *__start_ftrace_enum_maps[];
extern struct trace_enum_map *__stop_ftrace_enum_maps[];

static void __init trace_enum_init(void)
{
	int len;

	len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
	trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
}

#ifdef CONFIG_MODULES
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}

#ifdef CONFIG_TRACE_ENUM_MAP_FILE
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
#else
static inline void trace_module_remove_enums(struct module *mod) { }
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */

static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_enums(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_enums(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */

static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			  &global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			  NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			  NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			  &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}

static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call	= trace_panic_handler,
	.next		= NULL,
	.priority	= 150	/* priority: INT_MAX >= x >= 0 */
};

static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};

/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG

void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}

void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}

void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill in all that we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
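
/*
 * ftrace_dump() is normally reached from the panic/die notifiers above
 * when ftrace_dump_on_oops is set, or by hand via sysrq-z. A sketch of
 * enabling it (boot parameter, or the matching sysctl if available):
 *
 *   ftrace_dump_on_oops                              (kernel command line)
 *   # echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */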

__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
Steven Rostedt (Red Hat)b5e87c02015-09-29 18:13:33 -04007299
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307300 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
7301 goto out;
7302
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007303 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307304 goto out_free_buffer_mask;
7305
Steven Rostedt07d777f2011-09-22 14:01:55 -04007306 /* Only allocate trace_printk buffers if a trace_printk exists */
7307 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
Steven Rostedt81698832012-10-11 10:15:05 -04007308 /* Must be called before global_trace.buffer is allocated */
Steven Rostedt07d777f2011-09-22 14:01:55 -04007309 trace_printk_init_buffers();
7310
Steven Rostedt73c51622009-03-11 13:42:01 -04007311 /* To save memory, keep the ring buffer size to its minimum */
7312 if (ring_buffer_expanded)
7313 ring_buf_size = trace_buf_size;
7314 else
7315 ring_buf_size = 1;
7316
Rusty Russell9e01c1b2009-01-01 10:12:22 +10307317 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07007318 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02007319
Steven Rostedt2b6080f2012-05-11 13:29:49 -04007320 raw_spin_lock_init(&global_trace.start_lock);
7321
Steven Rostedt (Red Hat)2c4a33a2014-03-25 23:39:41 -04007322 /* Used for event triggers */
7323 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
7324 if (!temp_buffer)
7325 goto out_free_cpumask;
7326
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007327 if (trace_create_savedcmd() < 0)
7328 goto out_free_temp_buffer;
7329
Steven Rostedtab464282008-05-12 21:21:00 +02007330 /* TODO: make the number of buffers hot pluggable with CPUS */
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05007331 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007332 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
7333 WARN_ON(1);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09007334 goto out_free_savedcmd;
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007335 }
Steven Rostedta7603ff2012-08-06 16:24:11 -04007336
Steven Rostedt499e5472012-02-22 15:50:28 -05007337 if (global_trace.buffer_disabled)
7338 tracing_off();
Steven Rostedt3928a8a2008-09-29 23:02:41 -04007339
	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

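	/*
	 * Dump the trace buffers on panic or a fatal die event when the
	 * ftrace_dump_on_oops option is set.
	 */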
	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

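	/* Apply any trace option flags passed on the kernel command line */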
	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

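	/* Error unwind: release everything in reverse order of allocation */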
out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}

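/*
 * Early tracing setup, called from start_kernel() before tracefs
 * exists.  When the "tp_printk" boot option is set, an iterator is
 * allocated so that tracepoint output can also be echoed via printk.
 */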
void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}

__init static int clear_boot_tracer(void)
{
	/*
	 * The default bootup tracer name lives in an init section that
	 * is about to be freed.  This function runs as a late initcall:
	 * if the boot tracer was never registered by then, clear the
	 * pointer so that a later registration cannot access memory
	 * that has already been freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);
	default_bootup_tracer = NULL;

	return 0;
}

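/*
 * The tracefs files are created at fs_initcall time, once the VFS is
 * available; clear_boot_tracer() runs later still, after the built-in
 * tracers have had their chance to register.
 */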
fs_initcall(tracer_init_tracefs);
late_initcall(clear_boot_tracer);