blob: 587a383c413d5ef8038e79989bb801bf1c5c8542 [file] [log] [blame]
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001/*
2 * ring buffer based function tracer
3 *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally taken from the RT patch by:
8 * Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code from the latency_tracer, that is:
11 * Copyright (C) 2004-2006 Ingo Molnar
Nadia Yvette Chambers6d49e352012-12-06 10:39:54 +010012 * Copyright (C) 2004 Nadia Yvette Chambers
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020013 */
Steven Rostedt2cadf912008-12-01 22:20:19 -050014#include <linux/ring_buffer.h>
Sam Ravnborg273b2812009-10-18 00:52:28 +020015#include <generated/utsrelease.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050016#include <linux/stacktrace.h>
17#include <linux/writeback.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020018#include <linux/kallsyms.h>
19#include <linux/seq_file.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040020#include <linux/notifier.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050021#include <linux/irqflags.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020022#include <linux/debugfs.h>
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -050023#include <linux/tracefs.h>
Steven Rostedt4c11d7a2008-05-12 21:20:43 +020024#include <linux/pagemap.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020025#include <linux/hardirq.h>
26#include <linux/linkage.h>
27#include <linux/uaccess.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050028#include <linux/kprobes.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020029#include <linux/ftrace.h>
30#include <linux/module.h>
31#include <linux/percpu.h>
Steven Rostedt2cadf912008-12-01 22:20:19 -050032#include <linux/splice.h>
Steven Rostedt3f5a54e2008-07-30 22:36:46 -040033#include <linux/kdebug.h>
Frederic Weisbecker5f0c6c02009-03-27 14:22:10 +010034#include <linux/string.h>
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -050035#include <linux/mount.h>
Lai Jiangshan7e53bd42010-01-06 20:08:50 +080036#include <linux/rwsem.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020038#include <linux/ctype.h>
39#include <linux/init.h>
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +020040#include <linux/poll.h>
Steven Rostedtb892e5c2012-03-01 22:06:48 -050041#include <linux/nmi.h>
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020042#include <linux/fs.h>
Clark Williams8bd75c72013-02-07 09:47:07 -060043#include <linux/sched/rt.h>
Ingo Molnar86387f72008-05-12 21:20:51 +020044
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020045#include "trace.h"
Steven Rostedtf0868d12008-12-23 23:24:12 -050046#include "trace_output.h"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +020047
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010048/*
Steven Rostedt73c51622009-03-11 13:42:01 -040049 * On boot up, the ring buffer is set to the minimum size, so that
50 * we do not waste memory on systems that are not using tracing.
51 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -050052bool ring_buffer_expanded;
Steven Rostedt73c51622009-03-11 13:42:01 -040053
54/*
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010055 * We need to change this state when a selftest is running.
Frederic Weisbeckerff325042008-12-04 23:47:35 +010056 * A selftest will lurk into the ring-buffer to count the
57 * entries inserted during the selftest although some concurrent
Ingo Molnar5e1607a2009-03-05 10:24:48 +010058 * insertions into the ring-buffer such as trace_printk could occur
Frederic Weisbeckerff325042008-12-04 23:47:35 +010059 * at the same time, giving false positive or negative results.
60 */
Frederic Weisbecker8e1b82e2008-12-06 03:41:33 +010061static bool __read_mostly tracing_selftest_running;
Frederic Weisbeckerff325042008-12-04 23:47:35 +010062
Steven Rostedtb2821ae2009-02-02 21:38:32 -050063/*
64 * If a tracer is running, we do not want to run SELFTEST.
65 */
Li Zefan020e5f82009-07-01 10:47:05 +080066bool __read_mostly tracing_selftest_disabled;
Steven Rostedtb2821ae2009-02-02 21:38:32 -050067
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -050068/* Pipe tracepoints to printk */
69struct trace_iterator *tracepoint_print_iter;
70int tracepoint_printk;
71
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010072/* For tracers that don't implement custom flags */
73static struct tracer_opt dummy_tracer_opt[] = {
74 { }
75};
76
77static struct tracer_flags dummy_tracer_flags = {
78 .val = 0,
79 .opts = dummy_tracer_opt
80};
81
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -050082static int
83dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +010084{
85 return 0;
86}
Steven Rostedt0f048702008-11-05 16:05:44 -050087
88/*
Steven Rostedt7ffbd482012-10-11 12:14:25 -040089 * To prevent the comm cache from being overwritten when no
90 * tracing is active, only save the comm when a trace event
91 * occurred.
92 */
93static DEFINE_PER_CPU(bool, trace_cmdline_save);
94
95/*
Steven Rostedt0f048702008-11-05 16:05:44 -050096 * Kill all tracing for good (never come back).
97 * It is initialized to 1 but will turn to zero if the initialization
98 * of the tracer is successful. But that is the only place that sets
99 * this back to zero.
100 */
Hannes Eder4fd27352009-02-10 19:44:12 +0100101static int tracing_disabled = 1;
Steven Rostedt0f048702008-11-05 16:05:44 -0500102
Jason Wessel955b61e2010-08-05 09:22:23 -0500103cpumask_var_t __read_mostly tracing_buffer_mask;
Steven Rostedtab464282008-05-12 21:21:00 +0200104
Steven Rostedt944ac422008-10-23 19:26:08 -0400105/*
106 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
107 *
108 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
109 * is set, then ftrace_dump is called. This will output the contents
110 * of the ftrace buffers to the console. This is very useful for
111 * capturing traces that lead to crashes and outputting it to a
112 * serial console.
113 *
114 * It is default off, but you can enable it with either specifying
115 * "ftrace_dump_on_oops" in the kernel command line, or setting
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200116 * /proc/sys/kernel/ftrace_dump_on_oops
117 * Set 1 if you want to dump buffers of all CPUs
118 * Set 2 if you want to dump the buffer of the CPU that triggered oops
Steven Rostedt944ac422008-10-23 19:26:08 -0400119 */
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200120
121enum ftrace_dump_mode ftrace_dump_on_oops;
Steven Rostedt944ac422008-10-23 19:26:08 -0400122
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400123/* When set, tracing will stop when a WARN*() is hit */
124int __disable_trace_on_warning;
125
#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/* Map of enums to their values, for "enum_map" file */
struct trace_enum_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_enum_map_item;

struct trace_enum_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "enum_string"
	 */
	union trace_enum_map_item	*next;
	const char			*end;	/* points to NULL */
};

/* Protects trace_enum_maps and the saved-map arrays below. */
static DEFINE_MUTEX(trace_enum_mutex);

/*
 * The trace_enum_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved enum_map items.
 */
union trace_enum_map_item {
	struct trace_enum_map		map;
	struct trace_enum_map_head	head;
	struct trace_enum_map_tail	tail;
};

static union trace_enum_map_item *trace_enum_maps;
#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
161
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -0500162static int tracing_set_tracer(struct trace_array *tr, const char *buf);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500163
Li Zefanee6c2c12009-09-18 14:06:47 +0800164#define MAX_TRACER_SIZE 100
165static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500166static char *default_bootup_tracer;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100167
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500168static bool allocate_snapshot;
169
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200170static int __init set_cmdline_ftrace(char *str)
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100171{
Chen Gang67012ab2013-04-08 12:06:44 +0800172 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
Steven Rostedtb2821ae2009-02-02 21:38:32 -0500173 default_bootup_tracer = bootup_tracer_buf;
Steven Rostedt73c51622009-03-11 13:42:01 -0400174 /* We are using ftrace early, expand it */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500175 ring_buffer_expanded = true;
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100176 return 1;
177}
Frederic Weisbecker1beee962009-10-14 20:50:32 +0200178__setup("ftrace=", set_cmdline_ftrace);
Peter Zijlstrad9e54072008-11-01 19:57:37 +0100179
Steven Rostedt944ac422008-10-23 19:26:08 -0400180static int __init set_ftrace_dump_on_oops(char *str)
181{
Frederic Weisbeckercecbca92010-04-18 19:08:41 +0200182 if (*str++ != '=' || !*str) {
183 ftrace_dump_on_oops = DUMP_ALL;
184 return 1;
185 }
186
187 if (!strcmp("orig_cpu", str)) {
188 ftrace_dump_on_oops = DUMP_ORIG;
189 return 1;
190 }
191
192 return 0;
Steven Rostedt944ac422008-10-23 19:26:08 -0400193}
194__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
Steven Rostedt60a11772008-05-12 21:20:44 +0200195
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400196static int __init stop_trace_on_warning(char *str)
197{
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200198 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
199 __disable_trace_on_warning = 1;
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400200 return 1;
201}
Luis Claudio R. Goncalves933ff9f2014-11-12 21:14:00 -0200202__setup("traceoff_on_warning", stop_trace_on_warning);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400203
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400204static int __init boot_alloc_snapshot(char *str)
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500205{
206 allocate_snapshot = true;
207 /* We also need the main ring buffer expanded */
208 ring_buffer_expanded = true;
209 return 1;
210}
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400211__setup("alloc_snapshot", boot_alloc_snapshot);
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -0500212
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400213
214static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400215
216static int __init set_trace_boot_options(char *str)
217{
Chen Gang67012ab2013-04-08 12:06:44 +0800218 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
Steven Rostedt7bcfaf52012-11-01 22:56:07 -0400219 return 0;
220}
221__setup("trace_options=", set_trace_boot_options);
222
Steven Rostedte1e232c2014-02-10 23:38:46 -0500223static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
224static char *trace_boot_clock __initdata;
225
226static int __init set_trace_boot_clock(char *str)
227{
228 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
229 trace_boot_clock = trace_boot_clock_buf;
230 return 0;
231}
232__setup("trace_clock=", set_trace_boot_clock);
233
Steven Rostedt (Red Hat)0daa23022014-12-12 22:27:10 -0500234static int __init set_tracepoint_printk(char *str)
235{
236 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
237 tracepoint_printk = 1;
238 return 1;
239}
240__setup("tp_printk", set_tracepoint_printk);
Steven Rostedt (Red Hat)de7edd32013-06-14 16:21:43 -0400241
Lai Jiangshancf8e3472009-03-30 13:48:00 +0800242unsigned long long ns2usecs(cycle_t nsec)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200243{
244 nsec += 500;
245 do_div(nsec, 1000);
246 return nsec;
247}
248
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400249/* trace_flags holds trace_options default values */
250#define TRACE_DEFAULT_FLAGS \
251 (FUNCTION_DEFAULT_FLAGS | \
252 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
253 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
254 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
255 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
256
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -0400257/* trace_options that are only supported by global_trace */
258#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
259 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
260
261
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200262/*
263 * The global_trace is the descriptor that holds the tracing
264 * buffers for the live tracing. For each CPU, it contains
265 * a link list of pages that will store trace entries. The
266 * page descriptor of the pages in the memory is used to hold
267 * the link list by linking the lru item in the page descriptor
268 * to each of the pages in the buffer per CPU.
269 *
270 * For each active CPU there is a data field that holds the
271 * pages for the buffer for that CPU. Each CPU has the same number
272 * of pages allocated for its buffer.
273 */
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400274static struct trace_array global_trace = {
275 .trace_flags = TRACE_DEFAULT_FLAGS,
276};
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200277
Steven Rostedtae63b312012-05-03 23:09:03 -0400278LIST_HEAD(ftrace_trace_arrays);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200279
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -0400280int trace_array_get(struct trace_array *this_tr)
281{
282 struct trace_array *tr;
283 int ret = -ENODEV;
284
285 mutex_lock(&trace_types_lock);
286 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
287 if (tr == this_tr) {
288 tr->ref++;
289 ret = 0;
290 break;
291 }
292 }
293 mutex_unlock(&trace_types_lock);
294
295 return ret;
296}
297
298static void __trace_array_put(struct trace_array *this_tr)
299{
300 WARN_ON(!this_tr->ref);
301 this_tr->ref--;
302}
303
304void trace_array_put(struct trace_array *this_tr)
305{
306 mutex_lock(&trace_types_lock);
307 __trace_array_put(this_tr);
308 mutex_unlock(&trace_types_lock);
309}
310
Steven Rostedt (Red Hat)7f1d2f82015-05-05 10:09:53 -0400311int filter_check_discard(struct trace_event_file *file, void *rec,
Tom Zanussif306cc82013-10-24 08:34:17 -0500312 struct ring_buffer *buffer,
313 struct ring_buffer_event *event)
Tom Zanussieb02ce02009-04-08 03:15:54 -0500314{
Steven Rostedt (Red Hat)5d6ad962015-05-13 15:12:33 -0400315 if (unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
Tom Zanussif306cc82013-10-24 08:34:17 -0500316 !filter_match_preds(file->filter, rec)) {
317 ring_buffer_discard_commit(buffer, event);
318 return 1;
319 }
320
321 return 0;
Tom Zanussieb02ce02009-04-08 03:15:54 -0500322}
Tom Zanussif306cc82013-10-24 08:34:17 -0500323EXPORT_SYMBOL_GPL(filter_check_discard);
324
Steven Rostedt (Red Hat)2425bcb2015-05-05 11:45:27 -0400325int call_filter_check_discard(struct trace_event_call *call, void *rec,
Tom Zanussif306cc82013-10-24 08:34:17 -0500326 struct ring_buffer *buffer,
327 struct ring_buffer_event *event)
328{
329 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
330 !filter_match_preds(call->filter, rec)) {
331 ring_buffer_discard_commit(buffer, event);
332 return 1;
333 }
334
335 return 0;
336}
337EXPORT_SYMBOL_GPL(call_filter_check_discard);
Tom Zanussieb02ce02009-04-08 03:15:54 -0500338
Fabian Frederickad1438a2014-04-17 21:44:42 +0200339static cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
Steven Rostedt37886f62009-03-17 17:22:06 -0400340{
341 u64 ts;
342
343 /* Early boot up does not have a buffer yet */
Alexander Z Lam94571582013-08-02 18:36:16 -0700344 if (!buf->buffer)
Steven Rostedt37886f62009-03-17 17:22:06 -0400345 return trace_clock_local();
346
Alexander Z Lam94571582013-08-02 18:36:16 -0700347 ts = ring_buffer_time_stamp(buf->buffer, cpu);
348 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
Steven Rostedt37886f62009-03-17 17:22:06 -0400349
350 return ts;
351}
352
Alexander Z Lam94571582013-08-02 18:36:16 -0700353cycle_t ftrace_now(int cpu)
354{
355 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
356}
357
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400358/**
359 * tracing_is_enabled - Show if global_trace has been disabled
360 *
361 * Shows if the global trace has been enabled or not. It uses the
362 * mirror flag "buffer_disabled" to be used in fast paths such as for
363 * the irqsoff tracer. But it may be inaccurate due to races. If you
364 * need to know the accurate state, use tracing_is_on() which is a little
365 * slower, but accurate.
366 */
Steven Rostedt90369902008-11-05 16:05:44 -0500367int tracing_is_enabled(void)
368{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400369 /*
370 * For quick access (irqsoff uses this in fast path), just
371 * return the mirror variable of the state of the ring buffer.
372 * It's a little racy, but we don't really care.
373 */
374 smp_rmb();
375 return !global_trace.buffer_disabled;
Steven Rostedt90369902008-11-05 16:05:44 -0500376}
377
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200378/*
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400379 * trace_buf_size is the size in bytes that is allocated
380 * for a buffer. Note, the number of bytes is always rounded
381 * to page size.
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400382 *
383 * This number is purposely set to a low number of 16384.
384 * If the dump on oops happens, it will be much appreciated
385 * to not have to wait for all that output. Anyway this can be
386 * boot time and run time configurable.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200387 */
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400388#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
Steven Rostedt3f5a54e2008-07-30 22:36:46 -0400389
Steven Rostedt3928a8a2008-09-29 23:02:41 -0400390static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200391
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200392/* trace_types holds a link list of available tracers. */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200393static struct tracer *trace_types __read_mostly;
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200394
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200395/*
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200396 * trace_types_lock is used to protect the trace_types list.
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200397 */
Alexander Z Lama8227412013-07-01 19:37:54 -0700398DEFINE_MUTEX(trace_types_lock);
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200399
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800400/*
401 * serialize the access of the ring buffer
402 *
403 * ring buffer serializes readers, but it is low level protection.
404 * The validity of the events (which returns by ring_buffer_peek() ..etc)
405 * are not protected by ring buffer.
406 *
407 * The content of events may become garbage if we allow other process consumes
408 * these events concurrently:
409 * A) the page of the consumed events may become a normal page
410 * (not reader page) in ring buffer, and this page will be rewrited
411 * by events producer.
412 * B) The page of the consumed events may become a page for splice_read,
413 * and this page will be returned to system.
414 *
415 * These primitives allow multi process access to different cpu ring buffer
416 * concurrently.
417 *
418 * These primitives don't distinguish read-only and read-consume access.
419 * Multi read-only access are also serialized.
420 */
421
422#ifdef CONFIG_SMP
423static DECLARE_RWSEM(all_cpu_access_lock);
424static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
425
426static inline void trace_access_lock(int cpu)
427{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500428 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800429 /* gain it for accessing the whole ring buffer. */
430 down_write(&all_cpu_access_lock);
431 } else {
432 /* gain it for accessing a cpu ring buffer. */
433
Steven Rostedtae3b5092013-01-23 15:22:59 -0500434 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800435 down_read(&all_cpu_access_lock);
436
437 /* Secondly block other access to this @cpu ring buffer. */
438 mutex_lock(&per_cpu(cpu_access_lock, cpu));
439 }
440}
441
442static inline void trace_access_unlock(int cpu)
443{
Steven Rostedtae3b5092013-01-23 15:22:59 -0500444 if (cpu == RING_BUFFER_ALL_CPUS) {
Lai Jiangshan7e53bd42010-01-06 20:08:50 +0800445 up_write(&all_cpu_access_lock);
446 } else {
447 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
448 up_read(&all_cpu_access_lock);
449 }
450}
451
452static inline void trace_access_lock_init(void)
453{
454 int cpu;
455
456 for_each_possible_cpu(cpu)
457 mutex_init(&per_cpu(cpu_access_lock, cpu));
458}
459
460#else
461
462static DEFINE_MUTEX(access_lock);
463
464static inline void trace_access_lock(int cpu)
465{
466 (void)cpu;
467 mutex_lock(&access_lock);
468}
469
470static inline void trace_access_unlock(int cpu)
471{
472 (void)cpu;
473 mutex_unlock(&access_lock);
474}
475
476static inline void trace_access_lock_init(void)
477{
478}
479
480#endif
481
#ifdef CONFIG_STACKTRACE
/* Real implementations live later in this file. */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs);
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs);

#else
/* Without CONFIG_STACKTRACE, stack recording is compiled out. */
static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
					unsigned long flags,
					int skip, int pc, struct pt_regs *regs)
{
}
static inline void ftrace_trace_stack(struct trace_array *tr,
				      struct ring_buffer *buffer,
				      unsigned long flags,
				      int skip, int pc, struct pt_regs *regs)
{
}

#endif
505
Steven Rostedt (Red Hat)5280bce2013-07-02 19:59:57 -0400506static void tracer_tracing_on(struct trace_array *tr)
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400507{
508 if (tr->trace_buffer.buffer)
509 ring_buffer_record_on(tr->trace_buffer.buffer);
510 /*
511 * This flag is looked at when buffers haven't been allocated
512 * yet, or by some tracers (like irqsoff), that just want to
513 * know if the ring buffer has been disabled, but it can handle
514 * races of where it gets disabled but we still do a record.
515 * As the check is in the fast path of the tracers, it is more
516 * important to be fast than accurate.
517 */
518 tr->buffer_disabled = 0;
519 /* Make the flag seen by readers */
520 smp_wmb();
521}
522
Steven Rostedt4fcdae82008-05-12 21:21:00 +0200523/**
Steven Rostedt499e5472012-02-22 15:50:28 -0500524 * tracing_on - enable tracing buffers
525 *
526 * This function enables tracing buffers that may have been
527 * disabled with tracing_off.
528 */
529void tracing_on(void)
530{
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -0400531 tracer_tracing_on(&global_trace);
Steven Rostedt499e5472012-02-22 15:50:28 -0500532}
533EXPORT_SYMBOL_GPL(tracing_on);
534
535/**
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500536 * __trace_puts - write a constant string into the trace buffer.
537 * @ip: The address of the caller
538 * @str: The constant string to write
539 * @size: The size of the string.
540 */
541int __trace_puts(unsigned long ip, const char *str, int size)
542{
543 struct ring_buffer_event *event;
544 struct ring_buffer *buffer;
545 struct print_entry *entry;
546 unsigned long irq_flags;
547 int alloc;
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800548 int pc;
549
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400550 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800551 return 0;
552
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800553 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500554
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500555 if (unlikely(tracing_selftest_running || tracing_disabled))
556 return 0;
557
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500558 alloc = sizeof(*entry) + size + 2; /* possible \n added */
559
560 local_save_flags(irq_flags);
561 buffer = global_trace.trace_buffer.buffer;
562 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800563 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500564 if (!event)
565 return 0;
566
567 entry = ring_buffer_event_data(event);
568 entry->ip = ip;
569
570 memcpy(&entry->buf, str, size);
571
572 /* Add a newline if necessary */
573 if (entry->buf[size - 1] != '\n') {
574 entry->buf[size] = '\n';
575 entry->buf[size + 1] = '\0';
576 } else
577 entry->buf[size] = '\0';
578
579 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400580 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500581
582 return size;
583}
584EXPORT_SYMBOL_GPL(__trace_puts);
585
586/**
587 * __trace_bputs - write the pointer to a constant string into trace buffer
588 * @ip: The address of the caller
589 * @str: The constant string to write to the buffer to
590 */
591int __trace_bputs(unsigned long ip, const char *str)
592{
593 struct ring_buffer_event *event;
594 struct ring_buffer *buffer;
595 struct bputs_entry *entry;
596 unsigned long irq_flags;
597 int size = sizeof(struct bputs_entry);
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800598 int pc;
599
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -0400600 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
zhangwei(Jovi)f0160a52013-07-18 16:31:18 +0800601 return 0;
602
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800603 pc = preempt_count();
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500604
Steven Rostedt (Red Hat)3132e102014-01-23 12:27:59 -0500605 if (unlikely(tracing_selftest_running || tracing_disabled))
606 return 0;
607
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500608 local_save_flags(irq_flags);
609 buffer = global_trace.trace_buffer.buffer;
610 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
zhangwei(Jovi)8abfb872013-07-18 16:31:05 +0800611 irq_flags, pc);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500612 if (!event)
613 return 0;
614
615 entry = ring_buffer_event_data(event);
616 entry->ip = ip;
617 entry->str = str;
618
619 __buffer_unlock_commit(buffer, event);
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -0400620 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
Steven Rostedt (Red Hat)09ae7232013-03-08 21:02:34 -0500621
622 return 1;
623}
624EXPORT_SYMBOL_GPL(__trace_bputs);
625
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500626#ifdef CONFIG_TRACER_SNAPSHOT
627/**
628 * trace_snapshot - take a snapshot of the current buffer.
629 *
630 * This causes a swap between the snapshot buffer and the current live
631 * tracing buffer. You can use this to take snapshots of the live
632 * trace when some condition is triggered, but continue to trace.
633 *
634 * Note, make sure to allocate the snapshot with either
635 * a tracing_snapshot_alloc(), or by doing it manually
636 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
637 *
638 * If the snapshot buffer is not allocated, it will stop tracing.
639 * Basically making a permanent snapshot.
640 */
641void tracing_snapshot(void)
642{
643 struct trace_array *tr = &global_trace;
644 struct tracer *tracer = tr->current_trace;
645 unsigned long flags;
646
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500647 if (in_nmi()) {
648 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
649 internal_trace_puts("*** snapshot is being ignored ***\n");
650 return;
651 }
652
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500653 if (!tr->allocated_snapshot) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500654 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
655 internal_trace_puts("*** stopping trace here! ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500656 tracing_off();
657 return;
658 }
659
660 /* Note, snapshot can not be used when the tracer uses it */
661 if (tracer->use_max_tr) {
Steven Rostedt (Red Hat)ca268da2013-03-09 00:40:58 -0500662 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
663 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500664 return;
665 }
666
667 local_irq_save(flags);
668 update_max_tr(tr, current, smp_processor_id());
669 local_irq_restore(flags);
670}
Steven Rostedt (Red Hat)1b22e382013-03-09 00:56:08 -0500671EXPORT_SYMBOL_GPL(tracing_snapshot);
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500672
673static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
674 struct trace_buffer *size_buf, int cpu_id);
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400675static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
676
677static int alloc_snapshot(struct trace_array *tr)
678{
679 int ret;
680
681 if (!tr->allocated_snapshot) {
682
683 /* allocate spare buffer */
684 ret = resize_buffer_duplicate_size(&tr->max_buffer,
685 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
686 if (ret < 0)
687 return ret;
688
689 tr->allocated_snapshot = true;
690 }
691
692 return 0;
693}
694
Fabian Frederickad1438a2014-04-17 21:44:42 +0200695static void free_snapshot(struct trace_array *tr)
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -0400696{
697 /*
698 * We don't free the ring buffer. instead, resize it because
699 * The max_tr ring buffer has some state (e.g. ring->clock) and
700 * we want preserve it.
701 */
702 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
703 set_buffer_entries(&tr->max_buffer, 1);
704 tracing_reset_online_cpus(&tr->max_buffer);
705 tr->allocated_snapshot = false;
706}
Steven Rostedt (Red Hat)ad909e22013-03-06 21:45:37 -0500707
708/**
Tom Zanussi93e31ff2013-10-24 08:59:26 -0500709 * tracing_alloc_snapshot - allocate snapshot buffer.
710 *
711 * This only allocates the snapshot buffer if it isn't already
712 * allocated - it doesn't also take a snapshot.
713 *
714 * This is meant to be used in cases where the snapshot buffer needs
715 * to be set up for events that can't sleep but need to be able to
716 * trigger a snapshot.
717 */
718int tracing_alloc_snapshot(void)
719{
720 struct trace_array *tr = &global_trace;
721 int ret;
722
723 ret = alloc_snapshot(tr);
724 WARN_ON(ret < 0);
725
726 return ret;
727}
728EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
729
/**
 * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
 *
 * This is similar to trace_snapshot(), but it will allocate the
 * snapshot buffer if it isn't already allocated. Use this only
 * where it is safe to sleep, as the allocation may sleep.
 *
 * This causes a swap between the snapshot buffer and the current live
 * tracing buffer. You can use this to take snapshots of the live
 * trace when some condition is triggered, but continue to trace.
 */
void tracing_snapshot_alloc(void)
{
	int ret;

	ret = tracing_alloc_snapshot();
	if (ret < 0)
		return;

	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#else
/*
 * Stub implementations used when CONFIG_TRACER_SNAPSHOT is not set:
 * warn (once) that the snapshot feature was used without being
 * compiled in, instead of silently doing nothing.
 */
void tracing_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
int tracing_alloc_snapshot(void)
{
	WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
void tracing_snapshot_alloc(void)
{
	/* Give warning */
	tracing_snapshot();
}
EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
#endif /* CONFIG_TRACER_SNAPSHOT */
771
/* Stop recording into @tr's ring buffer and mark the instance disabled. */
static void tracer_tracing_off(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		ring_buffer_record_off(tr->trace_buffer.buffer);
	/*
	 * This flag is looked at when buffers haven't been allocated
	 * yet, or by some tracers (like irqsoff), that just want to
	 * know if the ring buffer has been disabled, but it can handle
	 * races of where it gets disabled but we still do a record.
	 * As the check is in the fast path of the tracers, it is more
	 * important to be fast than accurate.
	 */
	tr->buffer_disabled = 1;
	/* Make the flag seen by readers */
	smp_wmb();
}
788
/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	/* Convenience wrapper that operates on the global trace array */
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
802
/*
 * Turn tracing off when a kernel warning fires, if the user requested it.
 * NOTE(review): __disable_trace_on_warning is presumably set via the
 * traceoff_on_warning boot option / sysctl - confirm where it is defined.
 */
void disable_trace_on_warning(void)
{
	if (__disable_trace_on_warning)
		tracing_off();
}
808
/**
 * tracer_tracing_is_on - show real state of ring buffer enabled
 * @tr : the trace array to know if ring buffer is enabled
 *
 * Shows real state of the ring buffer if it is enabled or not.
 */
static int tracer_tracing_is_on(struct trace_array *tr)
{
	if (tr->trace_buffer.buffer)
		return ring_buffer_record_is_on(tr->trace_buffer.buffer);
	/* Buffer not allocated yet: fall back to the software flag */
	return !tr->buffer_disabled;
}
821
/**
 * tracing_is_on - show state of ring buffers enabled
 *
 * Returns nonzero if the global trace array's ring buffer is recording.
 */
int tracing_is_on(void)
{
	return tracer_tracing_is_on(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_is_on);
830
/*
 * Parse the "trace_buf_size=" boot parameter. memparse() accepts an
 * optional K/M/G suffix. Returning 1 tells the boot code the option
 * was consumed; returning 0 leaves it for init to complain about.
 */
static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +0200845
/*
 * Parse the "tracing_thresh=" boot parameter. The value is given in
 * microseconds but tracing_thresh is kept in nanoseconds, hence *1000.
 */
static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
860
/* Convert nanoseconds to microseconds (integer division; truncates). */
unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	unsigned long usecs = nsecs / 1000UL;

	return usecs;
}
865
/*
 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
 * It uses C(a, b) where 'a' is the enum name and 'b' is the string that
 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
 * of strings in the order that the enums were defined.
 */
#undef C
#define C(a, b) b

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	TRACE_FLAGS
	NULL
};
880
/*
 * Trace clocks available to the user; 'name' is what is written to the
 * trace_clock file to select the clock function.
 */
static struct {
	u64 (*func)(void);
	const char *name;
	int in_ns; /* is this clock in nanoseconds? */
} trace_clocks[] = {
	{ trace_clock_local,		"local",	1 },
	{ trace_clock_global,		"global",	1 },
	{ trace_clock_counter,		"counter",	0 },
	{ trace_clock_jiffies,		"uptime",	0 },
	{ trace_clock,			"perf",		1 },
	{ ktime_get_mono_fast_ns,	"mono",		1 },
	{ ktime_get_raw_fast_ns,	"mono_raw",	1 },
	ARCH_TRACE_CLOCKS
};
895
/*
 * trace_parser_get_init - gets the buffer for trace parser
 *
 * Zeroes @parser and allocates a @size byte working buffer.
 * Returns 0 on success, 1 on allocation failure (note: nonzero,
 * not a negative errno - callers translate the failure themselves).
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}
910
911/*
912 * trace_parser_put - frees the buffer for trace parser
913 */
914void trace_parser_put(struct trace_parser *parser)
915{
916 kfree(parser->buffer);
917}
918
/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	/* A new read (offset 0) starts a fresh token */
	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * The parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		/* Reserve one byte for the NUL terminator below */
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else if (parser->idx < parser->size - 1) {
		/* Token continues into the next write */
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	} else {
		ret = -EINVAL;
		goto out;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
1004
/* TODO add a seq_buf_to_buffer() */
/*
 * Copy up to @cnt unread bytes of @s into @buf, advancing the seq
 * read position. Returns the number of bytes copied, or -EBUSY when
 * there is nothing left to read.
 */
static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (trace_seq_used(s) <= s->seq.readpos)
		return -EBUSY;

	/* len > 0 here, so the size_t comparison below is safe */
	len = trace_seq_used(s) - s->seq.readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->seq.readpos, cnt);

	s->seq.readpos += cnt;
	return cnt;
}
1021
Tim Bird0e950172010-02-25 15:36:43 -08001022unsigned long __read_mostly tracing_thresh;
1023
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001024#ifdef CONFIG_TRACER_MAX_TRACE
/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 *
 * Called by update_max_tr()/update_max_tr_single() with tr->max_lock
 * held, after the buffers have been swapped; this fills in the
 * bookkeeping (latency values and offending-task info) for @cpu.
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_buffer *trace_buf = &tr->trace_buffer;
	struct trace_buffer *max_buf = &tr->max_buffer;
	struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
	struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);

	max_buf->cpu = cpu;
	max_buf->time_start = data->preempt_timestamp;

	max_data->saved_latency = tr->max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	/*
	 * If tsk == current, then use current_uid(), as that does not use
	 * RCU. The irq tracer can be called out of RCU scope.
	 */
	if (tsk == current)
		max_data->uid = current_uid();
	else
		max_data->uid = task_uid(tsk);

	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this tasks comm */
	tracing_record_cmdline(tsk);
}
1063
/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 *
 * Must be called with interrupts disabled (enforced by WARN_ON_ONCE).
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());

	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	/* Swap the live buffer with the (cheaper than copying) max buffer */
	buf = tr->trace_buffer.buffer;
	tr->trace_buffer.buffer = tr->max_buffer.buffer;
	tr->max_buffer.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
1098
/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 *
 * Must be called with interrupts disabled (enforced by WARN_ON_ONCE).
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (tr->stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!tr->allocated_snapshot) {
		/* Only the nop tracer should hit this when disabling */
		WARN_ON_ONCE(tr->current_trace != &nop_trace);
		return;
	}

	arch_spin_lock(&tr->max_lock);

	ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	/* -EAGAIN and -EBUSY are expected transient failures */
	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&tr->max_lock);
}
Steven Rostedt5d4a9db2009-08-27 16:52:21 -04001142#endif /* CONFIG_TRACER_MAX_TRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001143
/*
 * Wait until the iterator's ring buffer has data to read.
 * @full: passed through to ring_buffer_wait() - presumably "wait for a
 * full page rather than any data"; confirm against ring_buffer_wait().
 */
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
	/* Iterators are static, they should be filled or empty */
	if (trace_buffer_iter(iter, iter->cpu_file))
		return 0;

	return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
				full);
}
1153
#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
 * Run @type's boot-time selftest against the global trace array.
 * Returns 0 on pass (or when no selftest exists / selftests are
 * disabled), -1 on failure - in which case the caller must not
 * register the tracer.
 */
static int run_tracer_selftest(struct tracer *type)
{
	struct trace_array *tr = &global_trace;
	struct tracer *saved_tracer = tr->current_trace;
	int ret;

	if (!type->selftest || tracing_selftest_disabled)
		return 0;

	/*
	 * Run a selftest on this tracer.
	 * Here we reset the trace buffer, and set the current
	 * tracer to be this tracer. The tracer can then run some
	 * internal tracing to verify that everything is in order.
	 * If we fail, we do not register this tracer.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

	tr->current_trace = type;

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
					   RING_BUFFER_ALL_CPUS);
		tr->allocated_snapshot = true;
	}
#endif

	/* the test is responsible for initializing and enabling */
	pr_info("Testing tracer %s: ", type->name);
	ret = type->selftest(type, tr);
	/* the test is responsible for resetting too */
	tr->current_trace = saved_tracer;
	if (ret) {
		printk(KERN_CONT "FAILED!\n");
		/* Add the warning after printing 'FAILED' */
		WARN_ON(1);
		return -1;
	}
	/* Only reset on passing, to avoid touching corrupted buffers */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (type->use_max_tr) {
		tr->allocated_snapshot = false;

		/* Shrink the max buffer again */
		if (ring_buffer_expanded)
			ring_buffer_resize(tr->max_buffer.buffer, 1,
					   RING_BUFFER_ALL_CPUS);
	}
#endif

	printk(KERN_CONT "PASSED\n");
	return 0;
}
#else
/* Selftests compiled out: registering always "passes" */
static inline int run_tracer_selftest(struct tracer *type)
{
	return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001219
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04001220static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1221
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08001222static void __init apply_trace_boot_options(void);
1223
/**
 * register_tracer - register a tracer with the ftrace system.
 * @type - the plugin for the tracer
 *
 * Register a new plugin tracer.
 *
 * Validates the tracer's name, rejects duplicates, fills in default
 * flag callbacks, runs the boot-time selftest, and links the tracer
 * into the global trace_types list (under trace_types_lock). If this
 * tracer was requested on the command line, it is also started here.
 * Returns 0 on success, -1 on any failure.
 */
int __init register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	/* Reject a second registration under the same name */
	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	/* Fill in no-op flag handlers so callers need not NULL-check */
	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;

	ret = run_tracer_selftest(type);
	if (ret < 0)
		goto out;

	type->next = trace_types;
	trace_types = type;
	add_tracer_options(&global_trace, type);

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(&global_trace, type->name);
	default_bootup_tracer = NULL;

	apply_trace_boot_options();

	/* disable other selftests, since this will break it. */
	tracing_selftest_disabled = true;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
1302
/* Reset a single CPU's ring buffer in @buf, quiescing writers first. */
void tracing_reset(struct trace_buffer *buf, int cpu)
{
	struct ring_buffer *buffer = buf->buffer;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1318
/* Reset the ring buffer of every online CPU in @buf, quiescing writers first. */
void tracing_reset_online_cpus(struct trace_buffer *buf)
{
	struct ring_buffer *buffer = buf->buffer;
	int cpu;

	if (!buffer)
		return;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	buf->time_start = buffer_ftrace_now(buf, buf->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}
1339
/* Must have trace_types_lock held */
/* Reset the (and, if configured, max) buffers of every trace instance. */
void tracing_reset_all_online_cpus(void)
{
	struct trace_array *tr;

	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		tracing_reset_online_cpus(&tr->trace_buffer);
#ifdef CONFIG_TRACER_MAX_TRACE
		tracing_reset_online_cpus(&tr->max_buffer);
#endif
	}
}
1352
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
 * pid <-> comm mapping cache used to resolve task names in trace output.
 * map_pid_to_cmdline maps a pid to an index into saved_cmdlines;
 * map_cmdline_to_pid is the reverse map. Entries holding NO_CMDLINE_MAP
 * are unused. Protected by trace_cmdline_lock.
 */
struct saved_cmdlines_buffer {
	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
	unsigned *map_cmdline_to_pid;
	unsigned cmdline_num;
	int cmdline_idx;
	char *saved_cmdlines;
};
static struct saved_cmdlines_buffer *savedcmd;

/* temporary disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001368
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001369static inline char *get_saved_cmdlines(int idx)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001370{
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001371 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1372}
1373
1374static inline void set_cmdline(int idx, const char *cmdline)
1375{
1376 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1377}
1378
1379static int allocate_cmdlines_buffer(unsigned int val,
1380 struct saved_cmdlines_buffer *s)
1381{
1382 s->map_cmdline_to_pid = kmalloc(val * sizeof(*s->map_cmdline_to_pid),
1383 GFP_KERNEL);
1384 if (!s->map_cmdline_to_pid)
1385 return -ENOMEM;
1386
1387 s->saved_cmdlines = kmalloc(val * TASK_COMM_LEN, GFP_KERNEL);
1388 if (!s->saved_cmdlines) {
1389 kfree(s->map_cmdline_to_pid);
1390 return -ENOMEM;
1391 }
1392
1393 s->cmdline_idx = 0;
1394 s->cmdline_num = val;
1395 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1396 sizeof(s->map_pid_to_cmdline));
1397 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1398 val * sizeof(*s->map_cmdline_to_pid));
1399
1400 return 0;
1401}
1402
1403static int trace_create_savedcmd(void)
1404{
1405 int ret;
1406
Namhyung Kima6af8fb2014-06-10 16:11:35 +09001407 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001408 if (!savedcmd)
1409 return -ENOMEM;
1410
1411 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1412 if (ret < 0) {
1413 kfree(savedcmd);
1414 savedcmd = NULL;
1415 return -ENOMEM;
1416 }
1417
1418 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001419}
1420
/* Nonzero while tracing_stop() calls outnumber tracing_start() calls. */
int is_tracing_stopped(void)
{
	return global_trace.stop_count;
}
1425
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 *
 * Stop/start calls nest via stop_count; only the outermost start
 * actually re-enables the ring buffers.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	if (--global_trace.stop_count) {
		if (global_trace.stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			global_trace.stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1468
/*
 * tracing_start_tr - start tracing for a single trace array instance.
 * Mirrors tracing_start() but only touches @tr's buffer; the global
 * array is delegated to tracing_start() so the max tracer is handled.
 */
static void tracing_start_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	/* If global, we need to also start the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_start();

	raw_spin_lock_irqsave(&tr->start_lock, flags);

	/* stop_count nests: only the outermost start re-enables recording */
	if (--tr->stop_count) {
		if (tr->stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			tr->stop_count = 0;
		}
		goto out;
	}

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1499
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	raw_spin_lock_irqsave(&global_trace.start_lock, flags);
	/* Nested stop: recording is already disabled, just count it */
	if (global_trace.stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&global_trace.max_lock);

	buffer = global_trace.trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Also quiesce the max-latency snapshot buffer */
	buffer = global_trace.max_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);
#endif

	arch_spin_unlock(&global_trace.max_lock);

 out:
	raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
}
1533
/*
 * tracing_stop_tr - stop tracing for a single trace array instance.
 * Counterpart of tracing_start_tr(); the global array is delegated to
 * tracing_stop() so the max tracer gets stopped too.
 */
static void tracing_stop_tr(struct trace_array *tr)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* If global, we need to also stop the max tracer */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return tracing_stop();

	raw_spin_lock_irqsave(&tr->start_lock, flags);
	/* Nested stop: recording is already disabled, just count it */
	if (tr->stop_count++)
		goto out;

	buffer = tr->trace_buffer.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

 out:
	raw_spin_unlock_irqrestore(&tr->start_lock, flags);
}
1554
/* Forward declaration; the definition is not visible in this chunk. */
void trace_stop_cmdline_recording(void);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001556
/*
 * trace_save_cmdline - record @tsk's comm (and tgid) in the cmdline cache.
 *
 * Returns 1 when the comm was saved, 0 when skipped (pid out of range
 * or the cmdline lock was contended). Called from hot paths, so it
 * never spins on the lock.
 */
static int trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	/* Only pids in [1, PID_MAX_DEFAULT] fit in the pid->cmdline map */
	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return 0;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return 0;

	idx = savedcmd->map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		/* No slot yet for this pid: take the next one round-robin */
		idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = savedcmd->map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
		savedcmd->map_pid_to_cmdline[tsk->pid] = idx;

		savedcmd->cmdline_idx = idx;
	}

	set_cmdline(idx, tsk->comm);
	saved_tgids[idx] = tsk->tgid;
	arch_spin_unlock(&trace_cmdline_lock);

	return 1;
}
1599
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001600static void __trace_find_cmdline(int pid, char comm[])
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001601{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001602 unsigned map;
1603
Steven Rostedt4ca53082009-03-16 19:20:15 -04001604 if (!pid) {
1605 strcpy(comm, "<idle>");
1606 return;
1607 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001608
Steven Rostedt74bf4072010-01-25 15:11:53 -05001609 if (WARN_ON_ONCE(pid < 0)) {
1610 strcpy(comm, "<XXX>");
1611 return;
1612 }
1613
Steven Rostedt4ca53082009-03-16 19:20:15 -04001614 if (pid > PID_MAX_DEFAULT) {
1615 strcpy(comm, "<...>");
1616 return;
1617 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001618
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001619 map = savedcmd->map_pid_to_cmdline[pid];
Thomas Gleixner50d88752009-03-18 08:58:44 +01001620 if (map != NO_CMDLINE_MAP)
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09001621 strcpy(comm, get_saved_cmdlines(map));
Thomas Gleixner50d88752009-03-18 08:58:44 +01001622 else
1623 strcpy(comm, "<...>");
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04001624}
1625
/*
 * trace_find_cmdline - locked wrapper around __trace_find_cmdline().
 * Disables preemption before taking the arch spinlock so the holder
 * cannot be scheduled out while other CPUs spin.
 */
void trace_find_cmdline(int pid, char comm[])
{
	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	__trace_find_cmdline(pid, comm);

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
1636
Jamie Gennis6019e592012-11-21 15:04:25 -08001637int trace_find_tgid(int pid)
1638{
1639 unsigned map;
1640 int tgid;
1641
1642 preempt_disable();
1643 arch_spin_lock(&trace_cmdline_lock);
Dmitry Shmidt99dd30a2015-10-28 10:45:04 -07001644 map = savedcmd->map_pid_to_cmdline[pid];
Jamie Gennis6019e592012-11-21 15:04:25 -08001645 if (map != NO_CMDLINE_MAP)
1646 tgid = saved_tgids[map];
1647 else
1648 tgid = -1;
1649
1650 arch_spin_unlock(&trace_cmdline_lock);
1651 preempt_enable();
1652
1653 return tgid;
1654}
1655
/*
 * tracing_record_cmdline - opportunistically cache @tsk's comm.
 * Skips work unless tracing is on and this CPU has recorded an event
 * since the last save (trace_cmdline_save flag). On a successful save
 * the flag is cleared until the next event is committed.
 */
void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	if (trace_save_cmdline(tsk))
		__this_cpu_write(trace_cmdline_save, false);
}
1667
/*
 * tracing_generic_entry_update - fill the common fields of a trace entry:
 * current pid, preempt count (low byte of @pc), and the irq/softirq/
 * resched state packed into entry->flags.
 */
void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		/* Arch cannot report irq state: flag it as unsupported */
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02001688
Steven Rostedte77405a2009-09-02 14:17:06 -04001689struct ring_buffer_event *
1690trace_buffer_lock_reserve(struct ring_buffer *buffer,
1691 int type,
1692 unsigned long len,
1693 unsigned long flags, int pc)
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001694{
1695 struct ring_buffer_event *event;
1696
Steven Rostedte77405a2009-09-02 14:17:06 -04001697 event = ring_buffer_lock_reserve(buffer, len);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001698 if (event != NULL) {
1699 struct trace_entry *ent = ring_buffer_event_data(event);
1700
1701 tracing_generic_entry_update(ent, flags, pc);
1702 ent->type = type;
1703 }
1704
1705 return event;
1706}
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001707
/*
 * __buffer_unlock_commit - commit a reserved event. Flags this CPU so
 * the next tracing_record_cmdline() call re-saves the current comm.
 */
void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	ring_buffer_unlock_commit(buffer, event);
}
1714
/*
 * trace_buffer_unlock_commit - commit an event, then optionally append
 * kernel and user stack traces (gated by the tr trace_flags inside the
 * helpers).
 */
void trace_buffer_unlock_commit(struct trace_array *tr,
				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	/* skip=6: presumably skips the commit-path frames — TODO confirm */
	ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
Arnaldo Carvalho de Melo51a763d2009-02-05 16:14:13 -02001726
/*
 * Scratch ring buffer used by trace_event_buffer_lock_reserve() when the
 * real buffer rejects the event but triggers still need the event data.
 */
static struct ring_buffer *temp_buffer;
1728
/*
 * trace_event_buffer_lock_reserve - reserve an event in @trace_file's
 * trace array buffer, falling back to temp_buffer for trigger-only use.
 * Sets *current_rb to the buffer actually reserved from.
 */
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
			  struct trace_event_file *trace_file,
			  int type, unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *entry;

	*current_rb = trace_file->tr->trace_buffer.buffer;
	entry = trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
	if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
		*current_rb = temp_buffer;
		entry = trace_buffer_lock_reserve(*current_rb,
						  type, len, flags, pc);
	}
	return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
1754
1755struct ring_buffer_event *
Steven Rostedte77405a2009-09-02 14:17:06 -04001756trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
1757 int type, unsigned long len,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001758 unsigned long flags, int pc)
1759{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05001760 *current_rb = global_trace.trace_buffer.buffer;
Steven Rostedte77405a2009-09-02 14:17:06 -04001761 return trace_buffer_lock_reserve(*current_rb,
Steven Rostedtef5580d2009-02-27 19:38:04 -05001762 type, len, flags, pc);
1763}
Steven Rostedt94487d62009-05-05 19:22:53 -04001764EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001765
/*
 * trace_buffer_unlock_commit_regs - like trace_buffer_unlock_commit()
 * but takes the pt_regs snapshot to walk for the kernel stack trace
 * (skip=0 since the regs already point at the interesting frame).
 */
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(tr, buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);
Masami Hiramatsu1fd8df22011-06-08 16:09:34 +09001778
/*
 * trace_current_buffer_discard_commit - drop a reserved-but-unwanted
 * event, releasing its ring buffer reservation.
 */
void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);
Steven Rostedtef5580d2009-02-27 19:38:04 -05001785
/*
 * trace_function - record a TRACE_FN entry (ip + parent_ip) in @tr's
 * buffer. The event is dropped if the event filter rejects it.
 */
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	/* Commit only if the filter passes; otherwise the event is discarded */
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}
1807
#ifdef CONFIG_STACKTRACE

/* Per-cpu scratch: one page worth of return addresses for deep traces */
#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
/* Nesting count guarding ftrace_stack (see __ftrace_trace_stack()) */
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
1817
/*
 * __ftrace_trace_stack - record a kernel stack trace event in @buffer.
 * Uses the large per-cpu ftrace_stack scratch area when it is free;
 * nested users (interrupt/NMI hitting mid-trace) fall back to capturing
 * directly into the (smaller) event, so no locking is needed.
 */
static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct trace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_SIZE.
	 */
	preempt_disable_notrace();

	use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		/* First user on this CPU: capture into the big scratch area */
		trace.entries		= this_cpu_ptr(ftrace_stack.calls);
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		/* Nested: capture straight into the event's caller array */
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__this_cpu_dec(ftrace_stack_reserve);
	preempt_enable_notrace();

}
1898
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001899static inline void ftrace_trace_stack(struct trace_array *tr,
1900 struct ring_buffer *buffer,
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001901 unsigned long flags,
1902 int skip, int pc, struct pt_regs *regs)
Steven Rostedt53614992009-01-15 19:12:40 -05001903{
Steven Rostedt (Red Hat)2d34f482015-09-30 11:45:22 -04001904 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
Steven Rostedt53614992009-01-15 19:12:40 -05001905 return;
1906
Steven Rostedt (Red Hat)73dddbb2015-09-29 15:38:55 -04001907 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
Steven Rostedt53614992009-01-15 19:12:40 -05001908}
1909
/*
 * __trace_stack - unconditionally record a kernel stack trace into
 * @tr's buffer (no trace_flags gate, no regs).
 */
void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
}
1915
/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 * @skip: Number of functions to skip (helper handlers)
 */
void trace_dump_stack(int skip)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/*
	 * Skip 3 more, seems to get us at the caller of
	 * this function.
	 */
	skip += 3;
	__ftrace_trace_stack(global_trace.trace_buffer.buffer,
			     flags, skip, preempt_count(), NULL);
}
1937
/* Per-cpu recursion guard for ftrace_trace_userstack() */
static DEFINE_PER_CPU(int, user_stack_count);
1939
/*
 * ftrace_trace_userstack - record the current task's user-space stack
 * trace in @buffer. No-op unless the userstacktrace option is set;
 * bails out in NMI context and guards against recursive entry via the
 * per-cpu user_stack_count.
 */
void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct trace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fix ups.
	 * The save user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry	= ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!call_filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}
1991
#ifdef UNUSED
/*
 * NOTE(review): dead code — never compiled (UNUSED is not defined). If
 * resurrected, check the first argument: ftrace_trace_userstack() takes
 * a ring_buffer, not a trace_array — confirm before enabling.
 */
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */
Török Edwin02b67512008-11-22 13:28:47 +02001998
Frederic Weisbeckerc0a0d0d2009-07-29 17:51:13 +02001999#endif /* CONFIG_STACKTRACE */
2000
/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

/* One scratch buffer per cpu per context; see get_trace_buf() */
static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;
2010
/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	return this_cpu_ptr(&percpu_buffer->buffer[0]);
}
2040
2041static int alloc_percpu_trace_buffer(void)
2042{
2043 struct trace_buffer_struct *buffers;
2044 struct trace_buffer_struct *sirq_buffers;
2045 struct trace_buffer_struct *irq_buffers;
2046 struct trace_buffer_struct *nmi_buffers;
2047
2048 buffers = alloc_percpu(struct trace_buffer_struct);
2049 if (!buffers)
2050 goto err_warn;
2051
2052 sirq_buffers = alloc_percpu(struct trace_buffer_struct);
2053 if (!sirq_buffers)
2054 goto err_sirq;
2055
2056 irq_buffers = alloc_percpu(struct trace_buffer_struct);
2057 if (!irq_buffers)
2058 goto err_irq;
2059
2060 nmi_buffers = alloc_percpu(struct trace_buffer_struct);
2061 if (!nmi_buffers)
2062 goto err_nmi;
2063
2064 trace_percpu_buffer = buffers;
2065 trace_percpu_sirq_buffer = sirq_buffers;
2066 trace_percpu_irq_buffer = irq_buffers;
2067 trace_percpu_nmi_buffer = nmi_buffers;
2068
2069 return 0;
2070
2071 err_nmi:
2072 free_percpu(irq_buffers);
2073 err_irq:
2074 free_percpu(sirq_buffers);
2075 err_sirq:
2076 free_percpu(buffers);
2077 err_warn:
2078 WARN(1, "Could not allocate percpu trace_printk buffer");
2079 return -ENOMEM;
2080}
2081
/* Set once the per-cpu trace_printk buffers have been allocated */
static int buffers_allocated;
2083
/*
 * trace_printk_init_buffers - one-time setup for trace_printk().
 * Allocates the per-cpu scratch buffers, warns that a debug facility
 * is in use, expands the ring buffers, and starts cmdline recording
 * if the global buffer already exists (i.e. called from module code).
 */
void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	/* trace_printk() is for debug use only. Don't use it in production. */

	pr_debug("**********************************************************\n");
	pr_debug("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_debug("**                                                      **\n");
	pr_debug("** trace_printk() being used. Allocating extra memory.  **\n");
	pr_debug("**                                                      **\n");
	pr_debug("** This means that this is a DEBUG kernel and it is     **\n");
	pr_debug("** unsafe for production use.                           **\n");
	pr_debug("**                                                      **\n");
	pr_debug("** If you see this message and you are not debugging    **\n");
	pr_debug("** the kernel, report this immediately to your vendor!  **\n");
	pr_debug("**                                                      **\n");
	pr_debug("**   NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE   **\n");
	pr_debug("**********************************************************\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.trace_buffer.buffer)
		tracing_start_cmdline_record();
}
2122
2123void trace_printk_start_comm(void)
2124{
2125 /* Start tracing comms if trace printk is set */
2126 if (!buffers_allocated)
2127 return;
2128 tracing_start_cmdline_record();
2129}
2130
2131static void trace_printk_start_stop_comm(int enabled)
2132{
2133 if (!buffers_allocated)
2134 return;
2135
2136 if (enabled)
2137 tracing_start_cmdline_record();
2138 else
2139 tracing_stop_cmdline_record();
Steven Rostedt07d777f2011-09-22 14:01:55 -04002140}
2141
/**
 * trace_vbprintk - write binary msg to tracing buffer
 * @ip: instruction pointer of the trace_printk() call site
 * @fmt: printf format string; only the pointer is stored in the event
 * @args: arguments for @fmt, encoded in binary form by vbin_printf()
 *
 * Returns the number of u32 words of encoded arguments, or 0 if
 * nothing could be recorded.
 */
int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_bprint;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct trace_array *tr = &global_trace;
	struct bprint_entry *entry;
	unsigned long flags;
	char *tbuffer;
	int len = 0, size, pc;

	if (unlikely(tracing_selftest_running || tracing_disabled))
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	/* Sample the preempt count before we disable preemption below */
	pc = preempt_count();
	preempt_disable_notrace();

	/* Per-cpu scratch buffer; NULL means recursion limit was hit */
	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	/* Encode the varargs in binary form into the scratch buffer */
	len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);

	/* Bail if the args did not fit or vbin_printf() failed */
	if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
		goto out;

	local_save_flags(flags);
	/* Event payload: bprint_entry header plus len binary words */
	size = sizeof(*entry) + sizeof(u32) * len;
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	/* Only the format pointer is recorded; args live in entry->buf */
	entry->fmt = fmt;

	memcpy(entry->buf, tbuffer, sizeof(u32) * len);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
	}

out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
2201
/*
 * Record a plain-text printk message into @buffer.  Unlike
 * trace_vbprintk(), the message is fully formatted here and stored as
 * a string in a TRACE_PRINT event.  Returns the formatted length, or
 * 0 if nothing was recorded.
 */
static int
__trace_array_vprintk(struct ring_buffer *buffer,
		      unsigned long ip, const char *fmt, va_list args)
{
	struct trace_event_call *call = &event_print;
	struct ring_buffer_event *event;
	int len = 0, size, pc;
	struct print_entry *entry;
	unsigned long flags;
	char *tbuffer;

	if (tracing_disabled || tracing_selftest_running)
		return 0;

	/* Don't pollute graph traces with trace_vprintk internals */
	pause_graph_tracing();

	/* Sample the preempt count before we disable preemption below */
	pc = preempt_count();
	preempt_disable_notrace();

	/* Per-cpu scratch buffer; NULL means recursion limit was hit */
	tbuffer = get_trace_buf();
	if (!tbuffer) {
		len = 0;
		goto out;
	}

	len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);

	local_save_flags(flags);
	/* +1 for the terminating NUL copied below */
	size = sizeof(*entry) + len + 1;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	memcpy(&entry->buf, tbuffer, len + 1);
	if (!call_filter_check_discard(call, entry, buffer, event)) {
		__buffer_unlock_commit(buffer, event);
		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
	}
 out:
	preempt_enable_notrace();
	unpause_graph_tracing();

	return len;
}
Steven Rostedt659372d2009-09-03 19:11:07 -04002251
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002252int trace_array_vprintk(struct trace_array *tr,
2253 unsigned long ip, const char *fmt, va_list args)
2254{
2255 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
2256}
2257
2258int trace_array_printk(struct trace_array *tr,
2259 unsigned long ip, const char *fmt, ...)
2260{
2261 int ret;
2262 va_list ap;
2263
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002264 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002265 return 0;
2266
2267 va_start(ap, fmt);
2268 ret = trace_array_vprintk(tr, ip, fmt, ap);
2269 va_end(ap);
2270 return ret;
2271}
2272
2273int trace_array_printk_buf(struct ring_buffer *buffer,
2274 unsigned long ip, const char *fmt, ...)
2275{
2276 int ret;
2277 va_list ap;
2278
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002279 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002280 return 0;
2281
2282 va_start(ap, fmt);
2283 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
2284 va_end(ap);
2285 return ret;
2286}
2287
Steven Rostedt659372d2009-09-03 19:11:07 -04002288int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2289{
Steven Rostedta813a152009-10-09 01:41:35 -04002290 return trace_array_vprintk(&global_trace, ip, fmt, args);
Steven Rostedt659372d2009-09-03 19:11:07 -04002291}
Frederic Weisbecker769b0442009-03-06 17:21:49 +01002292EXPORT_SYMBOL_GPL(trace_vprintk);
2293
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002294static void trace_iterator_increment(struct trace_iterator *iter)
Steven Rostedt5a90f572008-09-03 17:42:51 -04002295{
Steven Rostedt6d158a82012-06-27 20:46:14 -04002296 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
2297
Steven Rostedt5a90f572008-09-03 17:42:51 -04002298 iter->idx++;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002299 if (buf_iter)
2300 ring_buffer_read(buf_iter, NULL);
Steven Rostedt5a90f572008-09-03 17:42:51 -04002301}
2302
Ingo Molnare309b412008-05-12 21:20:51 +02002303static struct trace_entry *
Steven Rostedtbc21b472010-03-31 19:49:26 -04002304peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
2305 unsigned long *lost_events)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002306{
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002307 struct ring_buffer_event *event;
Steven Rostedt6d158a82012-06-27 20:46:14 -04002308 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002309
Steven Rostedtd7690412008-10-01 00:29:53 -04002310 if (buf_iter)
2311 event = ring_buffer_iter_peek(buf_iter, ts);
2312 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002313 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002314 lost_events);
Steven Rostedtd7690412008-10-01 00:29:53 -04002315
Steven Rostedt4a9bd3f2011-07-14 16:36:53 -04002316 if (event) {
2317 iter->ent_size = ring_buffer_event_length(event);
2318 return ring_buffer_event_data(event);
2319 }
2320 iter->ent_size = 0;
2321 return NULL;
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002322}
Steven Rostedtd7690412008-10-01 00:29:53 -04002323
/*
 * Find the oldest pending entry across the cpus of interest without
 * consuming it.  The optional out-parameters receive the entry's cpu,
 * lost-event count and timestamp; iter->ent_size is left describing
 * the returned entry.  Returns NULL when all buffers are empty.
 */
static struct trace_entry *
__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
		  unsigned long *missing_events, u64 *ent_ts)
{
	struct ring_buffer *buffer = iter->trace_buffer->buffer;
	struct trace_entry *ent, *next = NULL;
	unsigned long lost_events = 0, next_lost = 0;
	int cpu_file = iter->cpu_file;
	u64 next_ts = 0, ts;
	int next_cpu = -1;
	int next_size = 0;
	int cpu;

	/*
	 * If we are in a per_cpu trace file, don't bother by iterating over
	 * all cpu and peek directly.
	 */
	if (cpu_file > RING_BUFFER_ALL_CPUS) {
		if (ring_buffer_empty_cpu(buffer, cpu_file))
			return NULL;
		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
		if (ent_cpu)
			*ent_cpu = cpu_file;

		return ent;
	}

	for_each_tracing_cpu(cpu) {

		if (ring_buffer_empty_cpu(buffer, cpu))
			continue;

		ent = peek_next_entry(iter, cpu, &ts, &lost_events);

		/*
		 * Pick the entry with the smallest timestamp:
		 */
		if (ent && (!next || ts < next_ts)) {
			next = ent;
			next_cpu = cpu;
			next_ts = ts;
			next_lost = lost_events;
			next_size = iter->ent_size;
		}
	}

	/* peek_next_entry() clobbered ent_size; restore the winner's size */
	iter->ent_size = next_size;

	if (ent_cpu)
		*ent_cpu = next_cpu;

	if (ent_ts)
		*ent_ts = next_ts;

	if (missing_events)
		*missing_events = next_lost;

	return next;
}
2383
/* Find the next real entry, without updating the iterator itself */
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
					  int *ent_cpu, u64 *ent_ts)
{
	/* Callers of this helper do not need the lost-event count */
	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
}
Ingo Molnar8c523a92008-05-12 21:20:46 +02002390
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002391/* Find the next real entry, and increment the iterator to the next entry */
Jason Wessel955b61e2010-08-05 09:22:23 -05002392void *trace_find_next_entry_inc(struct trace_iterator *iter)
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002393{
Steven Rostedtbc21b472010-03-31 19:49:26 -04002394 iter->ent = __find_next_entry(iter, &iter->cpu,
2395 &iter->lost_events, &iter->ts);
Steven Rostedtb3806b42008-05-12 21:20:46 +02002396
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002397 if (iter->ent)
Robert Richtere2ac8ef2008-11-12 12:59:32 +01002398 trace_iterator_increment(iter);
Steven Rostedtdd0e5452008-08-01 12:26:41 -04002399
Steven Rostedt3928a8a2008-09-29 23:02:41 -04002400 return iter->ent ? iter : NULL;
Steven Rostedtb3806b42008-05-12 21:20:46 +02002401}
2402
Ingo Molnare309b412008-05-12 21:20:51 +02002403static void trace_consume(struct trace_iterator *iter)
Steven Rostedtb3806b42008-05-12 21:20:46 +02002404{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002405 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
Steven Rostedtbc21b472010-03-31 19:49:26 -04002406 &iter->lost_events);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002407}
2408
/*
 * seq_file ->next() callback: advance the trace iterator to position
 * *pos.  Iteration is forward-only; a request for an index before the
 * current one returns NULL.
 */
static void *s_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	int i = (int)*pos;
	void *ent;

	/* Leftover output is handled in s_start(); it must be clear here */
	WARN_ON_ONCE(iter->leftover);

	(*pos)++;

	/* can't go backwards */
	if (iter->idx > i)
		return NULL;

	/* idx < 0 means no entry has been read yet; prime the first one */
	if (iter->idx < 0)
		ent = trace_find_next_entry_inc(iter);
	else
		ent = iter;

	/* Walk forward until the iterator reaches the requested index */
	while (ent && iter->idx < i)
		ent = trace_find_next_entry_inc(iter);

	iter->pos = *pos;

	return ent;
}
2435
/*
 * Reset the ring buffer iterator for @cpu and skip entries older than
 * the buffer's time_start, recording how many were skipped.
 */
void tracing_iter_reset(struct trace_iterator *iter, int cpu)
{
	struct ring_buffer_event *event;
	struct ring_buffer_iter *buf_iter;
	unsigned long entries = 0;
	u64 ts;

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;

	buf_iter = trace_buffer_iter(iter, cpu);
	if (!buf_iter)
		return;

	ring_buffer_iter_reset(buf_iter);

	/*
	 * We could have the case with the max latency tracers
	 * that a reset never took place on a cpu. This is evident
	 * by the timestamp being before the start of the buffer.
	 */
	while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
		if (ts >= iter->trace_buffer->time_start)
			break;
		entries++;
		ring_buffer_read(buf_iter, NULL);
	}

	per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
}
2465
/*
 * The current tracer is copied to avoid a global locking
 * all around.
 */
static void *s_start(struct seq_file *m, loff_t *pos)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	int cpu_file = iter->cpu_file;
	void *p = NULL;
	loff_t l = 0;
	int cpu;

	/*
	 * copy the tracer to avoid using a global lock all around.
	 * iter->trace is a copy of current_trace, the pointer to the
	 * name may be used instead of a strcmp(), as iter->trace->name
	 * will point to the same string as current_trace->name.
	 */
	mutex_lock(&trace_types_lock);
	if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
		*iter->trace = *tr->current_trace;
	mutex_unlock(&trace_types_lock);

#ifdef CONFIG_TRACER_MAX_TRACE
	if (iter->snapshot && iter->trace->use_max_tr)
		return ERR_PTR(-EBUSY);
#endif

	/* Pause cmdline recording while the trace is read (see s_stop()) */
	if (!iter->snapshot)
		atomic_inc(&trace_record_cmdline_disabled);

	/* Non-sequential read: rewind and walk forward to *pos */
	if (*pos != iter->pos) {
		iter->ent = NULL;
		iter->cpu = 0;
		iter->idx = -1;

		if (cpu_file == RING_BUFFER_ALL_CPUS) {
			for_each_tracing_cpu(cpu)
				tracing_iter_reset(iter, cpu);
		} else
			tracing_iter_reset(iter, cpu_file);

		iter->leftover = 0;
		for (p = iter; p && l < *pos; p = s_next(m, p, &l))
			;

	} else {
		/*
		 * If we overflowed the seq_file before, then we want
		 * to just reuse the trace_seq buffer again.
		 */
		if (iter->leftover)
			p = iter;
		else {
			l = *pos - 1;
			p = s_next(m, p, &l);
		}
	}

	trace_event_read_lock();
	trace_access_lock(cpu_file);
	return p;
}
2530
/*
 * seq_file ->stop() callback: undo the locks taken in s_start() and
 * resume cmdline recording.
 */
static void s_stop(struct seq_file *m, void *p)
{
	struct trace_iterator *iter = m->private;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* s_start() returned -EBUSY for this case; nothing was locked */
	if (iter->snapshot && iter->trace->use_max_tr)
		return;
#endif

	if (!iter->snapshot)
		atomic_dec(&trace_record_cmdline_disabled);

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
}
2546
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002547static void
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002548get_total_entries(struct trace_buffer *buf,
2549 unsigned long *total, unsigned long *entries)
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002550{
2551 unsigned long count;
2552 int cpu;
2553
2554 *total = 0;
2555 *entries = 0;
2556
2557 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002558 count = ring_buffer_entries_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002559 /*
2560 * If this buffer has skipped entries, then we hold all
2561 * entries for the trace and we need to ignore the
2562 * ones before the time stamp.
2563 */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002564 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
2565 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002566 /* total is the same as the entries */
2567 *total += count;
2568 } else
2569 *total += count +
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002570 ring_buffer_overrun_cpu(buf->buffer, cpu);
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002571 *entries += count;
2572 }
2573}
2574
/* Column legend for the latency trace output format */
static void print_lat_help_header(struct seq_file *m)
{
	seq_puts(m, "# _------=> CPU# \n"
		    "# / _-----=> irqs-off \n"
		    "# | / _----=> need-resched \n"
		    "# || / _---=> hardirq/softirq \n"
		    "# ||| / _--=> preempt-depth \n"
		    "# |||| / delay \n"
		    "# cmd pid ||||| time | caller \n"
		    "# \\ / ||||| \\ | / \n");
}
2586
/* Emit the common "entries-in-buffer/entries-written" banner for @buf */
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
{
	unsigned long total, entries;

	get_total_entries(buf, &total, &entries);
	seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
		   entries, total, num_online_cpus());
	seq_puts(m, "#\n");
}
2597
/* Basic function-trace header: no irq/preempt state columns */
static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"
		    "# | | | | |\n");
}
2604
/*
 * Like print_func_help_header() but with a TGID column.  Emit both
 * header lines with a single seq_puts() of concatenated literals,
 * matching the consolidated style of the sibling help-header helpers
 * in this file (the output is byte-identical).
 */
static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# TASK-PID TGID CPU# TIMESTAMP FUNCTION\n"
		    "# | | | | | |\n");
}
2611
/* Function-trace header including irq-off/need-resched/etc. columns */
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n"
		    "# / _----=> need-resched\n"
		    "# | / _---=> hardirq/softirq\n"
		    "# || / _--=> preempt-depth\n"
		    "# ||| / delay\n"
		    "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"
		    "# | | | |||| | |\n");
}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002623
/*
 * Like print_func_help_header_irq() but with a TGID column.  Emit the
 * whole legend with one seq_puts() of concatenated literals, matching
 * the consolidated style of the other help-header helpers in this
 * file (the output is byte-identical to the previous seven calls).
 */
static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
{
	print_event_info(buf, m);
	seq_puts(m, "# _-----=> irqs-off\n"
		    "# / _----=> need-resched\n"
		    "# | / _---=> hardirq/softirq\n"
		    "# || / _--=> preempt-depth\n"
		    "# ||| / delay\n"
		    "# TASK-PID TGID CPU# |||| TIMESTAMP FUNCTION\n"
		    "# | | | | |||| | |\n");
}
2635
Jiri Olsa62b915f2010-04-02 19:01:22 +02002636void
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002637print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2638{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002639 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002640 struct trace_buffer *buf = iter->trace_buffer;
2641 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04002642 struct tracer *type = iter->trace;
Steven Rostedt39eaf7e2011-11-17 10:35:16 -05002643 unsigned long entries;
2644 unsigned long total;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002645 const char *name = "preemption";
2646
Steven Rostedt (Red Hat)d840f712013-02-01 18:38:47 -05002647 name = type->name;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002648
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002649 get_total_entries(buf, &total, &entries);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002650
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002651 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002652 name, UTS_RELEASE);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002653 seq_puts(m, "# -----------------------------------"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002654 "---------------------------------\n");
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002655 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002656 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
Steven Rostedt57f50be2008-05-12 21:20:44 +02002657 nsecs_to_usecs(data->saved_latency),
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002658 entries,
Steven Rostedt4c11d7a2008-05-12 21:20:43 +02002659 total,
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05002660 buf->cpu,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002661#if defined(CONFIG_PREEMPT_NONE)
2662 "server",
2663#elif defined(CONFIG_PREEMPT_VOLUNTARY)
2664 "desktop",
Steven Rostedtb5c21b42008-07-10 20:58:12 -04002665#elif defined(CONFIG_PREEMPT)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002666 "preempt",
2667#else
2668 "unknown",
2669#endif
2670 /* These are reserved for later use */
2671 0, 0, 0, 0);
2672#ifdef CONFIG_SMP
2673 seq_printf(m, " #P:%d)\n", num_online_cpus());
2674#else
2675 seq_puts(m, ")\n");
2676#endif
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002677 seq_puts(m, "# -----------------\n");
2678 seq_printf(m, "# | task: %.16s-%d "
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002679 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
Eric W. Biedermand20b92a2012-03-13 16:02:19 -07002680 data->comm, data->pid,
2681 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002682 data->policy, data->rt_priority);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002683 seq_puts(m, "# -----------------\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002684
2685 if (data->critical_start) {
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002686 seq_puts(m, "# => started at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002687 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2688 trace_print_seq(m, &iter->seq);
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002689 seq_puts(m, "\n# => ended at: ");
Steven Rostedt214023c2008-05-12 21:20:46 +02002690 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2691 trace_print_seq(m, &iter->seq);
Steven Rostedt8248ac02009-09-02 12:27:41 -04002692 seq_puts(m, "\n#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002693 }
2694
KOSAKI Motohiro888b55d2009-03-08 13:12:43 +09002695 seq_puts(m, "#\n");
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02002696}
2697
/*
 * Emit a "##### CPU %u buffer started ####" annotation the first time
 * an entry from a given cpu appears in the output (annotate option).
 */
static void test_cpu_buff_start(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
		return;

	if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
		return;

	/* Already announced this cpu */
	if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
		return;

	/* Entries were skipped on this cpu, so its buffer did not "start" */
	if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
		return;

	if (iter->started)
		cpumask_set_cpu(iter->cpu, iter->started);

	/* Don't print started cpu buffer for the first entry of the trace */
	if (iter->idx > 1)
		trace_seq_printf(s, "##### CPU %u buffer started ####\n",
				 iter->cpu);
}
2723
/* Default (pretty) text output for one trace entry */
static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	test_cpu_buff_start(iter);

	event = ftrace_find_event(entry->type);

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		if (iter->iter_flags & TRACE_FILE_LAT_FMT)
			trace_print_lat_context(iter);
		else
			trace_print_context(iter);
	}

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/* Hand off to the event's registered pretty-printer if one exists */
	if (event)
		return event->funcs->trace(iter, sym_flags, event);

	/* No printer registered for this event type */
	trace_seq_printf(s, "Unknown type %d\n", entry->type);

	return trace_handle_return(s);
}
2755
/* "raw" output format: pid, cpu and timestamp printed as bare numbers */
static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
		trace_seq_printf(s, "%d %d %llu ",
				 entry->pid, iter->cpu, iter->ts);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	event = ftrace_find_event(entry->type);
	/* Use the event's raw printer when registered */
	if (event)
		return event->funcs->raw(iter, 0, event);

	/* No event registered for this type */
	trace_seq_printf(s, "%d ?\n", entry->type);

	return trace_handle_return(s);
}
2780
/*
 * Emit the current trace entry as hex fields (pid, cpu, timestamp,
 * then the event's hex payload), terminated by a newline byte.
 */
static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	unsigned char newline = '\n';
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_HEX_FIELD(s, entry->pid);
		SEQ_PUT_HEX_FIELD(s, iter->cpu);
		SEQ_PUT_HEX_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	if (event) {
		enum print_line_t ret = event->funcs->hex(iter, 0, event);
		/* Propagate partial/unhandled results from the event. */
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	SEQ_PUT_FIELD(s, newline);

	return trace_handle_return(s);
}
2810
/*
 * Emit the current trace entry in binary form: raw pid/cpu/timestamp
 * fields followed by the event's binary payload. Unknown event types
 * are silently treated as handled.
 */
static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *entry;
	struct trace_event *event;

	entry = iter->ent;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		SEQ_PUT_FIELD(s, entry->pid);
		SEQ_PUT_FIELD(s, iter->cpu);
		SEQ_PUT_FIELD(s, iter->ts);
		if (trace_seq_has_overflowed(s))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	event = ftrace_find_event(entry->type);
	return event ? event->funcs->binary(iter, 0, event) :
		TRACE_TYPE_HANDLED;
}
2832
/*
 * Return 1 if the buffers the iterator covers contain no entries,
 * 0 otherwise. Honors a per-cpu iterator (iter->cpu_file) by only
 * checking that one CPU's buffer; otherwise checks every CPU.
 */
int trace_empty(struct trace_iterator *iter)
{
	struct ring_buffer_iter *buf_iter;
	int cpu;

	/* If we are looking at one CPU buffer, only check that one */
	if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
		cpu = iter->cpu_file;
		buf_iter = trace_buffer_iter(iter, cpu);
		/* Prefer the reader iterator when one was set up. */
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
		return 1;
	}

	for_each_tracing_cpu(cpu) {
		buf_iter = trace_buffer_iter(iter, cpu);
		if (buf_iter) {
			if (!ring_buffer_iter_empty(buf_iter))
				return 0;
		} else {
			if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
				return 0;
		}
	}

	return 1;
}
2865
/* Called with trace_event_read_lock() held. */
/*
 * Format one trace entry into iter->seq, dispatching in priority
 * order: lost-event notice, tracer-specific print_line hook,
 * printk-msg-only shortcuts, then bin/hex/raw/default formats
 * selected by the trace option flags.
 */
enum print_line_t print_trace_line(struct trace_iterator *iter)
{
	struct trace_array *tr = iter->tr;
	unsigned long trace_flags = tr->trace_flags;
	enum print_line_t ret;

	/* Report ring-buffer overwrites before the entry itself. */
	if (iter->lost_events) {
		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
				 iter->cpu, iter->lost_events);
		if (trace_seq_has_overflowed(&iter->seq))
			return TRACE_TYPE_PARTIAL_LINE;
	}

	/* Let the current tracer claim the line first. */
	if (iter->trace && iter->trace->print_line) {
		ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

	if (iter->ent->type == TRACE_BPUTS &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bputs_msg_only(iter);

	if (iter->ent->type == TRACE_BPRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_bprintk_msg_only(iter);

	if (iter->ent->type == TRACE_PRINT &&
			trace_flags & TRACE_ITER_PRINTK &&
			trace_flags & TRACE_ITER_PRINTK_MSGONLY)
		return trace_print_printk_msg_only(iter);

	if (trace_flags & TRACE_ITER_BIN)
		return print_bin_fmt(iter);

	if (trace_flags & TRACE_ITER_HEX)
		return print_hex_fmt(iter);

	if (trace_flags & TRACE_ITER_RAW)
		return print_raw_fmt(iter);

	return print_trace_fmt(iter);
}
2912
/*
 * Print the latency-format header for a seq_file read, unless the
 * buffers are empty. The verbose option suppresses the short help
 * header (verbose output carries its own column info).
 */
void trace_latency_header(struct seq_file *m)
{
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;

	/* print nothing if the buffers are empty */
	if (trace_empty(iter))
		return;

	if (iter->iter_flags & TRACE_FILE_LAT_FMT)
		print_trace_header(m, iter);

	if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
		print_lat_help_header(m);
}
2928
Jiri Olsa62b915f2010-04-02 19:01:22 +02002929void trace_default_header(struct seq_file *m)
2930{
2931 struct trace_iterator *iter = m->private;
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04002932 struct trace_array *tr = iter->tr;
2933 unsigned long trace_flags = tr->trace_flags;
Jiri Olsa62b915f2010-04-02 19:01:22 +02002934
Jiri Olsaf56e7f82011-06-03 16:58:49 +02002935 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2936 return;
2937
Jiri Olsa62b915f2010-04-02 19:01:22 +02002938 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2939 /* print nothing if the buffers are empty */
2940 if (trace_empty(iter))
2941 return;
2942 print_trace_header(m, iter);
2943 if (!(trace_flags & TRACE_ITER_VERBOSE))
2944 print_lat_help_header(m);
2945 } else {
Steven Rostedt77271ce2011-11-17 09:34:33 -05002946 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2947 if (trace_flags & TRACE_ITER_IRQ_INFO)
Jamie Gennis6019e592012-11-21 15:04:25 -08002948 if (trace_flags & TRACE_ITER_TGID)
2949 print_func_help_header_irq_tgid(iter->trace_buffer, m);
2950 else
2951 print_func_help_header_irq(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002952 else
Jamie Gennis6019e592012-11-21 15:04:25 -08002953 if (trace_flags & TRACE_ITER_TGID)
2954 print_func_help_header_tgid(iter->trace_buffer, m);
2955 else
2956 print_func_help_header(iter->trace_buffer, m);
Steven Rostedt77271ce2011-11-17 09:34:33 -05002957 }
Jiri Olsa62b915f2010-04-02 19:01:22 +02002958 }
2959}
2960
/* Warn in the trace output when function tracing has been disabled
 * due to a detected corruption (ftrace_is_dead()). */
static void test_ftrace_alive(struct seq_file *m)
{
	if (ftrace_is_dead())
		seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
			 "# MAY BE MISSING FUNCTION EVENTS\n");
}
2968
Steven Rostedt (Red Hat)d8741e22013-03-05 10:25:16 -05002969#ifdef CONFIG_TRACER_MAX_TRACE
/* Usage text for the top-level "snapshot" file (all CPUs). */
static void show_snapshot_main_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
		    "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer.\n"
		    "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}
Steven Rostedt (Red Hat)f1affca2013-03-05 14:35:11 -05002979
/*
 * Usage text for a per-cpu "snapshot" file. The '1' command is only
 * meaningful when the ring buffer supports per-cpu swap.
 */
static void show_snapshot_percpu_help(struct seq_file *m)
{
	seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
		    "#                      Takes a snapshot of the main buffer for this cpu.\n");
#else
	seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
		    "#                     Must use main snapshot file to allocate.\n");
#endif
	seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
		    "#                      (Doesn't have to be '2' works with any number that\n"
		    "#                       is not a '0' or '1')\n");
}
2994
/*
 * Print the snapshot state and command help at the top of an empty
 * snapshot file read. Chooses main vs per-cpu help from iter->cpu_file.
 */
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{
	if (iter->tr->allocated_snapshot)
		seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
	else
		seq_puts(m, "#\n# * Snapshot is freed *\n#\n");

	seq_puts(m, "# Snapshot commands:\n");
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
		show_snapshot_main_help(m);
	else
		show_snapshot_percpu_help(m);
}
#else
/* Should never be called */
static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
#endif
3012
/*
 * seq_file ->show() for the trace file. Three cases:
 *  - no entry yet: print the file header (tracer name, snapshot help
 *    or the default/tracer-specific header);
 *  - leftover data from a previous overflowing show: flush it;
 *  - otherwise format the current entry and copy it to the seq_file,
 *    remembering any overflow in iter->leftover for the next call.
 */
static int s_show(struct seq_file *m, void *v)
{
	struct trace_iterator *iter = v;
	int ret;

	if (iter->ent == NULL) {
		if (iter->tr) {
			seq_printf(m, "# tracer: %s\n", iter->trace->name);
			seq_puts(m, "#\n");
			test_ftrace_alive(m);
		}
		if (iter->snapshot && trace_empty(iter))
			print_snapshot_help(m, iter);
		else if (iter->trace && iter->trace->print_header)
			iter->trace->print_header(m);
		else
			trace_default_header(m);

	} else if (iter->leftover) {
		/*
		 * If we filled the seq_file buffer earlier, we
		 * want to just show it now.
		 */
		ret = trace_print_seq(m, &iter->seq);

		/* ret should this time be zero, but you never know */
		iter->leftover = ret;

	} else {
		print_trace_line(iter);
		ret = trace_print_seq(m, &iter->seq);
		/*
		 * If we overflow the seq_file buffer, then it will
		 * ask us for this data again at start up.
		 * Use that instead.
		 * ret is 0 if seq_file write succeeded.
		 * -1 otherwise.
		 */
		iter->leftover = ret;
	}

	return 0;
}
3056
/*
 * Should be used after trace_array_get(), trace_types_lock
 * ensures that i_cdev was already initialized.
 */
static inline int tracing_get_cpu(struct inode *inode)
{
	/* Per-cpu files stash (cpu + 1) in i_cdev; 0 means "all CPUs". */
	if (inode->i_cdev) /* See trace_create_cpu_file() */
		return (long)inode->i_cdev - 1;
	return RING_BUFFER_ALL_CPUS;
}
3067
/* seq_file iteration over trace entries (the "trace" file). */
static const struct seq_operations tracer_seq_ops = {
	.start		= s_start,
	.next		= s_next,
	.stop		= s_stop,
	.show		= s_show,
};
3074
/*
 * Set up a trace_iterator for reading the trace file. Allocates the
 * iterator (as seq_file private data), copies the current tracer so a
 * concurrent tracer switch cannot race the reader, selects the main or
 * max (snapshot) buffer, stops tracing for non-snapshot reads, and
 * prepares/starts ring-buffer read iterators for the selected CPU(s).
 *
 * Returns the iterator or an ERR_PTR on failure; on failure all
 * partially-acquired resources are released via the goto tail.
 */
static struct trace_iterator *
__tracing_open(struct inode *inode, struct file *file, bool snapshot)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int cpu;

	if (tracing_disabled)
		return ERR_PTR(-ENODEV);

	iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
	if (!iter)
		return ERR_PTR(-ENOMEM);

	iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
				    GFP_KERNEL);
	if (!iter->buffer_iter)
		goto release;

	/*
	 * We make a copy of the current tracer to avoid concurrent
	 * changes on it while we are reading.
	 */
	mutex_lock(&trace_types_lock);
	iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
	if (!iter->trace)
		goto fail;

	*iter->trace = *tr->current_trace;

	/* Tracks which CPUs have already been annotated (see s_show path). */
	if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
		goto fail;

	iter->tr = tr;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Currently only the top directory has a snapshot */
	if (tr->current_trace->print_max || snapshot)
		iter->trace_buffer = &tr->max_buffer;
	else
#endif
		iter->trace_buffer = &tr->trace_buffer;
	iter->snapshot = snapshot;
	iter->pos = -1;
	iter->cpu_file = tracing_get_cpu(inode);
	mutex_init(&iter->mutex);

	/* Notify the tracer early; before we stop tracing. */
	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;

	/* stop the trace while dumping if we are not opening "snapshot" */
	if (!iter->snapshot)
		tracing_stop_tr(tr);

	/* Prepare all iterators first, sync once, then start reading. */
	if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter->buffer_iter[cpu] =
				ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		}
		ring_buffer_read_prepare_sync();
		for_each_tracing_cpu(cpu) {
			ring_buffer_read_start(iter->buffer_iter[cpu]);
			tracing_iter_reset(iter, cpu);
		}
	} else {
		cpu = iter->cpu_file;
		iter->buffer_iter[cpu] =
			ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
		ring_buffer_read_prepare_sync();
		ring_buffer_read_start(iter->buffer_iter[cpu]);
		tracing_iter_reset(iter, cpu);
	}

	mutex_unlock(&trace_types_lock);

	return iter;

 fail:
	mutex_unlock(&trace_types_lock);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
release:
	seq_release_private(inode, file);
	return ERR_PTR(-ENOMEM);
}
3169
/*
 * Generic tracefs open: stash the inode's private data on the file.
 * Fails once tracing has been permanently disabled.
 */
int tracing_open_generic(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	filp->private_data = inode->i_private;
	return 0;
}
3178
Geyslan G. Bem2e864212013-10-18 21:15:54 -03003179bool tracing_is_disabled(void)
3180{
3181 return (tracing_disabled) ? true: false;
3182}
3183
/*
 * Open and update trace_array ref count.
 * Must have the current trace_array passed to it.
 */
static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	if (tracing_disabled)
		return -ENODEV;

	/* Pin the trace_array for the lifetime of the open file. */
	if (trace_array_get(tr) < 0)
		return -ENODEV;

	filp->private_data = inode->i_private;

	return 0;
}
3202
/*
 * Release the trace file: tear down the per-cpu ring-buffer read
 * iterators, notify the tracer, restart tracing (unless this was the
 * snapshot file), drop the trace_array reference, and free everything
 * __tracing_open() allocated. Write-only opens have no iterator and
 * only drop the array reference.
 */
static int tracing_release(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m = file->private_data;
	struct trace_iterator *iter;
	int cpu;

	if (!(file->f_mode & FMODE_READ)) {
		trace_array_put(tr);
		return 0;
	}

	/* Writes do not use seq_file */
	iter = m->private;
	mutex_lock(&trace_types_lock);

	for_each_tracing_cpu(cpu) {
		if (iter->buffer_iter[cpu])
			ring_buffer_read_finish(iter->buffer_iter[cpu]);
	}

	if (iter->trace && iter->trace->close)
		iter->trace->close(iter);

	if (!iter->snapshot)
		/* reenable tracing if it was previously enabled */
		tracing_start_tr(tr);

	/* __trace_array_put: trace_types_lock is already held here. */
	__trace_array_put(tr);

	mutex_unlock(&trace_types_lock);

	mutex_destroy(&iter->mutex);
	free_cpumask_var(iter->started);
	kfree(iter->trace);
	kfree(iter->buffer_iter);
	seq_release_private(inode, file);

	return 0;
}
3243
/* Release counterpart of tracing_open_generic_tr(): drop the array ref. */
static int tracing_release_generic_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);
	return 0;
}
3251
/* Release for single_open()-style files that also hold an array ref. */
static int tracing_single_release_tr(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;

	trace_array_put(tr);

	return single_release(inode, file);
}
3260
/*
 * Open the "trace" file. A write open with O_TRUNC erases the buffer
 * contents (one CPU or all, per the inode); a read open builds the
 * seq_file iterator via __tracing_open(). The trace_array reference
 * taken up front is dropped again on failure.
 */
static int tracing_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	/* If this file was open for write, then erase contents */
	if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
		int cpu = tracing_get_cpu(inode);

		if (cpu == RING_BUFFER_ALL_CPUS)
			tracing_reset_online_cpus(&tr->trace_buffer);
		else
			tracing_reset(&tr->trace_buffer, cpu);
	}

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, false);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
		else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			iter->iter_flags |= TRACE_FILE_LAT_FMT;
	}

	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
3293
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003294/*
3295 * Some tracers are not suitable for instance buffers.
3296 * A tracer is always available for the global array (toplevel)
3297 * or if it explicitly states that it is.
3298 */
3299static bool
3300trace_ok_for_array(struct tracer *t, struct trace_array *tr)
3301{
3302 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
3303}
3304
3305/* Find the next tracer that this trace array may use */
3306static struct tracer *
3307get_tracer_for_array(struct trace_array *tr, struct tracer *t)
3308{
3309 while (t && !trace_ok_for_array(t, tr))
3310 t = t->next;
3311
3312 return t;
3313}
3314
Ingo Molnare309b412008-05-12 21:20:51 +02003315static void *
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003316t_next(struct seq_file *m, void *v, loff_t *pos)
3317{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003318 struct trace_array *tr = m->private;
Li Zefanf129e962009-06-24 09:53:44 +08003319 struct tracer *t = v;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003320
3321 (*pos)++;
3322
3323 if (t)
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05003324 t = get_tracer_for_array(tr, t->next);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003325
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003326 return t;
3327}
3328
/*
 * seq_file ->start() for available_tracers: take trace_types_lock
 * (released in t_stop) and skip to the tracer at position *pos.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct trace_array *tr = m->private;
	struct tracer *t;
	loff_t l = 0;

	mutex_lock(&trace_types_lock);

	t = get_tracer_for_array(tr, trace_types);
	for (; t && l < *pos; t = t_next(m, t, &l))
		;

	return t;
}
3343
/* seq_file ->stop(): drop the lock taken in t_start(). */
static void t_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&trace_types_lock);
}
3348
3349static int t_show(struct seq_file *m, void *v)
3350{
3351 struct tracer *t = v;
3352
3353 if (!t)
3354 return 0;
3355
Rasmus Villemoesfa6f0cc2014-11-08 21:42:10 +01003356 seq_puts(m, t->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003357 if (t->next)
3358 seq_putc(m, ' ');
3359 else
3360 seq_putc(m, '\n');
3361
3362 return 0;
3363}
3364
/* seq_file iteration over registered tracers (available_tracers). */
static const struct seq_operations show_traces_seq_ops = {
	.start		= t_start,
	.next		= t_next,
	.stop		= t_stop,
	.show		= t_show,
};
3371
/*
 * Open available_tracers: set up the tracer-list seq_file and hand it
 * this trace_array so t_start() can filter tracers per array.
 */
static int show_traces_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct seq_file *m;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	ret = seq_open(file, &show_traces_seq_ops);
	if (ret)
		return ret;

	m = file->private_data;
	m->private = tr;

	return 0;
}
3390
/*
 * Writes to the trace file are accepted but discarded; opening for
 * write with O_TRUNC is what actually clears the buffer (tracing_open).
 */
static ssize_t
tracing_write_stub(struct file *filp, const char __user *ubuf,
		   size_t count, loff_t *ppos)
{
	return count;
}
3397
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003398loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
Slava Pestov364829b2010-11-24 15:13:16 -08003399{
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003400 int ret;
3401
Slava Pestov364829b2010-11-24 15:13:16 -08003402 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003403 ret = seq_lseek(file, offset, whence);
Slava Pestov364829b2010-11-24 15:13:16 -08003404 else
Steven Rostedt (Red Hat)098c8792013-12-21 17:39:40 -05003405 file->f_pos = ret = 0;
3406
3407 return ret;
Slava Pestov364829b2010-11-24 15:13:16 -08003408}
3409
/* file_operations for the "trace" file: seq_file reads, writes are ignored */
static const struct file_operations tracing_fops = {
	.open		= tracing_open,
	.read		= seq_read,
	.write		= tracing_write_stub,
	.llseek		= tracing_lseek,
	.release	= tracing_release,
};
3417
/* file_operations for "available_tracers" */
static const struct file_operations show_traces_fops = {
	.open		= show_traces_open,
	.read		= seq_read,
	.release	= seq_release,
	.llseek		= seq_lseek,
};
3424
/*
 * The tracer itself will not take this lock, but still we want
 * to provide a consistent cpumask to user-space:
 */
static DEFINE_MUTEX(tracing_cpumask_update_lock);

/*
 * Temporary storage for the character representation of the
 * CPU bitmask (and one more byte for the newline):
 * Accessed only under tracing_cpumask_update_lock (see
 * tracing_cpumask_read()).
 */
static char mask_str[NR_CPUS + 1];
3436
Ingo Molnarc7078de2008-05-12 21:20:52 +02003437static ssize_t
3438tracing_cpumask_read(struct file *filp, char __user *ubuf,
3439 size_t count, loff_t *ppos)
3440{
Alexander Z Lamccfe9e42013-08-08 09:47:45 -07003441 struct trace_array *tr = file_inode(filp)->i_private;
Ingo Molnar36dfe922008-05-12 21:20:52 +02003442 int len;
Ingo Molnarc7078de2008-05-12 21:20:52 +02003443
3444 mutex_lock(&tracing_cpumask_update_lock);
Ingo Molnar36dfe922008-05-12 21:20:52 +02003445
Tejun Heo1a402432015-02-13 14:37:39 -08003446 len = snprintf(mask_str, count, "%*pb\n",
3447 cpumask_pr_args(tr->tracing_cpumask));
3448 if (len >= count) {
Ingo Molnar36dfe922008-05-12 21:20:52 +02003449 count = -EINVAL;
3450 goto out_err;
3451 }
Ingo Molnar36dfe922008-05-12 21:20:52 +02003452 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
3453
3454out_err:
Ingo Molnarc7078de2008-05-12 21:20:52 +02003455 mutex_unlock(&tracing_cpumask_update_lock);
3456
3457 return count;
3458}
3459
/*
 * Write handler for "tracing_cpumask": parse a cpumask from user-space
 * and install it as the set of CPUs this instance records on.  For each
 * CPU whose bit flips, the per-cpu disabled counter and the ring-buffer
 * record-enable state are adjusted so tracing stops/starts atomically
 * with respect to the mask change.
 *
 * Returns the byte count consumed, -ENOMEM if the temporary mask cannot
 * be allocated, or the cpumask_parse_user() error.
 */
static ssize_t
tracing_cpumask_write(struct file *filp, const char __user *ubuf,
		      size_t count, loff_t *ppos)
{
	struct trace_array *tr = file_inode(filp)->i_private;
	cpumask_var_t tracing_cpumask_new;
	int err, cpu;

	if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
	if (err)
		goto err_unlock;	/* NOTE(review): label only frees; no lock is held here */

	mutex_lock(&tracing_cpumask_update_lock);

	/* Keep the max-latency lock held with IRQs off while flipping bits */
	local_irq_disable();
	arch_spin_lock(&tr->max_lock);
	for_each_tracing_cpu(cpu) {
		/*
		 * Increase/decrease the disabled counter if we are
		 * about to flip a bit in the cpumask:
		 */
		if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				!cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
		}
		if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
				cpumask_test_cpu(cpu, tracing_cpumask_new)) {
			atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
			ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
		}
	}
	arch_spin_unlock(&tr->max_lock);
	local_irq_enable();

	cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);

	mutex_unlock(&tracing_cpumask_update_lock);
	free_cpumask_var(tracing_cpumask_new);

	return count;

err_unlock:
	free_cpumask_var(tracing_cpumask_new);

	return err;
}
3510
/* file_operations for "tracing_cpumask"; open/release keep a trace_array ref */
static const struct file_operations tracing_cpumask_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_cpumask_read,
	.write		= tracing_cpumask_write,
	.release	= tracing_release_generic_tr,
	.llseek		= generic_file_llseek,
};
3518
Li Zefanfdb372e2009-12-08 11:15:59 +08003519static int tracing_trace_options_show(struct seq_file *m, void *v)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003520{
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003521 struct tracer_opt *trace_opts;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003522 struct trace_array *tr = m->private;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003523 u32 tracer_flags;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003524 int i;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003525
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003526 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003527 tracer_flags = tr->current_trace->flags->val;
3528 trace_opts = tr->current_trace->flags->opts;
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003529
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003530 for (i = 0; trace_options[i]; i++) {
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04003531 if (tr->trace_flags & (1 << i))
Li Zefanfdb372e2009-12-08 11:15:59 +08003532 seq_printf(m, "%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003533 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003534 seq_printf(m, "no%s\n", trace_options[i]);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003535 }
3536
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003537 for (i = 0; trace_opts[i].name; i++) {
3538 if (tracer_flags & trace_opts[i].bit)
Li Zefanfdb372e2009-12-08 11:15:59 +08003539 seq_printf(m, "%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003540 else
Li Zefanfdb372e2009-12-08 11:15:59 +08003541 seq_printf(m, "no%s\n", trace_opts[i].name);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003542 }
Steven Rostedtd8e83d22009-02-26 23:55:58 -05003543 mutex_unlock(&trace_types_lock);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003544
Li Zefanfdb372e2009-12-08 11:15:59 +08003545 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003546}
3547
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003548static int __set_tracer_option(struct trace_array *tr,
Li Zefan8d18eaa2009-12-08 11:17:06 +08003549 struct tracer_flags *tracer_flags,
3550 struct tracer_opt *opts, int neg)
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003551{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003552 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003553 int ret;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003554
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003555 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003556 if (ret)
3557 return ret;
3558
3559 if (neg)
Zhaolei77708412009-08-07 18:53:21 +08003560 tracer_flags->val &= ~opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003561 else
Zhaolei77708412009-08-07 18:53:21 +08003562 tracer_flags->val |= opts->bit;
Frederic Weisbeckeradf9f192008-11-17 19:23:42 +01003563 return 0;
3564}
3565
Li Zefan8d18eaa2009-12-08 11:17:06 +08003566/* Try to assign a tracer specific option */
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003567static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
Li Zefan8d18eaa2009-12-08 11:17:06 +08003568{
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003569 struct tracer *trace = tr->current_trace;
Li Zefan8d18eaa2009-12-08 11:17:06 +08003570 struct tracer_flags *tracer_flags = trace->flags;
3571 struct tracer_opt *opts = NULL;
3572 int i;
3573
3574 for (i = 0; tracer_flags->opts[i].name; i++) {
3575 opts = &tracer_flags->opts[i];
3576
3577 if (strcmp(cmp, opts->name) == 0)
Steven Rostedt (Red Hat)8c1a49a2014-01-10 11:13:54 -05003578 return __set_tracer_option(tr, trace->flags, opts, neg);
Li Zefan8d18eaa2009-12-08 11:17:06 +08003579 }
3580
3581 return -EINVAL;
3582}
3583
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003584/* Some tracers require overwrite to stay enabled */
3585int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
3586{
3587 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
3588 return -1;
3589
3590 return 0;
3591}
3592
/*
 * Set or clear one core trace option bit on @tr and propagate the
 * change to the subsystems that care (cmdline recording, ring-buffer
 * overwrite mode, trace_printk).  Returns 0 on success (including a
 * no-op), -EINVAL if the current tracer vetoes the change.
 */
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
	/* do nothing if flag is already set */
	if (!!(tr->trace_flags & mask) == !!enabled)
		return 0;

	/* Give the tracer a chance to approve the change */
	if (tr->current_trace->flag_changed)
		if (tr->current_trace->flag_changed(tr, mask, !!enabled))
			return -EINVAL;

	if (enabled)
		tr->trace_flags |= mask;
	else
		tr->trace_flags &= ~mask;

	if (mask == TRACE_ITER_RECORD_CMD)
		trace_event_enable_cmd_record(enabled);

	if (mask == TRACE_ITER_OVERWRITE) {
		ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
#ifdef CONFIG_TRACER_MAX_TRACE
		/* Keep the snapshot (max) buffer's overwrite mode in sync */
		ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
#endif
	}

	if (mask == TRACE_ITER_PRINTK) {
		trace_printk_start_stop_comm(enabled);
		trace_printk_control(enabled);
	}

	return 0;
}
3626
/*
 * Parse a single option token ("opt" or "noopt"), first against the
 * core trace_options table, then against the current tracer's private
 * options.  @option is stripped in place and restored afterwards so the
 * same buffer can be parsed again (see apply_trace_boot_options()).
 */
static int trace_set_options(struct trace_array *tr, char *option)
{
	char *cmp;
	int neg = 0;
	int ret = -ENODEV;
	int i;
	size_t orig_len = strlen(option);

	cmp = strstrip(option);

	/* A leading "no" means clear the option */
	if (strncmp(cmp, "no", 2) == 0) {
		neg = 1;
		cmp += 2;
	}

	mutex_lock(&trace_types_lock);

	for (i = 0; trace_options[i]; i++) {
		if (strcmp(cmp, trace_options[i]) == 0) {
			ret = set_tracer_flag(tr, 1 << i, !neg);
			break;
		}
	}

	/* If no option could be set, test the specific tracer options */
	if (!trace_options[i])
		ret = set_tracer_option(tr, cmp, neg);

	mutex_unlock(&trace_types_lock);

	/*
	 * If the first trailing whitespace is replaced with '\0' by strstrip,
	 * turn it back into a space.
	 */
	if (orig_len > strlen(option))
		option[strlen(option)] = ' ';

	return ret;
}
3666
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003667static void __init apply_trace_boot_options(void)
3668{
3669 char *buf = trace_boot_options_buf;
3670 char *option;
3671
3672 while (true) {
3673 option = strsep(&buf, ",");
3674
3675 if (!option)
3676 break;
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003677
Steven Rostedt (Red Hat)43ed3842015-11-03 22:15:14 -05003678 if (*option)
3679 trace_set_options(&global_trace, option);
Jiaxing Wanga4d1e682015-11-04 09:14:29 +08003680
3681 /* Put back the comma to allow this to be called again */
3682 if (buf)
3683 *(buf - 1) = ',';
3684 }
3685}
3686
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003687static ssize_t
3688tracing_trace_options_write(struct file *filp, const char __user *ubuf,
3689 size_t cnt, loff_t *ppos)
3690{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003691 struct seq_file *m = filp->private_data;
3692 struct trace_array *tr = m->private;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003693 char buf[64];
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003694 int ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003695
3696 if (cnt >= sizeof(buf))
3697 return -EINVAL;
3698
3699 if (copy_from_user(&buf, ubuf, cnt))
3700 return -EFAULT;
3701
Steven Rostedta8dd2172013-01-09 20:54:17 -05003702 buf[cnt] = 0;
3703
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003704 ret = trace_set_options(tr, buf);
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04003705 if (ret < 0)
3706 return ret;
Steven Rostedt7bcfaf52012-11-01 22:56:07 -04003707
Jiri Olsacf8517c2009-10-23 19:36:16 -04003708 *ppos += cnt;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02003709
3710 return cnt;
3711}
3712
Li Zefanfdb372e2009-12-08 11:15:59 +08003713static int tracing_trace_options_open(struct inode *inode, struct file *file)
3714{
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003715 struct trace_array *tr = inode->i_private;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003716 int ret;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003717
Li Zefanfdb372e2009-12-08 11:15:59 +08003718 if (tracing_disabled)
3719 return -ENODEV;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04003720
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04003721 if (trace_array_get(tr) < 0)
3722 return -ENODEV;
3723
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07003724 ret = single_open(file, tracing_trace_options_show, inode->i_private);
3725 if (ret < 0)
3726 trace_array_put(tr);
3727
3728 return ret;
Li Zefanfdb372e2009-12-08 11:15:59 +08003729}
3730
/* file_operations for "trace_options" (single_open seq_file + writes) */
static const struct file_operations tracing_iter_fops = {
	.open		= tracing_trace_options_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_trace_options_write,
};
3738
/*
 * Contents of the "README" tracefs file: a mini-HOWTO describing the
 * tracing control files.  Sections are compiled in only when the
 * corresponding feature (dynamic ftrace, stack tracer, snapshot, ...)
 * is configured.  Served verbatim by tracing_readme_read().
 */
static const char readme_msg[] =
	"tracing mini-HOWTO:\n\n"
	"# echo 0 > tracing_on : quick way to disable tracing\n"
	"# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
	" Important files:\n"
	"  trace\t\t\t- The static contents of the buffer\n"
	"\t\t\t  To clear the buffer write into this file: echo > trace\n"
	"  trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
	"  current_tracer\t- function and latency tracers\n"
	"  available_tracers\t- list of configured tracers for current_tracer\n"
	"  buffer_size_kb\t- view and modify size of per cpu buffer\n"
	"  buffer_total_size_kb  - view total size of all cpu buffers\n\n"
	"  trace_clock\t\t-change the clock used to order events\n"
	"       local:   Per cpu clock but may not be synced across CPUs\n"
	"      global:   Synced across CPUs but slows tracing down.\n"
	"     counter:   Not a clock, but just an increment\n"
	"      uptime:   Jiffy counter from time of boot\n"
	"        perf:   Same clock that perf events use\n"
#ifdef CONFIG_X86_64
	"     x86-tsc:   TSC cycle counter\n"
#endif
	"\n  trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
	"  tracing_cpumask\t- Limit which CPUs to trace\n"
	"  instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
	"\t\t\t  Remove sub-buffer with rmdir\n"
	"  trace_options\t\t- Set format or modify how tracing happens\n"
	"\t\t\t  Disable an option by adding a suffix 'no' to the\n"
	"\t\t\t  option name\n"
	"  saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"\n  available_filter_functions - list of functions that can be filtered on\n"
	"  set_ftrace_filter\t- echo function name in here to only trace these\n"
	"\t\t\t    functions\n"
	"\t     accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t     modules: Can select a group via module\n"
	"\t      Format: :mod:<module-name>\n"
	"\t     example: echo :mod:ext3 > set_ftrace_filter\n"
	"\t    triggers: a command to perform when function is hit\n"
	"\t      Format: <function>:<trigger>[:count]\n"
	"\t     trigger: traceon, traceoff\n"
	"\t\t      enable_event:<system>:<event>\n"
	"\t\t      disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t      stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t      snapshot\n"
#endif
	"\t\t      dump\n"
	"\t\t      cpudump\n"
	"\t     example: echo do_fault:traceoff > set_ftrace_filter\n"
	"\t              echo do_trap:traceoff:3 > set_ftrace_filter\n"
	"\t     The first one will disable tracing every time do_fault is hit\n"
	"\t     The second will disable tracing at most 3 times when do_trap is hit\n"
	"\t       The first time do trap is hit and it disables tracing, the\n"
	"\t       counter will decrement to 2. If tracing is already disabled,\n"
	"\t       the counter will not decrement. It only decrements when the\n"
	"\t       trigger did work\n"
	"\t     To remove trigger without count:\n"
	"\t       echo '!<function>:<trigger> > set_ftrace_filter\n"
	"\t     To remove trigger with a count:\n"
	"\t       echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
	"  set_ftrace_notrace\t- echo function name in here to never trace.\n"
	"\t    accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
	"\t    modules: Can select a group via module command :mod:\n"
	"\t    Does not accept triggers\n"
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_TRACER
	"  set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
	"\t\t    (function)\n"
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	"  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
	"  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
	"  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\n  snapshot\t\t- Like 'trace' but shows the content of the static\n"
	"\t\t\t  snapshot buffer. Read the contents for more\n"
	"\t\t\t  information\n"
#endif
#ifdef CONFIG_STACK_TRACER
	"  stack_trace\t\t- Shows the max stack trace when active\n"
	"  stack_max_size\t- Shows current max stack size that was traced\n"
	"\t\t\t  Write into this file to reset the max size (trigger a\n"
	"\t\t\t  new trace)\n"
#ifdef CONFIG_DYNAMIC_FTRACE
	"  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
	"\t\t\t  traces\n"
#endif
#endif /* CONFIG_STACK_TRACER */
	"  events/\t\t- Directory containing all trace event subsystems:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
	"  events/<system>/\t- Directory containing all trace events for <system>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
	"\t\t\t  events\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"  events/<system>/<event>/\t- Directory containing control files for\n"
	"\t\t\t  <event>:\n"
	"      enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
	"      filter\t\t- If set, only events passing filter are traced\n"
	"      trigger\t\t- If set, a command to perform when event is hit\n"
	"\t    Format: <trigger>[:count][if <filter>]\n"
	"\t   trigger: traceon, traceoff\n"
	"\t            enable_event:<system>:<event>\n"
	"\t            disable_event:<system>:<event>\n"
#ifdef CONFIG_STACKTRACE
	"\t\t    stacktrace\n"
#endif
#ifdef CONFIG_TRACER_SNAPSHOT
	"\t\t    snapshot\n"
#endif
	"\t   example: echo traceoff > events/block/block_unplug/trigger\n"
	"\t            echo traceoff:3 > events/block/block_unplug/trigger\n"
	"\t            echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
	"\t                  events/block/block_unplug/trigger\n"
	"\t   The first disables tracing every time block_unplug is hit.\n"
	"\t   The second disables tracing the first 3 times block_unplug is hit.\n"
	"\t   The third enables the kmalloc event the first 3 times block_unplug\n"
	"\t     is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
	"\t   Like function triggers, the counter is only decremented if it\n"
	"\t    enabled or disabled tracing.\n"
	"\t   To remove a trigger without a count:\n"
	"\t     echo '!<trigger> > <system>/<event>/trigger\n"
	"\t   To remove a trigger with a count:\n"
	"\t     echo '!<trigger>:0 > <system>/<event>/trigger\n"
	"\t   Filters can be ignored when removing a trigger.\n"
;
3867
3868static ssize_t
3869tracing_readme_read(struct file *filp, char __user *ubuf,
3870 size_t cnt, loff_t *ppos)
3871{
3872 return simple_read_from_buffer(ubuf, cnt, ppos,
3873 readme_msg, strlen(readme_msg));
3874}
3875
/* file_operations for the read-only "README" file */
static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
3881
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003882static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003883{
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003884 unsigned int *ptr = v;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003885
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003886 if (*pos || m->count)
3887 ptr++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003888
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003889 (*pos)++;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003890
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003891 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
3892 ptr++) {
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003893 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
Avadh Patel69abe6a2009-04-10 16:04:48 -04003894 continue;
3895
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003896 return ptr;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003897 }
3898
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003899 return NULL;
3900}
Avadh Patel69abe6a2009-04-10 16:04:48 -04003901
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003902static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
3903{
3904 void *v;
3905 loff_t l = 0;
Avadh Patel69abe6a2009-04-10 16:04:48 -04003906
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003907 preempt_disable();
3908 arch_spin_lock(&trace_cmdline_lock);
3909
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09003910 v = &savedcmd->map_cmdline_to_pid[0];
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003911 while (l <= *pos) {
3912 v = saved_cmdlines_next(m, v, &l);
3913 if (!v)
3914 return NULL;
3915 }
3916
3917 return v;
3918}
3919
/* seq_file ->stop(): release the lock taken in saved_cmdlines_start() */
static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
3925
3926static int saved_cmdlines_show(struct seq_file *m, void *v)
3927{
3928 char buf[TASK_COMM_LEN];
3929 unsigned int *pid = v;
3930
Steven Rostedt (Red Hat)4c27e752014-05-30 10:49:46 -04003931 __trace_find_cmdline(*pid, buf);
Yoshihiro YUNOMAE42584c82014-02-20 17:44:31 +09003932 seq_printf(m, "%d %s\n", *pid, buf);
3933 return 0;
3934}
3935
/* seq_file iterator for the "saved_cmdlines" file */
static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
3942
3943static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
3944{
3945 if (tracing_disabled)
3946 return -ENODEV;
3947
3948 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
Avadh Patel69abe6a2009-04-10 16:04:48 -04003949}
3950
/* file_operations for "saved_cmdlines" (standard seq_file plumbing) */
static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
3957
/*
 * Read handler for "saved_cmdlines_size": report the current number of
 * saved-cmdline slots.  cmdline_num is sampled under trace_cmdline_lock
 * so a concurrent resize cannot hand us a half-swapped savedcmd.
 */
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
3971
3972static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
3973{
3974 kfree(s->saved_cmdlines);
3975 kfree(s->map_cmdline_to_pid);
3976 kfree(s);
3977}
3978
/*
 * Replace the saved-cmdlines buffer with a freshly allocated one of
 * @val entries.  The new buffer is fully built before the swap; the
 * global savedcmd pointer is exchanged under trace_cmdline_lock so
 * readers never see a partially initialized buffer, and the old buffer
 * is freed outside the lock.  Returns 0 or -ENOMEM.
 */
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}
4000
4001static ssize_t
4002tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4003 size_t cnt, loff_t *ppos)
4004{
4005 unsigned long val;
4006 int ret;
4007
4008 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4009 if (ret)
4010 return ret;
4011
4012 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4013 if (!val || val > PID_MAX_DEFAULT)
4014 return -EINVAL;
4015
4016 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4017 if (ret < 0)
4018 return ret;
4019
4020 *ppos += cnt;
4021
4022 return cnt;
4023}
4024
/*
 * file_operations for "saved_cmdlines_size".
 * NOTE(review): no .llseek is set here, unlike the sibling fops in this
 * file — confirm whether the default llseek behavior is intended.
 */
static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
4030
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04004031#ifdef CONFIG_TRACE_ENUM_MAP_FILE
4032static union trace_enum_map_item *
4033update_enum_map(union trace_enum_map_item *ptr)
4034{
4035 if (!ptr->map.enum_string) {
4036 if (ptr->tail.next) {
4037 ptr = ptr->tail.next;
4038 /* Set ptr to the next real item (skip head) */
4039 ptr++;
4040 } else
4041 return NULL;
4042 }
4043 return ptr;
4044}
4045
4046static void *enum_map_next(struct seq_file *m, void *v, loff_t *pos)
4047{
4048 union trace_enum_map_item *ptr = v;
4049
4050 /*
4051 * Paranoid! If ptr points to end, we don't want to increment past it.
4052 * This really should never happen.
4053 */
4054 ptr = update_enum_map(ptr);
4055 if (WARN_ON_ONCE(!ptr))
4056 return NULL;
4057
4058 ptr++;
4059
4060 (*pos)++;
4061
4062 ptr = update_enum_map(ptr);
4063
4064 return ptr;
4065}
4066
4067static void *enum_map_start(struct seq_file *m, loff_t *pos)
4068{
4069 union trace_enum_map_item *v;
4070 loff_t l = 0;
4071
4072 mutex_lock(&trace_enum_mutex);
4073
4074 v = trace_enum_maps;
4075 if (v)
4076 v++;
4077
4078 while (v && l < *pos) {
4079 v = enum_map_next(m, v, &l);
4080 }
4081
4082 return v;
4083}
4084
4085static void enum_map_stop(struct seq_file *m, void *v)
4086{
4087 mutex_unlock(&trace_enum_mutex);
4088}
4089
4090static int enum_map_show(struct seq_file *m, void *v)
4091{
4092 union trace_enum_map_item *ptr = v;
4093
4094 seq_printf(m, "%s %ld (%s)\n",
4095 ptr->map.enum_string, ptr->map.enum_value,
4096 ptr->map.system);
4097
4098 return 0;
4099}
4100
4101static const struct seq_operations tracing_enum_map_seq_ops = {
4102 .start = enum_map_start,
4103 .next = enum_map_next,
4104 .stop = enum_map_stop,
4105 .show = enum_map_show,
4106};
4107
4108static int tracing_enum_map_open(struct inode *inode, struct file *filp)
4109{
4110 if (tracing_disabled)
4111 return -ENODEV;
4112
4113 return seq_open(filp, &tracing_enum_map_seq_ops);
4114}
4115
4116static const struct file_operations tracing_enum_map_fops = {
4117 .open = tracing_enum_map_open,
4118 .read = seq_read,
4119 .llseek = seq_lseek,
4120 .release = seq_release,
4121};
4122
4123static inline union trace_enum_map_item *
4124trace_enum_jmp_to_tail(union trace_enum_map_item *ptr)
4125{
4126 /* Return tail of array given the head */
4127 return ptr + ptr->head.length + 1;
4128}
4129
4130static void
4131trace_insert_enum_map_file(struct module *mod, struct trace_enum_map **start,
4132 int len)
4133{
4134 struct trace_enum_map **stop;
4135 struct trace_enum_map **map;
4136 union trace_enum_map_item *map_array;
4137 union trace_enum_map_item *ptr;
4138
4139 stop = start + len;
4140
4141 /*
4142 * The trace_enum_maps contains the map plus a head and tail item,
4143 * where the head holds the module and length of array, and the
4144 * tail holds a pointer to the next list.
4145 */
4146 map_array = kmalloc(sizeof(*map_array) * (len + 2), GFP_KERNEL);
4147 if (!map_array) {
4148 pr_warning("Unable to allocate trace enum mapping\n");
4149 return;
4150 }
4151
4152 mutex_lock(&trace_enum_mutex);
4153
4154 if (!trace_enum_maps)
4155 trace_enum_maps = map_array;
4156 else {
4157 ptr = trace_enum_maps;
4158 for (;;) {
4159 ptr = trace_enum_jmp_to_tail(ptr);
4160 if (!ptr->tail.next)
4161 break;
4162 ptr = ptr->tail.next;
4163
4164 }
4165 ptr->tail.next = map_array;
4166 }
4167 map_array->head.mod = mod;
4168 map_array->head.length = len;
4169 map_array++;
4170
4171 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
4172 map_array->map = **map;
4173 map_array++;
4174 }
4175 memset(map_array, 0, sizeof(*map_array));
4176
4177 mutex_unlock(&trace_enum_mutex);
4178}
4179
4180static void trace_create_enum_file(struct dentry *d_tracer)
4181{
4182 trace_create_file("enum_map", 0444, d_tracer,
4183 NULL, &tracing_enum_map_fops);
4184}
4185
4186#else /* CONFIG_TRACE_ENUM_MAP_FILE */
4187static inline void trace_create_enum_file(struct dentry *d_tracer) { }
4188static inline void trace_insert_enum_map_file(struct module *mod,
4189 struct trace_enum_map **start, int len) { }
4190#endif /* !CONFIG_TRACE_ENUM_MAP_FILE */
4191
/*
 * Register a module's enum maps: update the event system's enum
 * mappings and (when configured) expose them via the enum_map file.
 */
static void trace_insert_enum_map(struct module *mod,
				  struct trace_enum_map **start, int len)
{
	if (len <= 0)
		return;

	trace_event_enum_update(start, len);

	trace_insert_enum_map_file(mod, start, len);
}
4206
Yoshihiro YUNOMAE939c7a42014-06-05 10:24:27 +09004207static ssize_t
Jamie Gennis6019e592012-11-21 15:04:25 -08004208tracing_saved_tgids_read(struct file *file, char __user *ubuf,
4209 size_t cnt, loff_t *ppos)
4210{
4211 char *file_buf;
4212 char *buf;
4213 int len = 0;
4214 int pid;
4215 int i;
4216
Dmitry Shmidt99dd30a2015-10-28 10:45:04 -07004217 file_buf = kmalloc(SAVED_CMDLINES_DEFAULT*(16+1+16), GFP_KERNEL);
Jamie Gennis6019e592012-11-21 15:04:25 -08004218 if (!file_buf)
4219 return -ENOMEM;
4220
4221 buf = file_buf;
4222
Dmitry Shmidt99dd30a2015-10-28 10:45:04 -07004223 for (i = 0; i < SAVED_CMDLINES_DEFAULT; i++) {
Jamie Gennis6019e592012-11-21 15:04:25 -08004224 int tgid;
4225 int r;
4226
Dmitry Shmidt99dd30a2015-10-28 10:45:04 -07004227 pid = savedcmd->map_cmdline_to_pid[i];
Jamie Gennis6019e592012-11-21 15:04:25 -08004228 if (pid == -1 || pid == NO_CMDLINE_MAP)
4229 continue;
4230
4231 tgid = trace_find_tgid(pid);
4232 r = sprintf(buf, "%d %d\n", pid, tgid);
4233 buf += r;
4234 len += r;
4235 }
4236
4237 len = simple_read_from_buffer(ubuf, cnt, ppos,
4238 file_buf, len);
4239
4240 kfree(file_buf);
4241
4242 return len;
4243}
4244
4245static const struct file_operations tracing_saved_tgids_fops = {
4246 .open = tracing_open_generic,
4247 .read = tracing_saved_tgids_read,
4248 .llseek = generic_file_llseek,
4249};
4250
4251static ssize_t
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004252tracing_set_trace_read(struct file *filp, char __user *ubuf,
4253 size_t cnt, loff_t *ppos)
4254{
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004255 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004256 char buf[MAX_TRACER_SIZE+2];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004257 int r;
4258
4259 mutex_lock(&trace_types_lock);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004260 r = sprintf(buf, "%s\n", tr->current_trace->name);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004261 mutex_unlock(&trace_types_lock);
4262
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004263 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004264}
4265
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004266int tracer_init(struct tracer *t, struct trace_array *tr)
4267{
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004268 tracing_reset_online_cpus(&tr->trace_buffer);
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004269 return t->init(tr);
4270}
4271
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004272static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004273{
4274 int cpu;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05004275
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004276 for_each_tracing_cpu(cpu)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004277 per_cpu_ptr(buf->data, cpu)->entries = val;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004278}
4279
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004280#ifdef CONFIG_TRACER_MAX_TRACE
Hiraku Toyookad60da502012-10-17 11:56:16 +09004281/* resize @tr's buffer to the size of @size_tr's entries */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004282static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
4283 struct trace_buffer *size_buf, int cpu_id)
Hiraku Toyookad60da502012-10-17 11:56:16 +09004284{
4285 int cpu, ret = 0;
4286
4287 if (cpu_id == RING_BUFFER_ALL_CPUS) {
4288 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004289 ret = ring_buffer_resize(trace_buf->buffer,
4290 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004291 if (ret < 0)
4292 break;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004293 per_cpu_ptr(trace_buf->data, cpu)->entries =
4294 per_cpu_ptr(size_buf->data, cpu)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004295 }
4296 } else {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004297 ret = ring_buffer_resize(trace_buf->buffer,
4298 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004299 if (ret == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004300 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
4301 per_cpu_ptr(size_buf->data, cpu_id)->entries;
Hiraku Toyookad60da502012-10-17 11:56:16 +09004302 }
4303
4304 return ret;
4305}
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004306#endif /* CONFIG_TRACER_MAX_TRACE */
Hiraku Toyookad60da502012-10-17 11:56:16 +09004307
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004308static int __tracing_resize_ring_buffer(struct trace_array *tr,
4309 unsigned long size, int cpu)
Steven Rostedt73c51622009-03-11 13:42:01 -04004310{
4311 int ret;
4312
4313 /*
4314 * If kernel or user changes the size of the ring buffer
Steven Rostedta123c522009-03-12 11:21:08 -04004315 * we use the size that was given, and we can forget about
4316 * expanding it later.
Steven Rostedt73c51622009-03-11 13:42:01 -04004317 */
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05004318 ring_buffer_expanded = true;
Steven Rostedt73c51622009-03-11 13:42:01 -04004319
Steven Rostedtb382ede62012-10-10 21:44:34 -04004320 /* May be called before buffers are initialized */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004321 if (!tr->trace_buffer.buffer)
Steven Rostedtb382ede62012-10-10 21:44:34 -04004322 return 0;
4323
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004324 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004325 if (ret < 0)
4326 return ret;
4327
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004328#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004329 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
4330 !tr->current_trace->use_max_tr)
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004331 goto out;
4332
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004333 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004334 if (ret < 0) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004335 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
4336 &tr->trace_buffer, cpu);
Steven Rostedt73c51622009-03-11 13:42:01 -04004337 if (r < 0) {
Steven Rostedta123c522009-03-12 11:21:08 -04004338 /*
4339 * AARGH! We are left with different
4340 * size max buffer!!!!
4341 * The max buffer is our "snapshot" buffer.
4342 * When a tracer needs a snapshot (one of the
4343 * latency tracers), it swaps the max buffer
4344 * with the saved snap shot. We succeeded to
4345 * update the size of the main buffer, but failed to
4346 * update the size of the max buffer. But when we tried
4347 * to reset the main buffer to the original size, we
4348 * failed there too. This is very unlikely to
4349 * happen, but if it does, warn and kill all
4350 * tracing.
4351 */
Steven Rostedt73c51622009-03-11 13:42:01 -04004352 WARN_ON(1);
4353 tracing_disabled = 1;
4354 }
4355 return ret;
4356 }
4357
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004358 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004359 set_buffer_entries(&tr->max_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004360 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004361 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004362
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004363 out:
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004364#endif /* CONFIG_TRACER_MAX_TRACE */
4365
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004366 if (cpu == RING_BUFFER_ALL_CPUS)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004367 set_buffer_entries(&tr->trace_buffer, size);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004368 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004369 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
Steven Rostedt73c51622009-03-11 13:42:01 -04004370
4371 return ret;
4372}
4373
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004374static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
4375 unsigned long size, int cpu_id)
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004376{
Vaibhav Nagarnaik83f40312012-05-03 18:59:50 -07004377 int ret = size;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004378
4379 mutex_lock(&trace_types_lock);
4380
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004381 if (cpu_id != RING_BUFFER_ALL_CPUS) {
4382 /* make sure, this cpu is enabled in the mask */
4383 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
4384 ret = -EINVAL;
4385 goto out;
4386 }
4387 }
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004388
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004389 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004390 if (ret < 0)
4391 ret = -ENOMEM;
4392
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004393out:
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07004394 mutex_unlock(&trace_types_lock);
4395
4396 return ret;
4397}
4398
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004399
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004400/**
4401 * tracing_update_buffers - used by tracing facility to expand ring buffers
4402 *
4403 * To save on memory when the tracing is never used on a system with it
4404 * configured in. The ring buffers are set to a minimum size. But once
4405 * a user starts to use the tracing facility, then they need to grow
4406 * to their default size.
4407 *
4408 * This function is to be called when a tracer is about to be used.
4409 */
4410int tracing_update_buffers(void)
4411{
4412 int ret = 0;
4413
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004414 mutex_lock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004415 if (!ring_buffer_expanded)
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004416 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004417 RING_BUFFER_ALL_CPUS);
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004418 mutex_unlock(&trace_types_lock);
Steven Rostedt1852fcc2009-03-11 14:33:00 -04004419
4420 return ret;
4421}
4422
Steven Rostedt577b7852009-02-26 23:43:05 -05004423struct trace_option_dentry;
4424
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004425static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004426create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
Steven Rostedt577b7852009-02-26 23:43:05 -05004427
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004428/*
4429 * Used to clear out the tracer before deletion of an instance.
4430 * Must have trace_types_lock held.
4431 */
4432static void tracing_set_nop(struct trace_array *tr)
4433{
4434 if (tr->current_trace == &nop_trace)
4435 return;
4436
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004437 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)6b450d22014-01-14 08:43:01 -05004438
4439 if (tr->current_trace->reset)
4440 tr->current_trace->reset(tr);
4441
4442 tr->current_trace = &nop_trace;
4443}
4444
Steven Rostedt (Red Hat)41d9c0b2015-09-29 17:31:55 -04004445static void add_tracer_options(struct trace_array *tr, struct tracer *t)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004446{
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004447 /* Only enable if the directory has been created already. */
4448 if (!tr->dir)
4449 return;
4450
Steven Rostedt (Red Hat)37aea982015-09-30 14:27:31 -04004451 create_trace_option_files(tr, t);
Steven Rostedt (Red Hat)09d23a12015-02-03 12:45:53 -05004452}
4453
4454static int tracing_set_tracer(struct trace_array *tr, const char *buf)
4455{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004456 struct tracer *t;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004457#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004458 bool had_max_tr;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004459#endif
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004460 int ret = 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004461
Steven Rostedt1027fcb2009-03-12 11:33:20 -04004462 mutex_lock(&trace_types_lock);
4463
Steven Rostedt73c51622009-03-11 13:42:01 -04004464 if (!ring_buffer_expanded) {
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004465 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08004466 RING_BUFFER_ALL_CPUS);
Steven Rostedt73c51622009-03-11 13:42:01 -04004467 if (ret < 0)
Frederic Weisbecker59f586d2009-03-15 22:10:39 +01004468 goto out;
Steven Rostedt73c51622009-03-11 13:42:01 -04004469 ret = 0;
4470 }
4471
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004472 for (t = trace_types; t; t = t->next) {
4473 if (strcmp(t->name, buf) == 0)
4474 break;
4475 }
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004476 if (!t) {
4477 ret = -EINVAL;
4478 goto out;
4479 }
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004480 if (t == tr->current_trace)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004481 goto out;
4482
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004483 /* Some tracers are only allowed for the top level buffer */
4484 if (!trace_ok_for_array(t, tr)) {
4485 ret = -EINVAL;
4486 goto out;
4487 }
4488
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004489 /* If trace pipe files are being read, we can't change the tracer */
4490 if (tr->current_trace->ref) {
4491 ret = -EBUSY;
4492 goto out;
4493 }
4494
Steven Rostedt9f029e82008-11-12 15:24:24 -05004495 trace_branch_disable();
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004496
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004497 tr->current_trace->enabled--;
Steven Rostedt (Red Hat)613f04a2013-03-14 15:03:53 -04004498
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004499 if (tr->current_trace->reset)
4500 tr->current_trace->reset(tr);
Steven Rostedt34600f02013-01-22 13:35:11 -05004501
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004502 /* Current trace needs to be nop_trace before synchronize_sched */
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004503 tr->current_trace = &nop_trace;
Steven Rostedt34600f02013-01-22 13:35:11 -05004504
Steven Rostedt (Red Hat)45ad21c2013-03-05 18:25:02 -05004505#ifdef CONFIG_TRACER_MAX_TRACE
4506 had_max_tr = tr->allocated_snapshot;
Steven Rostedt34600f02013-01-22 13:35:11 -05004507
4508 if (had_max_tr && !t->use_max_tr) {
4509 /*
4510 * We need to make sure that the update_max_tr sees that
4511 * current_trace changed to nop_trace to keep it from
4512 * swapping the buffers after we resize it.
4513 * The update_max_tr is called from interrupts disabled
4514 * so a synchronized_sched() is sufficient.
4515 */
4516 synchronize_sched();
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004517 free_snapshot(tr);
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004518 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004519#endif
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004520
4521#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt34600f02013-01-22 13:35:11 -05004522 if (t->use_max_tr && !had_max_tr) {
Steven Rostedt (Red Hat)3209cff2013-03-12 11:17:54 -04004523 ret = alloc_snapshot(tr);
Hiraku Toyookad60da502012-10-17 11:56:16 +09004524 if (ret < 0)
4525 goto out;
KOSAKI Motohiroef710e12010-07-01 14:34:35 +09004526 }
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004527#endif
Steven Rostedt577b7852009-02-26 23:43:05 -05004528
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004529 if (t->init) {
Arnaldo Carvalho de Melob6f11df2009-02-05 18:02:00 -02004530 ret = tracer_init(t, tr);
Frederic Weisbecker1c800252008-11-16 05:57:26 +01004531 if (ret)
4532 goto out;
4533 }
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004534
Steven Rostedt2b6080f2012-05-11 13:29:49 -04004535 tr->current_trace = t;
Steven Rostedt (Red Hat)50512ab2014-01-14 08:52:35 -05004536 tr->current_trace->enabled++;
Steven Rostedt9f029e82008-11-12 15:24:24 -05004537 trace_branch_enable(tr);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004538 out:
4539 mutex_unlock(&trace_types_lock);
4540
Peter Zijlstrad9e54072008-11-01 19:57:37 +01004541 return ret;
4542}
4543
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004544static ssize_t
4545tracing_set_trace_write(struct file *filp, const char __user *ubuf,
4546 size_t cnt, loff_t *ppos)
4547{
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004548 struct trace_array *tr = filp->private_data;
Li Zefanee6c2c12009-09-18 14:06:47 +08004549 char buf[MAX_TRACER_SIZE+1];
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004550 int i;
4551 size_t ret;
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004552 int err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004553
Steven Rostedt60063a62008-10-28 10:44:24 -04004554 ret = cnt;
4555
Li Zefanee6c2c12009-09-18 14:06:47 +08004556 if (cnt > MAX_TRACER_SIZE)
4557 cnt = MAX_TRACER_SIZE;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004558
4559 if (copy_from_user(&buf, ubuf, cnt))
4560 return -EFAULT;
4561
4562 buf[cnt] = 0;
4563
4564 /* strip ending whitespace. */
4565 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
4566 buf[i] = 0;
4567
Steven Rostedt (Red Hat)607e2ea2013-11-06 22:42:48 -05004568 err = tracing_set_tracer(tr, buf);
Frederic Weisbeckere6e7a652008-11-16 05:53:19 +01004569 if (err)
4570 return err;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004571
Jiri Olsacf8517c2009-10-23 19:36:16 -04004572 *ppos += ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004573
Frederic Weisbeckerc2931e02008-10-04 22:04:44 +02004574 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004575}
4576
4577static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004578tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
4579 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004580{
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004581 char buf[64];
4582 int r;
4583
Steven Rostedtcffae432008-05-12 21:21:00 +02004584 r = snprintf(buf, sizeof(buf), "%ld\n",
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004585 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
Steven Rostedtcffae432008-05-12 21:21:00 +02004586 if (r > sizeof(buf))
4587 r = sizeof(buf);
Ingo Molnar4bf39a92008-05-12 21:20:46 +02004588 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004589}
4590
4591static ssize_t
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004592tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
4593 size_t cnt, loff_t *ppos)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004594{
Hannes Eder5e398412009-02-10 19:44:34 +01004595 unsigned long val;
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004596 int ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004597
Peter Huewe22fe9b52011-06-07 21:58:27 +02004598 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4599 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02004600 return ret;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02004601
4602 *ptr = val * 1000;
4603
4604 return cnt;
4605}
4606
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004607static ssize_t
4608tracing_thresh_read(struct file *filp, char __user *ubuf,
4609 size_t cnt, loff_t *ppos)
4610{
4611 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
4612}
4613
4614static ssize_t
4615tracing_thresh_write(struct file *filp, const char __user *ubuf,
4616 size_t cnt, loff_t *ppos)
4617{
4618 struct trace_array *tr = filp->private_data;
4619 int ret;
4620
4621 mutex_lock(&trace_types_lock);
4622 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
4623 if (ret < 0)
4624 goto out;
4625
4626 if (tr->current_trace->update_thresh) {
4627 ret = tr->current_trace->update_thresh(tr);
4628 if (ret < 0)
4629 goto out;
4630 }
4631
4632 ret = cnt;
4633out:
4634 mutex_unlock(&trace_types_lock);
4635
4636 return ret;
4637}
4638
Chen Gange428abb2015-11-10 05:15:15 +08004639#ifdef CONFIG_TRACER_MAX_TRACE
4640
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04004641static ssize_t
4642tracing_max_lat_read(struct file *filp, char __user *ubuf,
4643 size_t cnt, loff_t *ppos)
4644{
4645 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
4646}
4647
4648static ssize_t
4649tracing_max_lat_write(struct file *filp, const char __user *ubuf,
4650 size_t cnt, loff_t *ppos)
4651{
4652 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
4653}
4654
Chen Gange428abb2015-11-10 05:15:15 +08004655#endif
4656
Steven Rostedtb3806b42008-05-12 21:20:46 +02004657static int tracing_open_pipe(struct inode *inode, struct file *filp)
4658{
Oleg Nesterov15544202013-07-23 17:25:57 +02004659 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004660 struct trace_iterator *iter;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004661 int ret = 0;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004662
4663 if (tracing_disabled)
4664 return -ENODEV;
4665
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004666 if (trace_array_get(tr) < 0)
4667 return -ENODEV;
4668
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004669 mutex_lock(&trace_types_lock);
4670
Steven Rostedtb3806b42008-05-12 21:20:46 +02004671 /* create a buffer to store the information to pass to userspace */
4672 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004673 if (!iter) {
4674 ret = -ENOMEM;
Alexander Z Lamf77d09a2013-07-18 11:18:44 -07004675 __trace_array_put(tr);
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004676 goto out;
4677 }
Steven Rostedtb3806b42008-05-12 21:20:46 +02004678
Steven Rostedt (Red Hat)3a161d92014-06-25 15:54:42 -04004679 trace_seq_init(&iter->seq);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05004680 iter->trace = tr->current_trace;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004681
4682 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
4683 ret = -ENOMEM;
4684 goto fail;
Rusty Russell44623442009-01-01 10:12:23 +10304685 }
4686
Steven Rostedta3097202008-11-07 22:36:02 -05004687 /* trace pipe does not show start of buffer */
Rusty Russell44623442009-01-01 10:12:23 +10304688 cpumask_setall(iter->started);
Steven Rostedta3097202008-11-07 22:36:02 -05004689
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004690 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
Steven Rostedt112f38a72009-06-01 15:16:05 -04004691 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4692
David Sharp8be07092012-11-13 12:18:22 -08004693 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09004694 if (trace_clocks[tr->clock_id].in_ns)
David Sharp8be07092012-11-13 12:18:22 -08004695 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
4696
Oleg Nesterov15544202013-07-23 17:25:57 +02004697 iter->tr = tr;
4698 iter->trace_buffer = &tr->trace_buffer;
4699 iter->cpu_file = tracing_get_cpu(inode);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004700 mutex_init(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004701 filp->private_data = iter;
4702
Steven Rostedt107bad82008-05-12 21:21:01 +02004703 if (iter->trace->pipe_open)
4704 iter->trace->pipe_open(iter);
Steven Rostedt107bad82008-05-12 21:21:01 +02004705
Arnd Bergmannb4447862010-07-07 23:40:11 +02004706 nonseekable_open(inode, filp);
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004707
4708 tr->current_trace->ref++;
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004709out:
4710 mutex_unlock(&trace_types_lock);
4711 return ret;
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004712
4713fail:
4714 kfree(iter->trace);
4715 kfree(iter);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004716 __trace_array_put(tr);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004717 mutex_unlock(&trace_types_lock);
4718 return ret;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004719}
4720
4721static int tracing_release_pipe(struct inode *inode, struct file *file)
4722{
4723 struct trace_iterator *iter = file->private_data;
Oleg Nesterov15544202013-07-23 17:25:57 +02004724 struct trace_array *tr = inode->i_private;
Steven Rostedtb3806b42008-05-12 21:20:46 +02004725
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004726 mutex_lock(&trace_types_lock);
4727
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05004728 tr->current_trace->ref--;
4729
Steven Rostedt29bf4a52009-12-09 12:37:43 -05004730 if (iter->trace->pipe_close)
Steven Rostedtc521efd2009-12-07 09:06:24 -05004731 iter->trace->pipe_close(iter);
4732
Frederic Weisbeckerb04cc6b2009-02-25 03:22:28 +01004733 mutex_unlock(&trace_types_lock);
4734
Rusty Russell44623442009-01-01 10:12:23 +10304735 free_cpumask_var(iter->started);
Frederic Weisbeckerd7350c3f2009-02-25 06:13:16 +01004736 mutex_destroy(&iter->mutex);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004737 kfree(iter);
Steven Rostedtb3806b42008-05-12 21:20:46 +02004738
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04004739 trace_array_put(tr);
4740
Steven Rostedtb3806b42008-05-12 21:20:46 +02004741 return 0;
4742}
4743
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004744static unsigned int
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004745trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004746{
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004747 struct trace_array *tr = iter->tr;
4748
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004749 /* Iterators are static, they should be filled or empty */
4750 if (trace_buffer_iter(iter, iter->cpu_file))
4751 return POLLIN | POLLRDNORM;
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004752
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04004753 if (tr->trace_flags & TRACE_ITER_BLOCK)
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004754 /*
4755 * Always select as readable when in blocking mode
4756 */
4757 return POLLIN | POLLRDNORM;
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004758 else
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05004759 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
Steven Rostedt (Red Hat)15693452013-02-28 19:59:17 -05004760 filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004761}
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004762
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05004763static unsigned int
4764tracing_poll_pipe(struct file *filp, poll_table *poll_table)
4765{
4766 struct trace_iterator *iter = filp->private_data;
4767
4768 return trace_poll(iter, filp, poll_table);
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02004769}
4770
/*
 * Wait until the trace buffer has something to read, or fail fast for
 * O_NONBLOCK readers.
 *
 * Must be called with iter->mutex held.  The mutex is dropped around the
 * actual sleep in wait_on_pipe() so other operations on the iterator are
 * not blocked while this reader waits.
 *
 * Returns 1 when the reader should attempt to consume entries, -EAGAIN
 * for a non-blocking reader on an empty buffer, or a negative error
 * propagated from wait_on_pipe().
 */
static int tracing_wait_pipe(struct file *filp)
{
	struct trace_iterator *iter = filp->private_data;
	int ret;

	while (trace_empty(iter)) {

		if ((filp->f_flags & O_NONBLOCK)) {
			return -EAGAIN;
		}

		/*
		 * We block until we read something and tracing is disabled.
		 * We still block if tracing is disabled, but we have never
		 * read anything. This allows a user to cat this file, and
		 * then enable tracing. But after we have read something,
		 * we give an EOF when tracing is again disabled.
		 *
		 * iter->pos will be 0 if we haven't read anything.
		 */
		if (!tracing_is_on() && iter->pos)
			break;

		/* Drop the lock across the sleep; see function header. */
		mutex_unlock(&iter->mutex);

		ret = wait_on_pipe(iter, false);

		mutex_lock(&iter->mutex);

		/* Non-zero means the wait was interrupted or failed. */
		if (ret)
			return ret;
	}

	return 1;
}
4807
/*
 * Consumer reader.
 *
 * read() implementation for trace_pipe: waits for entries, renders them
 * through print_trace_line() into iter->seq, consumes them from the ring
 * buffer, and copies the text to userspace.  Consumed entries are gone
 * for good (unlike the non-consuming "trace" file).
 */
static ssize_t
tracing_read_pipe(struct file *filp, char __user *ubuf,
		  size_t cnt, loff_t *ppos)
{
	struct trace_iterator *iter = filp->private_data;
	ssize_t sret;

	/*
	 * Avoid more than one consumer on a single file descriptor
	 * This is just a matter of traces coherency, the ring buffer itself
	 * is protected.
	 */
	mutex_lock(&iter->mutex);

	/* return any leftover data */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	if (sret != -EBUSY)
		goto out;

	trace_seq_init(&iter->seq);

	/* Give the current tracer a chance to service the read itself. */
	if (iter->trace->read) {
		sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
		if (sret)
			goto out;
	}

waitagain:
	sret = tracing_wait_pipe(filp);
	if (sret <= 0)
		goto out;

	/* stop when tracing is finished */
	if (trace_empty(iter)) {
		sret = 0;
		goto out;
	}

	/* iter->seq is page sized; never try to render more than a page. */
	if (cnt >= PAGE_SIZE)
		cnt = PAGE_SIZE - 1;

	/* reset all but tr, trace, and overruns */
	/*
	 * The fields from ->seq to the end of struct trace_iterator are
	 * per-read scratch state; zero them in one go while preserving
	 * everything laid out before ->seq.
	 */
	memset(&iter->seq, 0,
	       sizeof(struct trace_iterator) -
	       offsetof(struct trace_iterator, seq));
	cpumask_clear(iter->started);
	iter->pos = -1;

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);
	while (trace_find_next_entry_inc(iter) != NULL) {
		enum print_line_t ret;
		/* Remember the seq length so a partial line can be undone. */
		int save_len = iter->seq.seq.len;

		ret = print_trace_line(iter);
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			/* don't print partial lines */
			iter->seq.seq.len = save_len;
			break;
		}
		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);

		if (trace_seq_used(&iter->seq) >= cnt)
			break;

		/*
		 * Setting the full flag means we reached the trace_seq buffer
		 * size and we should leave by partial output condition above.
		 * One of the trace_seq_* functions is not used properly.
		 */
		WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
			  iter->ent->type);
	}
	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();

	/* Now copy what we have to the user */
	sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
	/* Everything was copied out: start the seq buffer fresh next time. */
	if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
		trace_seq_init(&iter->seq);

	/*
	 * If there was nothing to send to user, in spite of consuming trace
	 * entries, go back to wait for more entries.
	 */
	if (sret == -EBUSY)
		goto waitagain;

out:
	mutex_unlock(&iter->mutex);

	return sret;
}
4905
/*
 * splice_pipe_desc release callback: frees a page allocated in
 * tracing_splice_read_pipe() that was not handed off to the pipe.
 */
static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
				     unsigned int idx)
{
	__free_page(spd->pages[idx]);
}
4911
/*
 * Pipe buffer operations for pages spliced out of trace_pipe.  The pages
 * are ordinary kernel pages allocated in tracing_splice_read_pipe(), so
 * the generic pipe_buf helpers suffice for confirm/release/steal/get.
 */
static const struct pipe_buf_operations tracing_pipe_buf_ops = {
	.can_merge = 0,
	.confirm = generic_pipe_buf_confirm,
	.release = generic_pipe_buf_release,
	.steal = generic_pipe_buf_steal,
	.get = generic_pipe_buf_get,
};
4919
/*
 * Render as many trace entries into iter->seq as fit in one page and in
 * the remaining splice budget.
 *
 * @rem:  bytes the caller still wants to splice.
 * @iter: trace iterator; iter->ent is set to NULL when the buffer runs dry.
 *
 * Entries are only consumed once they fully fit; any partially rendered
 * entry is rolled back by restoring the saved seq length so the next
 * page starts with it cleanly.  Returns the updated remaining budget
 * (0 means the caller should stop filling pages).
 */
static size_t
tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
{
	size_t count;
	int save_len;
	int ret;

	/* Seq buffer is page-sized, exactly what we need. */
	for (;;) {
		/* Snapshot so an entry that doesn't fit can be undone. */
		save_len = iter->seq.seq.len;
		ret = print_trace_line(iter);

		if (trace_seq_has_overflowed(&iter->seq)) {
			iter->seq.seq.len = save_len;
			break;
		}

		/*
		 * This should not be hit, because it should only
		 * be set if the iter->seq overflowed. But check it
		 * anyway to be safe.
		 */
		if (ret == TRACE_TYPE_PARTIAL_LINE) {
			iter->seq.seq.len = save_len;
			break;
		}

		/* Bytes this entry added; stop if it busts the budget. */
		count = trace_seq_used(&iter->seq) - save_len;
		if (rem < count) {
			rem = 0;
			iter->seq.seq.len = save_len;
			break;
		}

		if (ret != TRACE_TYPE_NO_CONSUME)
			trace_consume(iter);
		rem -= count;
		if (!trace_find_next_entry_inc(iter)) {
			/* Buffer exhausted: signal the caller to stop. */
			rem = 0;
			iter->ent = NULL;
			break;
		}
	}

	return rem;
}
4966
/*
 * splice_read() implementation for trace_pipe: renders consumed trace
 * entries page by page (via tracing_fill_pipe_page()) and hands the
 * pages to the pipe with splice_to_pipe().
 *
 * Pages that fail to fill are freed here; pages handed to the pipe are
 * released by tracing_pipe_buf_ops / tracing_spd_release_pipe.
 */
static ssize_t tracing_splice_read_pipe(struct file *filp,
					loff_t *ppos,
					struct pipe_inode_info *pipe,
					size_t len,
					unsigned int flags)
{
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct trace_iterator *iter = filp->private_data;
	struct splice_pipe_desc spd = {
		.pages = pages_def,
		.partial = partial_def,
		.nr_pages = 0, /* This gets updated below. */
		.nr_pages_max = PIPE_DEF_BUFFERS,
		.flags = flags,
		.ops = &tracing_pipe_buf_ops,
		.spd_release = tracing_spd_release_pipe,
	};
	ssize_t ret;
	size_t rem;
	unsigned int i;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	/* Single consumer, same as tracing_read_pipe(). */
	mutex_lock(&iter->mutex);

	/* Give the current tracer a chance to service the splice itself. */
	if (iter->trace->splice_read) {
		ret = iter->trace->splice_read(iter, filp,
					       ppos, pipe, len, flags);
		if (ret)
			goto out_err;
	}

	ret = tracing_wait_pipe(filp);
	if (ret <= 0)
		goto out_err;

	if (!iter->ent && !trace_find_next_entry_inc(iter)) {
		ret = -EFAULT;
		goto out_err;
	}

	trace_event_read_lock();
	trace_access_lock(iter->cpu_file);

	/* Fill as many pages as possible. */
	for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
		spd.pages[i] = alloc_page(GFP_KERNEL);
		if (!spd.pages[i])
			break;

		rem = tracing_fill_pipe_page(rem, iter);

		/* Copy the data into the page, so we can start over. */
		ret = trace_seq_to_buffer(&iter->seq,
					  page_address(spd.pages[i]),
					  trace_seq_used(&iter->seq));
		if (ret < 0) {
			__free_page(spd.pages[i]);
			break;
		}
		spd.partial[i].offset = 0;
		spd.partial[i].len = trace_seq_used(&iter->seq);

		trace_seq_init(&iter->seq);
	}

	trace_access_unlock(iter->cpu_file);
	trace_event_read_unlock();
	mutex_unlock(&iter->mutex);

	/* Only the pages actually filled are handed to the pipe. */
	spd.nr_pages = i;

	if (i)
		ret = splice_to_pipe(pipe, &spd);
	else
		ret = 0;
out:
	splice_shrink_spd(&spd);
	return ret;

out_err:
	mutex_unlock(&iter->mutex);
	goto out;
}
5053
Steven Rostedta98a3c32008-05-12 21:20:59 +02005054static ssize_t
5055tracing_entries_read(struct file *filp, char __user *ubuf,
5056 size_t cnt, loff_t *ppos)
5057{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005058 struct inode *inode = file_inode(filp);
5059 struct trace_array *tr = inode->i_private;
5060 int cpu = tracing_get_cpu(inode);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005061 char buf[64];
5062 int r = 0;
5063 ssize_t ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005064
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005065 mutex_lock(&trace_types_lock);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005066
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005067 if (cpu == RING_BUFFER_ALL_CPUS) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005068 int cpu, buf_size_same;
5069 unsigned long size;
5070
5071 size = 0;
5072 buf_size_same = 1;
5073 /* check if all cpu sizes are same */
5074 for_each_tracing_cpu(cpu) {
5075 /* fill in the size from first enabled cpu */
5076 if (size == 0)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005077 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5078 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005079 buf_size_same = 0;
5080 break;
5081 }
5082 }
5083
5084 if (buf_size_same) {
5085 if (!ring_buffer_expanded)
5086 r = sprintf(buf, "%lu (expanded: %lu)\n",
5087 size >> 10,
5088 trace_buf_size >> 10);
5089 else
5090 r = sprintf(buf, "%lu\n", size >> 10);
5091 } else
5092 r = sprintf(buf, "X\n");
5093 } else
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005094 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005095
Steven Rostedtdb526ca2009-03-12 13:53:25 -04005096 mutex_unlock(&trace_types_lock);
5097
Vaibhav Nagarnaik438ced12012-02-02 12:00:41 -08005098 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5099 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005100}
5101
5102static ssize_t
5103tracing_entries_write(struct file *filp, const char __user *ubuf,
5104 size_t cnt, loff_t *ppos)
5105{
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005106 struct inode *inode = file_inode(filp);
5107 struct trace_array *tr = inode->i_private;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005108 unsigned long val;
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005109 int ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005110
Peter Huewe22fe9b52011-06-07 21:58:27 +02005111 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5112 if (ret)
Steven Rostedtc6caeeb2008-05-12 21:21:00 +02005113 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005114
5115 /* must have at least 1 entry */
5116 if (!val)
5117 return -EINVAL;
5118
Steven Rostedt1696b2b2008-11-13 00:09:35 -05005119 /* value is in KB */
5120 val <<= 10;
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005121 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005122 if (ret < 0)
5123 return ret;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005124
Jiri Olsacf8517c2009-10-23 19:36:16 -04005125 *ppos += cnt;
Steven Rostedta98a3c32008-05-12 21:20:59 +02005126
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005127 return cnt;
5128}
Steven Rostedtbf5e6512008-11-10 21:46:00 -05005129
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005130static ssize_t
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005131tracing_total_entries_read(struct file *filp, char __user *ubuf,
5132 size_t cnt, loff_t *ppos)
5133{
5134 struct trace_array *tr = filp->private_data;
5135 char buf[64];
5136 int r, cpu;
5137 unsigned long size = 0, expanded_size = 0;
5138
5139 mutex_lock(&trace_types_lock);
5140 for_each_tracing_cpu(cpu) {
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005141 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005142 if (!ring_buffer_expanded)
5143 expanded_size += trace_buf_size >> 10;
5144 }
5145 if (ring_buffer_expanded)
5146 r = sprintf(buf, "%lu\n", size);
5147 else
5148 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
5149 mutex_unlock(&trace_types_lock);
5150
5151 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5152}
5153
/*
 * Write implementation for "free_buffer": accept and discard any input.
 * The actual freeing happens in tracing_free_buffer_release() when the
 * file is closed.
 */
static ssize_t
tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
			  size_t cnt, loff_t *ppos)
{
	/*
	 * There is no need to read what the user has written, this function
	 * is just to make sure that there is no error when "echo" is used
	 */

	*ppos += cnt;

	return cnt;
}
5167
/*
 * Release implementation for "free_buffer": shrink the ring buffer of
 * this trace instance to zero (optionally stopping tracing first, if
 * the stop_on_free option is set) and drop the reference taken at open.
 */
static int
tracing_free_buffer_release(struct inode *inode, struct file *filp)
{
	struct trace_array *tr = inode->i_private;

	/* disable tracing ? */
	if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
		tracer_tracing_off(tr);
	/* resize the ring buffer to 0 */
	tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);

	/* Matches the trace_array_get() done at open time. */
	trace_array_put(tr);

	return 0;
}
5183
/*
 * Write implementation for "trace_marker": inject a userspace string
 * into the ring buffer as a TRACE_PRINT event.
 *
 * To stay as non-intrusive as possible the user buffer is pinned with
 * get_user_pages_fast() and copied straight into the reserved ring
 * buffer event — no intermediate kernel buffer, no locks taken here
 * beyond what the ring buffer itself does.
 *
 * NOTE(review): a zero-length write would read entry->buf[cnt - 1] with
 * cnt == 0 (out of bounds) — presumably callers never pass 0 via the
 * VFS write path, but worth confirming.
 */
static ssize_t
tracing_mark_write(struct file *filp, const char __user *ubuf,
					size_t cnt, loff_t *fpos)
{
	unsigned long addr = (unsigned long)ubuf;
	struct trace_array *tr = filp->private_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	struct print_entry *entry;
	unsigned long irq_flags;
	struct page *pages[2];
	void *map_page[2];
	int nr_pages = 1;
	ssize_t written;
	int offset;
	int size;
	int len;
	int ret;
	int i;

	if (tracing_disabled)
		return -EINVAL;

	/* Markers can be disabled per trace instance. */
	if (!(tr->trace_flags & TRACE_ITER_MARKERS))
		return -EINVAL;

	/* Silently truncate oversized writes to the event buffer size. */
	if (cnt > TRACE_BUF_SIZE)
		cnt = TRACE_BUF_SIZE;

	/*
	 * Userspace is injecting traces into the kernel trace buffer.
	 * We want to be as non intrusive as possible.
	 * To do so, we do not want to allocate any special buffers
	 * or take any locks, but instead write the userspace data
	 * straight into the ring buffer.
	 *
	 * First we need to pin the userspace buffer into memory,
	 * which, most likely it is, because it just referenced it.
	 * But there's no guarantee that it is. By using get_user_pages_fast()
	 * and kmap_atomic/kunmap_atomic() we can get access to the
	 * pages directly. We then write the data directly into the
	 * ring buffer.
	 */
	BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);

	/* check if we cross pages */
	if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
		nr_pages = 2;

	offset = addr & (PAGE_SIZE - 1);
	addr &= PAGE_MASK;

	ret = get_user_pages_fast(addr, nr_pages, 0, pages);
	if (ret < nr_pages) {
		/* Partial pin: release the pages we did get. */
		while (--ret >= 0)
			put_page(pages[ret]);
		written = -EFAULT;
		goto out;
	}

	for (i = 0; i < nr_pages; i++)
		map_page[i] = kmap_atomic(pages[i]);

	local_save_flags(irq_flags);
	size = sizeof(*entry) + cnt + 2; /* possible \n added */
	buffer = tr->trace_buffer.buffer;
	event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
					  irq_flags, preempt_count());
	if (!event) {
		/* Ring buffer disabled, return as if not open for write */
		written = -EBADF;
		goto out_unlock;
	}

	entry = ring_buffer_event_data(event);
	entry->ip = _THIS_IP_;

	/* Copy straight from the pinned user pages into the event. */
	if (nr_pages == 2) {
		len = PAGE_SIZE - offset;
		memcpy(&entry->buf, map_page[0] + offset, len);
		memcpy(&entry->buf[len], map_page[1], cnt - len);
	} else
		memcpy(&entry->buf, map_page[0] + offset, cnt);

	/* Ensure the recorded string is newline terminated. */
	if (entry->buf[cnt - 1] != '\n') {
		entry->buf[cnt] = '\n';
		entry->buf[cnt + 1] = '\0';
	} else
		entry->buf[cnt] = '\0';

	__buffer_unlock_commit(buffer, event);

	written = cnt;

	*fpos += written;

 out_unlock:
	/* Unmap in reverse order: kmap_atomic mappings nest like a stack. */
	for (i = nr_pages - 1; i >= 0; i--) {
		kunmap_atomic(map_page[i]);
		put_page(pages[i]);
	}
 out:
	return written;
}
5288
/*
 * seq_file show callback for "trace_clock": print every available clock
 * name on one line, with the currently selected clock in [brackets],
 * e.g. "local [global] counter ...".
 */
static int tracing_clock_show(struct seq_file *m, void *v)
{
	struct trace_array *tr = m->private;
	int i;

	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
		seq_printf(m,
			"%s%s%s%s", i ? " " : "",
			i == tr->clock_id ? "[" : "", trace_clocks[i].name,
			i == tr->clock_id ? "]" : "");
	seq_putc(m, '\n');

	return 0;
}
5303
/*
 * Switch the trace instance to the clock named @clockstr.
 *
 * Returns 0 on success or -EINVAL if the name matches none of the
 * entries in trace_clocks[].  Changing the clock resets the buffers,
 * since timestamps taken with different clocks are not comparable.
 */
static int tracing_set_clock(struct trace_array *tr, const char *clockstr)
{
	int i;

	/* Look the name up in the table of supported clocks. */
	for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
		if (strcmp(trace_clocks[i].name, clockstr) == 0)
			break;
	}
	if (i == ARRAY_SIZE(trace_clocks))
		return -EINVAL;

	mutex_lock(&trace_types_lock);

	tr->clock_id = i;

	ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);

	/*
	 * New clock may not be consistent with the previous clock.
	 * Reset the buffer so that it doesn't have incomparable timestamps.
	 */
	tracing_reset_online_cpus(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	/* The global instance also carries a max-latency snapshot buffer. */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
		ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
	tracing_reset_online_cpus(&tr->max_buffer);
#endif

	mutex_unlock(&trace_types_lock);

	return 0;
}
5337
5338static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
5339 size_t cnt, loff_t *fpos)
5340{
5341 struct seq_file *m = filp->private_data;
5342 struct trace_array *tr = m->private;
5343 char buf[64];
5344 const char *clockstr;
5345 int ret;
5346
5347 if (cnt >= sizeof(buf))
5348 return -EINVAL;
5349
5350 if (copy_from_user(&buf, ubuf, cnt))
5351 return -EFAULT;
5352
5353 buf[cnt] = 0;
5354
5355 clockstr = strstrip(buf);
5356
5357 ret = tracing_set_clock(tr, clockstr);
5358 if (ret)
5359 return ret;
5360
Zhaolei5079f322009-08-25 16:12:56 +08005361 *fpos += cnt;
5362
5363 return cnt;
5364}
5365
/*
 * Open callback for "trace_clock": pin the trace instance and set up a
 * single-record seq_file driven by tracing_clock_show().  The reference
 * taken here is dropped on error, and otherwise held until release.
 */
static int tracing_clock_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	int ret;

	if (tracing_disabled)
		return -ENODEV;

	/* Pin the instance so it cannot go away while the file is open. */
	if (trace_array_get(tr))
		return -ENODEV;

	ret = single_open(file, tracing_clock_show, inode->i_private);
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5383
/*
 * Per-open state for the per_cpu raw buffer files (allocated in
 * tracing_buffers_open(), declared below).
 */
struct ftrace_buffer_info {
	struct trace_iterator iter;	/* iterator over the instance's buffer */
	void *spare;			/* spare ring-buffer page; presumably used by
					 * the read/splice paths — see tracing_buffers_read */
	unsigned int read;		/* NOTE(review): looks like a read offset into
					 * the spare page — confirm against the read path */
};
5389
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005390#ifdef CONFIG_TRACER_SNAPSHOT
/*
 * Open callback for the "snapshot" file.
 *
 * Readers get a full trace iterator over the max/snapshot buffer via
 * __tracing_open().  Write-only opens only need somewhere to stash the
 * private data, so a stub seq_file plus a minimal iterator are
 * allocated by hand.  The trace_array reference taken here is dropped
 * on any failure path.
 */
static int tracing_snapshot_open(struct inode *inode, struct file *file)
{
	struct trace_array *tr = inode->i_private;
	struct trace_iterator *iter;
	struct seq_file *m;
	int ret = 0;

	if (trace_array_get(tr) < 0)
		return -ENODEV;

	if (file->f_mode & FMODE_READ) {
		iter = __tracing_open(inode, file, true);
		if (IS_ERR(iter))
			ret = PTR_ERR(iter);
	} else {
		/* Writes still need the seq_file to hold the private data */
		ret = -ENOMEM;
		m = kzalloc(sizeof(*m), GFP_KERNEL);
		if (!m)
			goto out;
		iter = kzalloc(sizeof(*iter), GFP_KERNEL);
		if (!iter) {
			kfree(m);
			goto out;
		}
		ret = 0;

		iter->tr = tr;
		/* The snapshot file always operates on the max buffer. */
		iter->trace_buffer = &tr->max_buffer;
		iter->cpu_file = tracing_get_cpu(inode);
		m->private = iter;
		file->private_data = m;
	}
out:
	if (ret < 0)
		trace_array_put(tr);

	return ret;
}
5430
/*
 * Write implementation for the "snapshot" file:
 *
 *   0 - free the snapshot buffer (all-CPUs file only)
 *   1 - allocate the snapshot buffer if needed, then swap it with the
 *       live buffer (per-cpu swap only if the ring buffer supports it)
 *   * - any other value clears the snapshot buffer contents
 *
 * Rejected with -EBUSY while a latency tracer owns the max buffer
 * (current_trace->use_max_tr).
 */
static ssize_t
tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
		       loff_t *ppos)
{
	struct seq_file *m = filp->private_data;
	struct trace_iterator *iter = m->private;
	struct trace_array *tr = iter->tr;
	unsigned long val;
	int ret;

	ret = tracing_update_buffers();
	if (ret < 0)
		return ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	mutex_lock(&trace_types_lock);

	/* Latency tracers own the max buffer; don't fight over it. */
	if (tr->current_trace->use_max_tr) {
		ret = -EBUSY;
		goto out;
	}

	switch (val) {
	case 0:
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
		if (tr->allocated_snapshot)
			free_snapshot(tr);
		break;
	case 1:
/* Only allow per-cpu swap if the ring buffer supports it */
#ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
		if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
			ret = -EINVAL;
			break;
		}
#endif
		if (!tr->allocated_snapshot) {
			ret = alloc_snapshot(tr);
			if (ret < 0)
				break;
		}
		/* Swap must not be interrupted on this CPU. */
		local_irq_disable();
		/* Now, we're going to swap */
		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
			update_max_tr(tr, current, smp_processor_id());
		else
			update_max_tr_single(tr, current, iter->cpu_file);
		local_irq_enable();
		break;
	default:
		/* Any other value: just clear the snapshot contents. */
		if (tr->allocated_snapshot) {
			if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
				tracing_reset_online_cpus(&tr->max_buffer);
			else
				tracing_reset(&tr->max_buffer, iter->cpu_file);
		}
		break;
	}

	if (ret >= 0) {
		*ppos += cnt;
		ret = cnt;
	}
out:
	mutex_unlock(&trace_types_lock);
	return ret;
}
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005504
5505static int tracing_snapshot_release(struct inode *inode, struct file *file)
5506{
5507 struct seq_file *m = file->private_data;
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005508 int ret;
5509
5510 ret = tracing_release(inode, file);
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005511
5512 if (file->f_mode & FMODE_READ)
Steven Rostedt (Red Hat)ff451962013-07-01 22:50:29 -04005513 return ret;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04005514
5515 /* If write only, the seq_file is just a stub */
5516 if (m)
5517 kfree(m->private);
5518 kfree(m);
5519
5520 return 0;
5521}
5522
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005523static int tracing_buffers_open(struct inode *inode, struct file *filp);
5524static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
5525 size_t count, loff_t *ppos);
5526static int tracing_buffers_release(struct inode *inode, struct file *file);
5527static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5528 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
5529
5530static int snapshot_raw_open(struct inode *inode, struct file *filp)
5531{
5532 struct ftrace_buffer_info *info;
5533 int ret;
5534
5535 ret = tracing_buffers_open(inode, filp);
5536 if (ret < 0)
5537 return ret;
5538
5539 info = filp->private_data;
5540
5541 if (info->iter.trace->use_max_tr) {
5542 tracing_buffers_release(inode, filp);
5543 return -EBUSY;
5544 }
5545
5546 info->iter.snapshot = true;
5547 info->iter.trace_buffer = &info->iter.tr->max_buffer;
5548
5549 return ret;
5550}
5551
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005552#endif /* CONFIG_TRACER_SNAPSHOT */
5553
5554
Stanislav Fomichev6508fa72014-07-18 15:17:27 +04005555static const struct file_operations tracing_thresh_fops = {
5556 .open = tracing_open_generic,
5557 .read = tracing_thresh_read,
5558 .write = tracing_thresh_write,
5559 .llseek = generic_file_llseek,
5560};
5561
Chen Gange428abb2015-11-10 05:15:15 +08005562#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005563static const struct file_operations tracing_max_lat_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005564 .open = tracing_open_generic,
5565 .read = tracing_max_lat_read,
5566 .write = tracing_max_lat_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005567 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005568};
Chen Gange428abb2015-11-10 05:15:15 +08005569#endif
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005570
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005571static const struct file_operations set_tracer_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005572 .open = tracing_open_generic,
5573 .read = tracing_set_trace_read,
5574 .write = tracing_set_trace_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005575 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02005576};
5577
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005578static const struct file_operations tracing_pipe_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005579 .open = tracing_open_pipe,
Soeren Sandmann Pedersen2a2cc8f2008-05-12 21:20:49 +02005580 .poll = tracing_poll_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005581 .read = tracing_read_pipe,
Eduard - Gabriel Munteanu3c568192009-02-09 08:15:56 +02005582 .splice_read = tracing_splice_read_pipe,
Ingo Molnar4bf39a92008-05-12 21:20:46 +02005583 .release = tracing_release_pipe,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005584 .llseek = no_llseek,
Steven Rostedtb3806b42008-05-12 21:20:46 +02005585};
5586
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005587static const struct file_operations tracing_entries_fops = {
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005588 .open = tracing_open_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005589 .read = tracing_entries_read,
5590 .write = tracing_entries_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005591 .llseek = generic_file_llseek,
Oleg Nesterov0bc392e2013-07-23 17:26:06 +02005592 .release = tracing_release_generic_tr,
Steven Rostedta98a3c32008-05-12 21:20:59 +02005593};
5594
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005595static const struct file_operations tracing_total_entries_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005596 .open = tracing_open_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005597 .read = tracing_total_entries_read,
5598 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005599 .release = tracing_release_generic_tr,
Vaibhav Nagarnaikf81ab072011-08-16 14:46:15 -07005600};
5601
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005602static const struct file_operations tracing_free_buffer_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005603 .open = tracing_open_generic_tr,
Vaibhav Nagarnaik4f271a22011-06-13 17:51:57 -07005604 .write = tracing_free_buffer_write,
5605 .release = tracing_free_buffer_release,
5606};
5607
Steven Rostedt5e2336a2009-03-05 21:44:55 -05005608static const struct file_operations tracing_mark_fops = {
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005609 .open = tracing_open_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005610 .write = tracing_mark_write,
Arnd Bergmannb4447862010-07-07 23:40:11 +02005611 .llseek = generic_file_llseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005612 .release = tracing_release_generic_tr,
Pekka Paalanen5bf9a1e2008-09-16 22:06:42 +03005613};
5614
Zhaolei5079f322009-08-25 16:12:56 +08005615static const struct file_operations trace_clock_fops = {
Li Zefan13f16d22009-12-08 11:16:11 +08005616 .open = tracing_clock_open,
5617 .read = seq_read,
5618 .llseek = seq_lseek,
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005619 .release = tracing_single_release_tr,
Zhaolei5079f322009-08-25 16:12:56 +08005620 .write = tracing_clock_write,
5621};
5622
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005623#ifdef CONFIG_TRACER_SNAPSHOT
/* File operations for the "snapshot" file (CONFIG_TRACER_SNAPSHOT) */
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};
Hiraku Toyookadebdd572012-12-26 11:53:00 +09005631
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005632static const struct file_operations snapshot_raw_fops = {
5633 .open = snapshot_raw_open,
5634 .read = tracing_buffers_read,
5635 .release = tracing_buffers_release,
5636 .splice_read = tracing_buffers_splice_read,
5637 .llseek = no_llseek,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005638};
5639
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005640#endif /* CONFIG_TRACER_SNAPSHOT */
5641
Steven Rostedt2cadf912008-12-01 22:20:19 -05005642static int tracing_buffers_open(struct inode *inode, struct file *filp)
5643{
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005644 struct trace_array *tr = inode->i_private;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005645 struct ftrace_buffer_info *info;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005646 int ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005647
5648 if (tracing_disabled)
5649 return -ENODEV;
5650
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005651 if (trace_array_get(tr) < 0)
5652 return -ENODEV;
5653
Steven Rostedt2cadf912008-12-01 22:20:19 -05005654 info = kzalloc(sizeof(*info), GFP_KERNEL);
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005655 if (!info) {
5656 trace_array_put(tr);
Steven Rostedt2cadf912008-12-01 22:20:19 -05005657 return -ENOMEM;
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005658 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005659
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005660 mutex_lock(&trace_types_lock);
5661
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005662 info->iter.tr = tr;
Oleg Nesterov46ef2be2013-07-23 17:26:00 +02005663 info->iter.cpu_file = tracing_get_cpu(inode);
Steven Rostedtb6273442013-02-28 13:44:11 -05005664 info->iter.trace = tr->current_trace;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005665 info->iter.trace_buffer = &tr->trace_buffer;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005666 info->spare = NULL;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005667 /* Force reading ring buffer for first read */
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005668 info->read = (unsigned int)-1;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005669
5670 filp->private_data = info;
5671
Steven Rostedt (Red Hat)cf6ab6d2014-12-15 20:13:31 -05005672 tr->current_trace->ref++;
5673
Steven Rostedt (Red Hat)a695cb52013-03-06 15:27:24 -05005674 mutex_unlock(&trace_types_lock);
5675
Steven Rostedt (Red Hat)7b85af62013-07-01 23:34:22 -04005676 ret = nonseekable_open(inode, filp);
5677 if (ret < 0)
5678 trace_array_put(tr);
5679
5680 return ret;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005681}
5682
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005683static unsigned int
5684tracing_buffers_poll(struct file *filp, poll_table *poll_table)
5685{
5686 struct ftrace_buffer_info *info = filp->private_data;
5687 struct trace_iterator *iter = &info->iter;
5688
5689 return trace_poll(iter, filp, poll_table);
5690}
5691
Steven Rostedt2cadf912008-12-01 22:20:19 -05005692static ssize_t
5693tracing_buffers_read(struct file *filp, char __user *ubuf,
5694 size_t count, loff_t *ppos)
5695{
5696 struct ftrace_buffer_info *info = filp->private_data;
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005697 struct trace_iterator *iter = &info->iter;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005698 ssize_t ret;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005699 ssize_t size;
Steven Rostedt2cadf912008-12-01 22:20:19 -05005700
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005701 if (!count)
5702 return 0;
5703
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005704#ifdef CONFIG_TRACER_MAX_TRACE
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005705 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
5706 return -EBUSY;
Steven Rostedt (Red Hat)6de58e62013-03-05 16:18:16 -05005707#endif
5708
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005709 if (!info->spare)
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005710 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
5711 iter->cpu_file);
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005712 if (!info->spare)
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005713 return -ENOMEM;
Lai Jiangshanddd538f2009-04-02 15:16:59 +08005714
Steven Rostedt2cadf912008-12-01 22:20:19 -05005715 /* Do we have previous read data to read? */
5716 if (info->read < PAGE_SIZE)
5717 goto read;
5718
Steven Rostedtb6273442013-02-28 13:44:11 -05005719 again:
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005720 trace_access_lock(iter->cpu_file);
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005721 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
Steven Rostedt2cadf912008-12-01 22:20:19 -05005722 &info->spare,
5723 count,
Steven Rostedtcc60cdc2013-02-28 09:17:16 -05005724 iter->cpu_file, 0);
5725 trace_access_unlock(iter->cpu_file);
Steven Rostedtb6273442013-02-28 13:44:11 -05005726
5727 if (ret < 0) {
5728 if (trace_empty(iter)) {
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005729 if ((filp->f_flags & O_NONBLOCK))
5730 return -EAGAIN;
5731
Rabin Vincente30f53a2014-11-10 19:46:34 +01005732 ret = wait_on_pipe(iter, false);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005733 if (ret)
5734 return ret;
5735
Steven Rostedtb6273442013-02-28 13:44:11 -05005736 goto again;
5737 }
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005738 return 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005739 }
Steven Rostedt2cadf912008-12-01 22:20:19 -05005740
Steven Rostedt436fc282011-10-14 10:44:25 -04005741 info->read = 0;
Steven Rostedtb6273442013-02-28 13:44:11 -05005742 read:
Steven Rostedt2cadf912008-12-01 22:20:19 -05005743 size = PAGE_SIZE - info->read;
5744 if (size > count)
5745 size = count;
5746
5747 ret = copy_to_user(ubuf, info->spare + info->read, size);
Steven Rostedt (Red Hat)d716ff72014-12-15 22:31:07 -05005748 if (ret == size)
5749 return -EFAULT;
5750
Steven Rostedt2dc5d122009-03-04 19:10:05 -05005751 size -= ret;
5752
Steven Rostedt2cadf912008-12-01 22:20:19 -05005753 *ppos += size;
5754 info->read += size;
5755
5756 return size;
5757}
5758
/*
 * Release handler for the raw buffer file: drop the tracer and trace
 * array references taken at open time and free the transfer page.
 */
static int tracing_buffers_release(struct inode *inode, struct file *file)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;

	mutex_lock(&trace_types_lock);

	iter->tr->current_trace->ref--;

	/* __trace_array_put: caller already holds trace_types_lock */
	__trace_array_put(iter->tr);

	if (info->spare)
		ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
	kfree(info);

	mutex_unlock(&trace_types_lock);

	return 0;
}
5778
/* Reference-counted handle on one ring-buffer page handed to a pipe */
struct buffer_ref {
	struct ring_buffer	*buffer;	/* buffer the page came from */
	void			*page;		/* the read page itself */
	int			ref;		/* outstanding references */
};
5784
5785static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
5786 struct pipe_buffer *buf)
5787{
5788 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5789
5790 if (--ref->ref)
5791 return;
5792
5793 ring_buffer_free_read_page(ref->buffer, ref->page);
5794 kfree(ref);
5795 buf->private = 0;
5796}
5797
Steven Rostedt2cadf912008-12-01 22:20:19 -05005798static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
5799 struct pipe_buffer *buf)
5800{
5801 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
5802
5803 ref->ref++;
5804}
5805
/* Pipe buffer operations for a buffer. */
static const struct pipe_buf_operations buffer_pipe_buf_ops = {
	.can_merge		= 0,	/* pages are handed out whole */
	.confirm		= generic_pipe_buf_confirm,
	.release		= buffer_pipe_buf_release,
	.steal			= generic_pipe_buf_steal,
	.get			= buffer_pipe_buf_get,
};
5814
5815/*
5816 * Callback from splice_to_pipe(), if we need to release some pages
5817 * at the end of the spd in case we error'ed out in filling the pipe.
5818 */
5819static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
5820{
5821 struct buffer_ref *ref =
5822 (struct buffer_ref *)spd->partial[i].private;
5823
5824 if (--ref->ref)
5825 return;
5826
5827 ring_buffer_free_read_page(ref->buffer, ref->page);
5828 kfree(ref);
5829 spd->partial[i].private = 0;
5830}
5831
/*
 * Splice handler for the raw buffer file.
 *
 * Moves whole ring-buffer pages into the pipe zero-copy: each page is
 * wrapped in a refcounted buffer_ref so the ring buffer gets the page
 * back only after the pipe consumer releases it.  Blocks (unless
 * non-blocking) when no data is available.  Both *ppos and len must be
 * page aligned.  Returns bytes spliced or a negative errno.
 */
static ssize_t
tracing_buffers_splice_read(struct file *file, loff_t *ppos,
			    struct pipe_inode_info *pipe, size_t len,
			    unsigned int flags)
{
	struct ftrace_buffer_info *info = file->private_data;
	struct trace_iterator *iter = &info->iter;
	struct partial_page partial_def[PIPE_DEF_BUFFERS];
	struct page *pages_def[PIPE_DEF_BUFFERS];
	struct splice_pipe_desc spd = {
		.pages		= pages_def,
		.partial	= partial_def,
		.nr_pages_max	= PIPE_DEF_BUFFERS,
		.flags		= flags,
		.ops		= &buffer_pipe_buf_ops,
		.spd_release	= buffer_spd_release,
	};
	struct buffer_ref *ref;
	int entries, size, i;
	ssize_t ret = 0;

#ifdef CONFIG_TRACER_MAX_TRACE
	/* Snapshot reads clash with tracers that use the max buffer */
	if (iter->snapshot && iter->tr->current_trace->use_max_tr)
		return -EBUSY;
#endif

	/* Only page-aligned offsets and lengths make sense here */
	if (*ppos & (PAGE_SIZE - 1))
		return -EINVAL;

	if (len & (PAGE_SIZE - 1)) {
		if (len < PAGE_SIZE)
			return -EINVAL;
		len &= PAGE_MASK;
	}

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

 again:
	trace_access_lock(iter->cpu_file);
	entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);

	/* Fill the spd with one ring-buffer page per pipe slot */
	for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
		struct page *page;
		int r;

		ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!ref) {
			ret = -ENOMEM;
			break;
		}

		ref->ref = 1;
		ref->buffer = iter->trace_buffer->buffer;
		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
		if (!ref->page) {
			ret = -ENOMEM;
			kfree(ref);
			break;
		}

		r = ring_buffer_read_page(ref->buffer, &ref->page,
					  len, iter->cpu_file, 1);
		if (r < 0) {
			ring_buffer_free_read_page(ref->buffer, ref->page);
			kfree(ref);
			break;
		}

		/*
		 * zero out any left over data, this is going to
		 * user land.
		 */
		size = ring_buffer_page_len(ref->page);
		if (size < PAGE_SIZE)
			memset(ref->page + size, 0, PAGE_SIZE - size);

		page = virt_to_page(ref->page);

		spd.pages[i] = page;
		spd.partial[i].len = PAGE_SIZE;
		spd.partial[i].offset = 0;
		spd.partial[i].private = (unsigned long)ref;
		spd.nr_pages++;
		*ppos += PAGE_SIZE;

		entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
	}

	trace_access_unlock(iter->cpu_file);
	spd.nr_pages = i;

	/* did we read anything? */
	if (!spd.nr_pages) {
		/* Propagate an allocation/read error from the loop above */
		if (ret)
			goto out;

		ret = -EAGAIN;
		if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
			goto out;

		/* Block until a full page of data is available, then retry */
		ret = wait_on_pipe(iter, true);
		if (ret)
			goto out;

		goto again;
	}

	ret = splice_to_pipe(pipe, &spd);
out:
	splice_shrink_spd(&spd);

	return ret;
}
5946
/* File operations for "trace_pipe_raw": raw per-cpu page reads */
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
5955
Steven Rostedtc8d77182009-04-29 18:03:45 -04005956static ssize_t
5957tracing_stats_read(struct file *filp, char __user *ubuf,
5958 size_t count, loff_t *ppos)
5959{
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005960 struct inode *inode = file_inode(filp);
5961 struct trace_array *tr = inode->i_private;
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005962 struct trace_buffer *trace_buf = &tr->trace_buffer;
Oleg Nesterov4d3435b2013-07-23 17:26:03 +02005963 int cpu = tracing_get_cpu(inode);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005964 struct trace_seq *s;
5965 unsigned long cnt;
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005966 unsigned long long t;
5967 unsigned long usec_rem;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005968
Li Zefane4f2d102009-06-15 10:57:28 +08005969 s = kmalloc(sizeof(*s), GFP_KERNEL);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005970 if (!s)
Roel Kluina6463652009-11-11 22:26:35 +01005971 return -ENOMEM;
Steven Rostedtc8d77182009-04-29 18:03:45 -04005972
5973 trace_seq_init(s);
5974
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005975 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005976 trace_seq_printf(s, "entries: %ld\n", cnt);
5977
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005978 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005979 trace_seq_printf(s, "overrun: %ld\n", cnt);
5980
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005981 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
Steven Rostedtc8d77182009-04-29 18:03:45 -04005982 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
5983
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005984 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005985 trace_seq_printf(s, "bytes: %ld\n", cnt);
5986
Yoshihiro YUNOMAE58e8eed2013-04-23 10:32:39 +09005987 if (trace_clocks[tr->clock_id].in_ns) {
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005988 /* local or global for trace_clock */
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005989 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005990 usec_rem = do_div(t, USEC_PER_SEC);
5991 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
5992 t, usec_rem);
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07005993
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05005994 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08005995 usec_rem = do_div(t, USEC_PER_SEC);
5996 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
5997 } else {
5998 /* counter or tsc mode for trace_clock */
5999 trace_seq_printf(s, "oldest event ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006000 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006001
6002 trace_seq_printf(s, "now ts: %llu\n",
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006003 ring_buffer_time_stamp(trace_buf->buffer, cpu));
Yoshihiro YUNOMAE11043d82012-11-13 12:18:23 -08006004 }
Vaibhav Nagarnaikc64e1482011-08-16 14:46:16 -07006005
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006006 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
Slava Pestov884bfe82011-07-15 14:23:58 -07006007 trace_seq_printf(s, "dropped events: %ld\n", cnt);
6008
Steven Rostedt (Red Hat)12883ef2013-03-05 09:24:35 -05006009 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
Steven Rostedt (Red Hat)ad964702013-01-29 17:45:49 -05006010 trace_seq_printf(s, "read events: %ld\n", cnt);
6011
Steven Rostedt (Red Hat)5ac483782014-11-14 15:49:41 -05006012 count = simple_read_from_buffer(ubuf, count, ppos,
6013 s->buffer, trace_seq_used(s));
Steven Rostedtc8d77182009-04-29 18:03:45 -04006014
6015 kfree(s);
6016
6017 return count;
6018}
6019
/* File operations for the per-cpu "stats" file */
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
6026
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006027#ifdef CONFIG_DYNAMIC_FTRACE
6028
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006029int __weak ftrace_arch_read_dyn_info(char *buf, int size)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006030{
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006031 return 0;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006032}
6033
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006034static ssize_t
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006035tracing_read_dyn_info(struct file *filp, char __user *ubuf,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006036 size_t cnt, loff_t *ppos)
6037{
Steven Rostedta26a2a22008-10-31 00:03:22 -04006038 static char ftrace_dyn_info_buffer[1024];
6039 static DEFINE_MUTEX(dyn_info_mutex);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006040 unsigned long *p = filp->private_data;
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006041 char *buf = ftrace_dyn_info_buffer;
Steven Rostedta26a2a22008-10-31 00:03:22 -04006042 int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006043 int r;
6044
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006045 mutex_lock(&dyn_info_mutex);
6046 r = sprintf(buf, "%ld ", *p);
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006047
Steven Rostedta26a2a22008-10-31 00:03:22 -04006048 r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006049 buf[r++] = '\n';
6050
6051 r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6052
6053 mutex_unlock(&dyn_info_mutex);
6054
6055 return r;
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006056}
6057
Steven Rostedt5e2336a2009-03-05 21:44:55 -05006058static const struct file_operations tracing_dyn_info_fops = {
Ingo Molnar4bf39a92008-05-12 21:20:46 +02006059 .open = tracing_open_generic,
Steven Rostedtb807c3d2008-10-30 16:08:33 -04006060 .read = tracing_read_dyn_info,
Arnd Bergmannb4447862010-07-07 23:40:11 +02006061 .llseek = generic_file_llseek,
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006062};
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006063#endif /* CONFIG_DYNAMIC_FTRACE */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006064
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006065#if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
6066static void
6067ftrace_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006068{
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006069 tracing_snapshot();
6070}
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006071
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006072static void
6073ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip, void **data)
6074{
6075 unsigned long *count = (long *)data;
6076
6077 if (!*count)
6078 return;
6079
6080 if (*count != -1)
6081 (*count)--;
6082
6083 tracing_snapshot();
6084}
6085
/*
 * Show one snapshot probe entry in set_ftrace_filter output:
 * "<func>:snapshot:count=N" or ":unlimited" when count is -1.
 */
static int
ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
		      struct ftrace_probe_ops *ops, void *data)
{
	long count = (long)data;

	seq_printf(m, "%ps:", (void *)ip);

	seq_puts(m, "snapshot");

	if (count == -1)
		seq_puts(m, ":unlimited\n");
	else
		seq_printf(m, ":count=%ld\n", count);

	return 0;
}
6103
/* Probe ops for unlimited "snapshot" function triggers */
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};
6108
/* Probe ops for counted "snapshot:count=N" function triggers */
static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
};
6113
/*
 * Parser/registration callback for the "snapshot" ftrace function command
 * written to set_ftrace_filter, e.g. "func:snapshot" or
 * "func:snapshot:5".  A leading '!' unregisters the probe.  Returns 0 on
 * success or a negative errno.
 */
static int
ftrace_trace_snapshot_callback(struct ftrace_hash *hash,
			       char *glob, char *cmd, char *param, int enable)
{
	struct ftrace_probe_ops *ops;
	void *count = (void *)-1;
	char *number;
	int ret;

	/* hash funcs only work with set_ftrace_filter */
	if (!enable)
		return -EINVAL;

	ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;

	if (glob[0] == '!') {
		unregister_ftrace_function_probe_func(glob+1, ops);
		return 0;
	}

	if (!param)
		goto out_reg;

	number = strsep(&param, ":");

	if (!strlen(number))
		goto out_reg;

	/*
	 * We use the callback data field (which is a pointer)
	 * as our counter.
	 */
	ret = kstrtoul(number, 0, (unsigned long *)&count);
	if (ret)
		return ret;

 out_reg:
	ret = register_ftrace_function_probe(glob, ops, count);

	/* Make sure a snapshot buffer exists for the probe to use */
	if (ret >= 0)
		alloc_snapshot(&global_trace);

	return ret < 0 ? ret : 0;
}
6158
/* The "snapshot" command usable in set_ftrace_filter */
static struct ftrace_func_command ftrace_snapshot_cmd = {
	.name			= "snapshot",
	.func			= ftrace_trace_snapshot_callback,
};
6163
Tom Zanussi38de93a2013-10-24 08:34:18 -05006164static __init int register_snapshot_cmd(void)
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006165{
6166 return register_ftrace_command(&ftrace_snapshot_cmd);
6167}
6168#else
Tom Zanussi38de93a2013-10-24 08:34:18 -05006169static inline __init int register_snapshot_cmd(void) { return 0; }
Steven Rostedt (Red Hat)77fd5c12013-03-12 11:49:18 -04006170#endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
Steven Rostedtbc0c38d2008-05-12 21:20:42 +02006171
/*
 * Return the tracefs parent dentry for @tr: NULL for the top-level
 * (global) trace array, tr->dir for instances.  Returns
 * ERR_PTR(-ENODEV) (with a warning) if the directory was never set up.
 */
static struct dentry *tracing_get_dentry(struct trace_array *tr)
{
	if (WARN_ON(!tr->dir))
		return ERR_PTR(-ENODEV);

	/* Top directory uses NULL as the parent */
	if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
		return NULL;

	/* All sub buffers have a descriptor */
	return tr->dir;
}
6184
/*
 * Lazily create (and cache in tr->percpu_dir) the "per_cpu" tracefs
 * directory for @tr.  Returns the dentry, or NULL on failure.
 */
static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
{
	struct dentry *d_tracer;

	/* Already created? Reuse the cached dentry. */
	if (tr->percpu_dir)
		return tr->percpu_dir;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);

	WARN_ONCE(!tr->percpu_dir,
		  "Could not create tracefs directory 'per_cpu/%d'\n", cpu);

	return tr->percpu_dir;
}
6203
/*
 * Create a per-cpu tracefs file and stash (cpu + 1) in the inode's
 * i_cdev field — presumably so 0 can mean "no cpu"; tracing_get_cpu()
 * decodes it on open.
 */
static struct dentry *
trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
		      void *data, long cpu, const struct file_operations *fops)
{
	struct dentry *ret = trace_create_file(name, mode, parent, data, fops);

	if (ret) /* See tracing_get_cpu() */
		d_inode(ret)->i_cdev = (void *)(cpu + 1);
	return ret;
}
6214
/*
 * Populate per_cpu/cpu<N> for @tr with the per-cpu control files
 * (trace_pipe, trace, trace_pipe_raw, stats, buffer_size_kb, and the
 * snapshot files when CONFIG_TRACER_SNAPSHOT is enabled).
 */
static void
tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
{
	struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
	struct dentry *d_cpu;
	char cpu_dir[30]; /* 30 characters should be more than enough */

	if (!d_percpu)
		return;

	snprintf(cpu_dir, 30, "cpu%ld", cpu);
	d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
	if (!d_cpu) {
		pr_warning("Could not create tracefs '%s' entry\n", cpu_dir);
		return;
	}

	/* per cpu trace_pipe */
	trace_create_cpu_file("trace_pipe", 0444, d_cpu,
				tr, cpu, &tracing_pipe_fops);

	/* per cpu trace */
	trace_create_cpu_file("trace", 0644, d_cpu,
				tr, cpu, &tracing_fops);

	trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
				tr, cpu, &tracing_buffers_fops);

	trace_create_cpu_file("stats", 0444, d_cpu,
				tr, cpu, &tracing_stats_fops);

	trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
				tr, cpu, &tracing_entries_fops);

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_cpu_file("snapshot", 0644, d_cpu,
				tr, cpu, &snapshot_fops);

	trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
				tr, cpu, &snapshot_raw_fops);
#endif
}
6257
Steven Rostedt60a11772008-05-12 21:20:44 +02006258#ifdef CONFIG_FTRACE_SELFTEST
6259/* Let selftest have access to static functions in this file */
6260#include "trace_selftest.c"
6261#endif
6262
Steven Rostedt577b7852009-02-26 23:43:05 -05006263static ssize_t
6264trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
6265 loff_t *ppos)
6266{
6267 struct trace_option_dentry *topt = filp->private_data;
6268 char *buf;
6269
6270 if (topt->flags->val & topt->opt->bit)
6271 buf = "1\n";
6272 else
6273 buf = "0\n";
6274
6275 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6276}
6277
/*
 * Write handler for a tracer-specific option file.  Accepts only "0"
 * or "1"; toggles the option via __set_tracer_option() under
 * trace_types_lock, and only when the value actually changes.
 */
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		/*
		 * NOTE(review): third argument looks like a "negate" flag,
		 * hence !val when the new value differs — confirm against
		 * __set_tracer_option()'s definition.
		 */
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
6306
6307
/* File operations for per-tracer option files under options/ */
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
6314
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *   idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[1] == 1).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *   ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	/* data points at trace_flags_index[i], whose value is i */
	*pindex = *(unsigned char *)data;

	/* Walk back to index[0], then up to the enclosing trace_array */
	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
6347
Steven Rostedta8259072009-02-26 22:19:12 -05006348static ssize_t
6349trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
6350 loff_t *ppos)
6351{
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006352 void *tr_index = filp->private_data;
6353 struct trace_array *tr;
6354 unsigned int index;
Steven Rostedta8259072009-02-26 22:19:12 -05006355 char *buf;
6356
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006357 get_tr_index(tr_index, &tr, &index);
6358
6359 if (tr->trace_flags & (1 << index))
Steven Rostedta8259072009-02-26 22:19:12 -05006360 buf = "1\n";
6361 else
6362 buf = "0\n";
6363
6364 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
6365}
6366
/*
 * Write handler for a core trace option file.  Accepts only "0" or
 * "1"; flips the flag bit through set_tracer_flag() while holding
 * trace_types_lock.
 */
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	/* Recover the trace_array and flag index from the index array trick */
	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
6397
/* File operations for global (core) trace option files under options/ */
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
6404
/*
 * Thin wrapper around tracefs_create_file() that logs a warning on
 * failure.  Returns the new dentry, or NULL if creation failed.
 */
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warning("Could not create tracefs '%s' entry\n", name);

	return ret;
}
6419
6420
/*
 * Lazily create (and cache in tr->options) the "options" tracefs
 * directory for @tr.  Returns the dentry, or NULL on failure.
 */
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	/* Already created? Reuse the cached dentry. */
	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warning("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
6440
Steven Rostedt577b7852009-02-26 23:43:05 -05006441static void
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006442create_trace_option_file(struct trace_array *tr,
6443 struct trace_option_dentry *topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006444 struct tracer_flags *flags,
6445 struct tracer_opt *opt)
6446{
6447 struct dentry *t_options;
Steven Rostedt577b7852009-02-26 23:43:05 -05006448
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006449 t_options = trace_options_init_dentry(tr);
Steven Rostedt577b7852009-02-26 23:43:05 -05006450 if (!t_options)
6451 return;
6452
6453 topt->flags = flags;
6454 topt->opt = opt;
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006455 topt->tr = tr;
Steven Rostedt577b7852009-02-26 23:43:05 -05006456
Frederic Weisbecker5452af62009-03-27 00:25:38 +01006457 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
Steven Rostedt577b7852009-02-26 23:43:05 -05006458 &trace_options_fops);
6459
Steven Rostedt577b7852009-02-26 23:43:05 -05006460}
6461
/*
 * Create the options/ files for all of @tracer's flags in @tr, and
 * record the (tracer, files) pair in tr->topts so instance_rmdir()
 * can free them.  Silently returns on allocation failure, if the
 * tracer is not valid for this instance, or if the same flags were
 * already added (tracers may share a tracer_flags struct).
 */
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/*
		 * Check if these flags have already been added.
		 * Some tracers share flags.
		 */
		if (tr->topts[i].tracer->flags == tracer->flags)
			return;
	}

	opts = flags->opts;

	/* Count the options (the array is NULL-name terminated) */
	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	/* Grow the per-array bookkeeping list by one entry */
	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
6525
Steven Rostedta8259072009-02-26 22:19:12 -05006526static struct dentry *
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006527create_trace_option_core_file(struct trace_array *tr,
6528 const char *option, long index)
Steven Rostedta8259072009-02-26 22:19:12 -05006529{
6530 struct dentry *t_options;
Steven Rostedta8259072009-02-26 22:19:12 -05006531
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006532 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006533 if (!t_options)
6534 return NULL;
6535
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006536 return trace_create_file(option, 0644, t_options,
6537 (void *)&tr->trace_flags_index[index],
6538 &trace_options_core_fops);
Steven Rostedta8259072009-02-26 22:19:12 -05006539}
6540
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006541static void create_trace_options_dir(struct trace_array *tr)
Steven Rostedta8259072009-02-26 22:19:12 -05006542{
6543 struct dentry *t_options;
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006544 bool top_level = tr == &global_trace;
Steven Rostedta8259072009-02-26 22:19:12 -05006545 int i;
6546
Steven Rostedt2b6080f2012-05-11 13:29:49 -04006547 t_options = trace_options_init_dentry(tr);
Steven Rostedta8259072009-02-26 22:19:12 -05006548 if (!t_options)
6549 return;
6550
Steven Rostedt (Red Hat)16270142015-09-30 12:30:06 -04006551 for (i = 0; trace_options[i]; i++) {
6552 if (top_level ||
6553 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
6554 create_trace_option_core_file(tr, trace_options[i], i);
6555 }
Steven Rostedta8259072009-02-26 22:19:12 -05006556}
6557
Steven Rostedt499e5472012-02-22 15:50:28 -05006558static ssize_t
6559rb_simple_read(struct file *filp, char __user *ubuf,
6560 size_t cnt, loff_t *ppos)
6561{
Steven Rostedt348f0fc2012-04-16 15:41:28 -04006562 struct trace_array *tr = filp->private_data;
Steven Rostedt499e5472012-02-22 15:50:28 -05006563 char buf[64];
6564 int r;
6565
Steven Rostedt (Red Hat)10246fa2013-07-01 15:58:24 -04006566 r = tracer_tracing_is_on(tr);
Steven Rostedt499e5472012-02-22 15:50:28 -05006567 r = sprintf(buf, "%d\n", r);
6568
6569 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6570}
6571
/*
 * Write handler for "tracing_on": non-zero enables recording (and calls
 * the current tracer's ->start), zero disables it (and calls ->stop).
 * Serialized against tracer changes by trace_types_lock.
 */
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (val) {
			/* Turn the buffer on before notifying the tracer */
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
6603
/* File operations for the per-array "tracing_on" switch */
static const struct file_operations rb_simple_fops = {
	.open		= tracing_open_generic_tr,
	.read		= rb_simple_read,
	.write		= rb_simple_write,
	.release	= tracing_release_generic_tr,
	.llseek		= default_llseek,
};
6611
/* tracefs "instances" directory; new sub-arrays are created under it */
struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
Steven Rostedt277ba042012-08-03 16:10:49 -04006616
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006617static int
6618allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
Steven Rostedt277ba042012-08-03 16:10:49 -04006619{
6620 enum ring_buffer_flags rb_flags;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006621
Steven Rostedt (Red Hat)983f9382015-09-30 09:42:05 -04006622 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006623
Steven Rostedt (Red Hat)dced3412014-01-14 10:19:46 -05006624 buf->tr = tr;
6625
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006626 buf->buffer = ring_buffer_alloc(size, rb_flags);
6627 if (!buf->buffer)
6628 return -ENOMEM;
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006629
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006630 buf->data = alloc_percpu(struct trace_array_cpu);
6631 if (!buf->data) {
6632 ring_buffer_free(buf->buffer);
6633 return -ENOMEM;
6634 }
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006635
Steven Rostedt (Red Hat)737223f2013-03-05 21:13:47 -05006636 /* Allocate the first page for all buffers */
6637 set_buffer_entries(&tr->trace_buffer,
6638 ring_buffer_size(tr->trace_buffer.buffer, 0));
6639
Steven Rostedt (Red Hat)55034cd2013-03-07 22:48:09 -05006640 return 0;
6641}
6642
/*
 * Allocate @tr's main trace buffer, and (with CONFIG_TRACER_MAX_TRACE)
 * its max/snapshot buffer.  The snapshot buffer is only fully sized when
 * requested on the kernel command line; otherwise it gets a minimal 1kb.
 * Returns 0 on success or -ENOMEM after freeing the main buffer.
 */
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		/* Roll back the main buffer allocated above */
		ring_buffer_free(tr->trace_buffer.buffer);
		free_percpu(tr->trace_buffer.data);
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
6669
Steven Rostedt (Red Hat)f0b70cc2014-06-10 12:06:30 -04006670static void free_trace_buffer(struct trace_buffer *buf)
6671{
6672 if (buf->buffer) {
6673 ring_buffer_free(buf->buffer);
6674 buf->buffer = NULL;
6675 free_percpu(buf->data);
6676 buf->data = NULL;
6677 }
6678}
6679
/* Free both of @tr's buffers (main, and max buffer when configured) */
static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
6691
Steven Rostedt (Red Hat)9a38a882015-09-30 11:11:15 -04006692static void init_trace_flags_index(struct trace_array *tr)
6693{
6694 int i;
6695
6696 /* Used by the trace options files */
6697 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
6698 tr->trace_flags_index[i] = i;
6699}
6700
/*
 * Create option files in @tr for every registered tracer.
 * Caller must hold trace_types_lock (see update_tracer_options()).
 */
static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}
6708
/* Locked wrapper around __update_tracer_options() */
static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
6715
/*
 * tracefs callback for "mkdir instances/<name>": allocate and register
 * a new trace_array with its own buffers, directory tree, and option
 * files.  Returns 0 on success, -EEXIST for a duplicate name, or
 * -ENOMEM on allocation/creation failure (all partial state is undone).
 */
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&trace_types_lock);

	/* Reject duplicate instance names */
	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	/* New instances inherit the global flag settings */
	tr->trace_flags = global_trace.trace_flags;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	/* Start with the no-op tracer */
	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	/* Populate the instance directory and its option files */
	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);

	return 0;

 out_free_tr:
	/* Safe on partially-built tr: the free helpers tolerate NULLs */
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;

}
6789
/*
 * tracefs callback for "rmdir instances/<name>": tear down and free the
 * named trace_array.  Returns 0 on success, -ENODEV if no such instance,
 * or -EBUSY while the instance or its current tracer is still referenced.
 */
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	/* Refuse removal while open files or the tracer hold references */
	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Quiesce the instance, then dismantle its files and buffers */
	tracing_set_nop(tr);
	event_trace_del_tracer(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	/* Free the per-tracer option bookkeeping added by
	 * create_trace_option_files() */
	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);

	return ret;
}
6836
/*
 * Create the "instances" directory and hook mkdir/rmdir inside it to
 * instance creation/removal.  Boot-time only (__init).
 */
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
6845
/*
 * Populate @d_tracer with the standard control files for trace array
 * @tr (current_tracer, trace, trace_pipe, buffer sizes, markers, clock,
 * tracing_on, options/, optional max-latency and snapshot files, and
 * the per_cpu/ tree).  Used for both the top level and instances.
 */
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			  tr, &tracing_saved_tgids_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	create_trace_options_dir(tr);

#ifdef CONFIG_TRACER_MAX_TRACE
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

}
6909
Steven Rostedt (Red Hat)f76180b2015-01-20 15:48:46 -05006910static struct vfsmount *trace_automount(void *ingore)
6911{
6912 struct vfsmount *mnt;
6913 struct file_system_type *type;
6914
6915 /*
6916 * To maintain backward compatibility for tools that mount
6917 * debugfs to get to the tracing facility, tracefs is automatically
6918 * mounted to the debugfs/tracing directory.
6919 */
6920 type = get_fs_type("tracefs");
6921 if (!type)
6922 return NULL;
6923 mnt = vfs_kern_mount(type, 0, "tracefs", NULL);
6924 put_filesystem(type);
6925 if (IS_ERR(mnt))
6926 return NULL;
6927 mntget(mnt);
6928
6929 return mnt;
6930}
6931
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	/*
	 * tracefs must be up; if debugfs is built in, it must be up too
	 * so the backward-compat automount below can be created.
	 */
	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	/* NULL parent means "top of tracefs" to the file creators. */
	return NULL;
}
6967
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04006968extern struct trace_enum_map *__start_ftrace_enum_maps[];
6969extern struct trace_enum_map *__stop_ftrace_enum_maps[];
6970
6971static void __init trace_enum_init(void)
6972{
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04006973 int len;
6974
6975 len = __stop_ftrace_enum_maps - __start_ftrace_enum_maps;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006976 trace_insert_enum_map(NULL, __start_ftrace_enum_maps, len);
Steven Rostedt (Red Hat)0c564a52015-03-24 17:58:09 -04006977}
6978
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04006979#ifdef CONFIG_MODULES
/*
 * trace_module_add_enums - register a loading module's enum maps
 * @mod: the module being loaded
 *
 * Skips modules that carry no enum maps or that are tainted in a way
 * that already prevented their trace events from being created.
 */
static void trace_module_add_enums(struct module *mod)
{
	if (!mod->num_trace_enums)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_enum_map(mod, mod->trace_enums, mod->num_trace_enums);
}
6994
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04006995#ifdef CONFIG_TRACE_ENUM_MAP_FILE
/*
 * trace_module_remove_enums - drop a module's enum maps on unload
 * @mod: the module going away
 *
 * Walks the global trace_enum_maps list under trace_enum_mutex looking
 * for the block of entries owned by @mod, unlinks it from the list and
 * frees it.  The list stores each module's maps as one contiguous
 * allocation whose head/tail records are reached via
 * trace_enum_jmp_to_tail().
 */
static void trace_module_remove_enums(struct module *mod)
{
	union trace_enum_map_item *map;
	/* Pointer to the link that points at the current block. */
	union trace_enum_map_item **last = &trace_enum_maps;

	if (!mod->num_trace_enums)
		return;

	mutex_lock(&trace_enum_mutex);

	map = trace_enum_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		/* Hop over this block to its tail, remember the link. */
		map = trace_enum_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	/* Unlink the whole block, then free it as one allocation. */
	*last = trace_enum_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_enum_mutex);
}
7023#else
/* Without CONFIG_TRACE_ENUM_MAP_FILE no maps are kept, nothing to remove. */
static inline void trace_module_remove_enums(struct module *mod) { }
7025#endif /* CONFIG_TRACE_ENUM_MAP_FILE */
7026
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007027static int trace_module_notify(struct notifier_block *self,
7028 unsigned long val, void *data)
7029{
7030 struct module *mod = data;
7031
7032 switch (val) {
7033 case MODULE_STATE_COMING:
7034 trace_module_add_enums(mod);
7035 break;
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007036 case MODULE_STATE_GOING:
7037 trace_module_remove_enums(mod);
7038 break;
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007039 }
7040
7041 return 0;
7042}
7043
/* Registered in tracer_init_tracefs() under CONFIG_MODULES. */
static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
Steven Rostedt (Red Hat)98284132015-03-31 17:23:45 -04007048#endif /* CONFIG_MODULES */
Steven Rostedt (Red Hat)3673b8e2015-03-25 15:44:21 -04007049
/*
 * tracer_init_tracefs - create the top-level tracefs interface
 *
 * Runs as an fs_initcall (see bottom of file).  Sets up the global
 * trace array's files, the read-only informational files, the enum
 * map file, the instances directory, and registers the module
 * notifier that tracks per-module enum maps.
 */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	/* ERR_PTR means tracefs is unavailable; nothing more to do. */
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	/* Core kernel enum maps must be in before the file is created. */
	trace_enum_init();

	trace_create_enum_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
7093
/*
 * Panic notifier: dump the ftrace ring buffer to the console if the
 * user enabled ftrace_dump_on_oops (boot param or sysctl).
 */
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}
7101
/* Hooked into panic_notifier_list by tracer_alloc_buffers(). */
static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};
7107
7108static int trace_die_handler(struct notifier_block *self,
7109 unsigned long val,
7110 void *data)
7111{
7112 switch (val) {
7113 case DIE_OOPS:
Steven Rostedt944ac422008-10-23 19:26:08 -04007114 if (ftrace_dump_on_oops)
Frederic Weisbeckercecbca92010-04-18 19:08:41 +02007115 ftrace_dump(ftrace_dump_on_oops);
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007116 break;
7117 default:
7118 break;
7119 }
7120 return NOTIFY_OK;
7121}
7122
/* Registered via register_die_notifier() in tracer_alloc_buffers(). */
static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
7127
7128/*
7129 * printk is set to max of 1024, we really don't need it that big.
7130 * Nothing should be printing 1000 characters anyway.
7131 */
7132#define TRACE_MAX_PRINT 1000
7133
7134/*
7135 * Define here KERN_TRACE so that we have one place to modify
7136 * it if we decide to change what log level the ftrace dump
7137 * should be at.
7138 */
Steven Rostedt428aee12009-01-14 12:24:42 -05007139#define KERN_TRACE KERN_EMERG
Steven Rostedt3f5a54e2008-07-30 22:36:46 -04007140
/*
 * trace_printk_seq - print a trace_seq to the console and reset it
 * @s: the sequence buffer to flush
 *
 * Used by ftrace_dump() to emit one formatted trace line at a time.
 * The length is clamped twice: once to TRACE_MAX_PRINT (printk's own
 * limit is not much bigger) and once, defensively, to the buffer size.
 */
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	/* Reset so the caller can reuse the same static buffer. */
	trace_seq_init(s);
}
7163
/*
 * trace_init_global_iter - set up an iterator over the global trace array
 * @iter: the iterator to initialize
 *
 * Points @iter at global_trace (all CPUs), invokes the current
 * tracer's open() hook if it has one, and seeds the iterator flags
 * from the buffer/clock state.  Used by ftrace_dump() and kdb.
 */
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
7182
/*
 * ftrace_dump - dump the ftrace ring buffer to the console
 * @oops_dump_mode: DUMP_ALL (all CPUs), DUMP_ORIG (current CPU only),
 *                  or DUMP_NONE (restore state and do nothing)
 *
 * Called from panic/die notifiers and sysrq-z.  May run in NMI/panic
 * context, so it avoids locks: a static atomic gates concurrent
 * dumpers, per-cpu ->disabled counters stop new writes, and output
 * goes through printk at KERN_TRACE (KERN_EMERG) level.
 */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	/* Stop writers on every CPU so the buffer is stable while read. */
	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("# MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read the
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		/* This loop can run long; keep the NMI watchdog quiet. */
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE " (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	/* Restore the user-symbol flag and re-enable per-cpu writers. */
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
Frederic Weisbeckercf586b62009-03-22 05:04:35 +01007293
/*
 * tracer_alloc_buffers - allocate and wire up the global trace array
 *
 * Called from trace_init() during early boot.  Allocates the cpumasks,
 * the event-trigger temp buffer, the saved-cmdline table and the ring
 * buffers, installs the nop tracer, and registers the panic/die
 * notifiers.  On failure, unwinds in reverse order via the goto chain
 * and returns a negative errno.
 */
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/* Used for event triggers */
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_free_cpumask;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	/* Honor a "tracing_off" request made before buffers existed. */
	if (global_trace.buffer_disabled)
		tracing_off();

	/* Apply the trace_clock= boot parameter, if one was given. */
	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warning("Trace clock %s not defined, going back to default\n",
				   trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007398
/*
 * trace_init - early-boot entry point for the tracing subsystem
 *
 * Allocates the iterator used by the tracepoint_printk facility (and
 * disables that facility if the allocation fails), then sets up the
 * trace buffers and the trace event subsystem.
 */
void __init trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
	}
	tracer_alloc_buffers();
	trace_event_init();
}
7410
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007411__init static int clear_boot_tracer(void)
7412{
7413 /*
7414 * The default tracer at boot buffer is an init section.
7415 * This function is called in lateinit. If we did not
7416 * find the boot tracer, then clear it out, to prevent
7417 * later registration from accessing the buffer that is
7418 * about to be freed.
7419 */
7420 if (!default_bootup_tracer)
7421 return 0;
7422
7423 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
7424 default_bootup_tracer);
7425 default_bootup_tracer = NULL;
7426
7427 return 0;
7428}
7429
Steven Rostedt (Red Hat)8434dc92015-01-20 12:13:40 -05007430fs_initcall(tracer_init_tracefs);
Steven Rostedtb2821ae2009-02-02 21:38:32 -05007431late_initcall(clear_boot_tracer);