/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

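/*
 * These selftests are run at tracer registration time (see
 * register_tracer() in trace.c) when CONFIG_FTRACE_STARTUP_TEST is
 * enabled; a nonzero return fails the tracer's startup test.
 */
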
static inline int trace_valid_entry(struct trace_entry *entry)
{
        switch (entry->type) {
        case TRACE_FN:
        case TRACE_CTX:
        case TRACE_WAKE:
        case TRACE_STACK:
        case TRACE_PRINT:
        case TRACE_SPECIAL:
        case TRACE_BRANCH:
        case TRACE_GRAPH_ENT:
        case TRACE_GRAPH_RET:
                return 1;
        }
        return 0;
}

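/*
 * Consume every event on one CPU's ring buffer, verifying each entry
 * type along the way.  Returns 0 if the buffer looks sane, or -1
 * (after disabling tracing) if it appears corrupted.
 */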
static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
        struct ring_buffer_event *event;
        struct trace_entry *entry;
        unsigned int loops = 0;

        while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
                entry = ring_buffer_event_data(event);

                /*
                 * The ring buffer holds at most trace_buf_size entries;
                 * if we loop more times than that, something is wrong
                 * with the ring buffer.
                 */
                if (loops++ > trace_buf_size) {
                        printk(KERN_CONT ".. bad ring buffer ");
                        goto failed;
                }
                if (!trace_valid_entry(entry)) {
                        printk(KERN_CONT ".. invalid entry %d ",
                               entry->type);
                        goto failed;
                }
        }
        return 0;

 failed:
        /* disable tracing */
        tracing_disabled = 1;
        printk(KERN_CONT ".. corrupted trace buffer .. ");
        return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
        unsigned long flags, cnt = 0;
        int cpu, ret = 0;

        /* Don't allow flipping of max traces now */
        local_irq_save(flags);
        __raw_spin_lock(&ftrace_max_lock);

        cnt = ring_buffer_entries(tr->buffer);

        /*
         * trace_test_buffer_cpu() runs a while loop to consume all data.
         * If the calling tracer is broken and constantly filling the
         * buffer, that loop would run forever and hard-lock the box.
         * Disable the ring buffer while the test runs to prevent such
         * a hard lockup.
         */
        tracing_off();
        for_each_possible_cpu(cpu) {
                ret = trace_test_buffer_cpu(tr, cpu);
                if (ret)
                        break;
        }
        tracing_on();
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_restore(flags);

        if (count)
                *count = cnt;

        return ret;
}

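/* Common helper to report a tracer whose init callback returned an error */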
static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
        printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
               trace->name, init_ret);
}
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

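/*
 * Two-level stringification: the inner macro lets DYN_FTRACE_TEST_NAME
 * expand to the real function name before '#' turns it into a string.
 * For example, if DYN_FTRACE_TEST_NAME were defined as foo (a
 * hypothetical name), STR(DYN_FTRACE_TEST_NAME) yields "foo" rather
 * than "DYN_FTRACE_TEST_NAME".
 */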
#define __STR(x) #x
#define STR(x) __STR(x)

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
                                           struct trace_array *tr,
                                           int (*func)(void))
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        char *func_name;
        int ret;

        /* The ftrace test PASSED */
        printk(KERN_CONT "PASSED\n");
        pr_info("Testing dynamic ftrace: ");

        /* enable tracing, and record the filter function */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        /* passed in by parameter to keep gcc from optimizing it away */
        func();

        /*
         * Some archs *cough*PowerPC*cough* add characters to the
         * start of the function names. We simply put a '*' to
         * accommodate them.
         */
        func_name = "*" STR(DYN_FTRACE_TEST_NAME);

        /* filter only on our function */
        ftrace_set_filter(func_name, strlen(func_name), 1);

        /* enable tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

Steven Rostedt77a2b372008-05-12 21:20:45 +0200145 /* Sleep for a 1/10 of a second */
        msleep(100);

        /* we should have nothing in the buffer */
        ret = trace_test_buffer(tr, &count);
        if (ret)
                goto out;

        if (count) {
                ret = -1;
                printk(KERN_CONT ".. filter did not filter .. ");
                goto out;
        }

        /* call our function again */
        func();

        /* sleep again */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        /* we should only have one item */
        if (!ret && count != 1) {
                printk(KERN_CONT ".. filter failed count=%ld ..", count);
                ret = -1;
                goto out;
        }

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* Enable tracing on all functions again */
        ftrace_set_filter(NULL, 0, 1);

        return ret;
}
#else
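/* Without dynamic ftrace there is nothing to test; report success */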
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of the ftrace function tracer.
 * Enable ftrace, sleep for 1/10 of a second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
        int save_ftrace_enabled = ftrace_enabled;
        int save_tracer_enabled = tracer_enabled;
        unsigned long count;
        int ret;

        /* make sure msleep has been recorded */
        msleep(1);

        /* start the tracing */
        ftrace_enabled = 1;
        tracer_enabled = 1;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        ftrace_enabled = 0;

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        ret = trace_selftest_startup_dynamic_tracing(trace, tr,
                                                     DYN_FTRACE_TEST_NAME);

 out:
        ftrace_enabled = save_ftrace_enabled;
        tracer_enabled = save_tracer_enabled;

        /* kill ftrace totally if we failed */
        if (ret)
                ftrace_kill();

        return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as the function tracer selftest above,
 * from which this one was borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
                                      struct trace_array *tr)
{
        int ret;
        unsigned long count;

        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);

        tracing_stop();

        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);

        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

        /* Don't test dynamic tracing, the function tracer already did */

out:
        /* Stop it if we failed */
        if (ret)
                ftrace_graph_stop();

        return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
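/*
 * Verify the irqsoff tracer: disable interrupts for ~100us with
 * tracing active, then check that the max-latency buffer recorded
 * the irqs-off critical section.
 */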
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable interrupts for a bit */
        local_irq_disable();
        udelay(100);
        local_irq_enable();
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
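/*
 * Verify the preemptoff tracer: disable preemption for ~100us with
 * tracing active, then check that the max-latency buffer recorded
 * the preempt-off critical section.
 */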
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, this test will
         * always fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;
        /* disable preemption for a bit */
        preempt_disable();
        udelay(100);
        preempt_enable();
        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
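/*
 * Verify the combined preempt/irqsoff tracer: overlap irqs-off and
 * preempt-off critical sections in both enable orders, checking the
 * max-latency buffer after each pass.
 */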
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        unsigned long count;
        int ret;

        /*
         * Now that the big kernel lock is no longer preemptible,
         * and this is called with the BKL held, this test will
         * always fail. If preemption is already disabled, simply
         * pass the test. When the BKL is removed, or becomes
         * preemptible again, we will once again test this,
         * so keep it in.
         */
        if (preempt_count()) {
                printk(KERN_CONT "can not test ... force ");
                return 0;
        }

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                goto out;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* disable preemption and interrupts for a bit */
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret) {
                tracing_start();
                goto out;
        }

        ret = trace_test_buffer(&max_tr, &count);
        if (ret) {
                tracing_start();
                goto out;
        }

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                tracing_start();
                goto out;
        }

        /* do the test by disabling interrupts first this time */
        tracing_max_latency = 0;
        tracing_start();
        preempt_disable();
        local_irq_disable();
        udelay(100);
        preempt_enable();
        /* reverse the order of preempt vs irqs */
        local_irq_enable();

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (ret)
                goto out;

        ret = trace_test_buffer(&max_tr, &count);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
                goto out;
        }

 out:
        trace->reset(tr);
        tracing_start();
        tracing_max_latency = save_max;

        return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
        /* What could possibly go wrong? */
        return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
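/*
 * The wakeup test: create a SCHED_FIFO kthread, let it block, then
 * wake it with tracing active so that the wakeup latency of the RT
 * task lands in the max-latency buffer.
 */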
static int trace_wakeup_test_thread(void *data)
{
        /* Make this an RT thread; its priority doesn't need to be too high */
        struct sched_param param = { .sched_priority = 5 };
        struct completion *x = data;

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* Let the test know we now have our new prio */
        complete(x);

        /* now go to sleep and let the test wake us up */
        set_current_state(TASK_INTERRUPTIBLE);
        schedule();

        /* we are awake, now wait to disappear */
        while (!kthread_should_stop()) {
                /*
                 * This is an RT task, do short sleeps to let
                 * others run.
                 */
                msleep(100);
        }

        return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
        unsigned long save_max = tracing_max_latency;
        struct task_struct *p;
        struct completion isrt;
        unsigned long count;
        int ret;

        init_completion(&isrt);

        /* create a high prio thread */
        p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
        if (IS_ERR(p)) {
                printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
                return -1;
        }

        /* make sure the thread is running at an RT prio */
        wait_for_completion(&isrt);

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* reset the max latency */
        tracing_max_latency = 0;

        /* sleep to let the RT thread sleep too */
        msleep(100);

        /*
         * Yes, this is slightly racy. It is possible that for some
         * strange reason the RT thread we created did not call
         * schedule for 100ms after doing the completion, and we do
         * a wakeup on a task that is already awake. But that is
         * extremely unlikely, and the worst thing that happens in
         * such a case is that we disable tracing. Honestly, if this
         * race does happen, something is horribly wrong with the
         * system.
         */

        wake_up_process(p);

        /* give a little time to let the thread wake up */
        msleep(100);

        /* stop the tracing. */
        tracing_stop();
        /* check both trace buffers */
        ret = trace_test_buffer(tr, NULL);
        if (!ret)
                ret = trace_test_buffer(&max_tr, &count);

        trace->reset(tr);
        tracing_start();

        tracing_max_latency = save_max;

        /* kill the thread */
        kthread_stop(p);

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
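/*
 * Verify the sched_switch tracer: sleeping for 100ms should produce
 * at least one context switch to record.
 */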
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
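/*
 * Verify the sysprof tracer: let it run for 100ms and check that the
 * buffer is sane and non-empty.
 */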
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
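/*
 * Verify the branch tracer: run with branch profiling active for
 * 100ms and check that the buffer is sane and non-empty.
 */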
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
        unsigned long count;
        int ret;

        /* start the tracing */
        ret = tracer_init(trace, tr);
        if (ret) {
                warn_failed_init_tracer(trace, ret);
                return ret;
        }

        /* Sleep for 1/10 of a second */
        msleep(100);
        /* stop the tracing. */
        tracing_stop();
        /* check the trace buffer */
        ret = trace_test_buffer(tr, &count);
        trace->reset(tr);
        tracing_start();

        if (!ret && !count) {
                printk(KERN_CONT ".. no entries found ..");
                ret = -1;
        }

        return ret;
}
#endif /* CONFIG_BRANCH_TRACER */