/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
		return 1;
	}
	return 0;
}

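/*
 * Verify one CPU's buffer: walk the circular list of trace pages
 * (data->trace_pages), checking that each of the tr->entries slots
 * that has been written holds an entry of a known type, and that
 * the page list ends exactly where the entry count says it should.
 */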
static int
trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
{
	struct trace_entry *entries;
	struct page *page;
	int idx = 0;
	int i;

	BUG_ON(list_empty(&data->trace_pages));
	page = list_entry(data->trace_pages.next, struct page, lru);
	entries = page_address(page);

	if (head_page(data) != entries)
		goto failed;

	/*
	 * The starting trace buffer always has valid elements,
	 * if any element exists.
	 */
	entries = head_page(data);

	for (i = 0; i < tr->entries; i++) {

		if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
			printk(KERN_CONT ".. invalid entry %d ",
				entries[idx].type);
			goto failed;
		}

		idx++;
		if (idx >= ENTRIES_PER_PAGE) {
			page = virt_to_page(entries);
			if (page->lru.next == &data->trace_pages) {
				if (i != tr->entries - 1) {
					printk(KERN_CONT ".. entries buffer mismatch");
					goto failed;
				}
			} else {
				page = list_entry(page->lru.next, struct page, lru);
				entries = page_address(page);
			}
			idx = 0;
		}
	}

	page = virt_to_page(entries);
	if (page->lru.next != &data->trace_pages) {
		printk(KERN_CONT ".. too many entries");
		goto failed;
	}

	return 0;

 failed:
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long cnt = 0;
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		if (!head_page(tr->data[cpu]))
			continue;

		cnt += tr->data[cpu]->trace_idx;

		ret = trace_test_buffer_cpu(tr, tr->data[cpu]);
		if (ret)
			break;
	}

	if (count)
		*count = cnt;

	return ret;
}

#ifdef CONFIG_FTRACE

#ifdef CONFIG_DYNAMIC_FTRACE

#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
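/*
 * Two levels of macro expansion are needed here: STR() expands its
 * argument first, so STR(DYN_FTRACE_TEST_NAME) yields the string
 * "trace_selftest_dynamic_test_func" rather than the literal text
 * "DYN_FTRACE_TEST_NAME".
 */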
#define __STR(x) #x
#define STR(x) __STR(x)
static int DYN_FTRACE_TEST_NAME(void)
{
	/* used to call mcount */
	return 0;
}

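/*
 * Outline of the test below: force an update of the ftrace records,
 * install a filter on the single test function, call it once, and
 * then verify that exactly one entry landed in the trace buffer.
 */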
/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in by parameter so that gcc cannot optimize the call away */
	func();

	/* update the records */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* filter only on our function */
	ftrace_set_filter(STR(DYN_FTRACE_TEST_NAME),
			  sizeof(STR(DYN_FTRACE_TEST_NAME)), 1);

	/* enable tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed ..");
		ret = -1;
		goto out;
	}
 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;

	/* make sure msleep has been recorded */
	msleep(1);

	/* force the recorded functions to be traced */
	ret = ftrace_force_update();
	if (ret) {
		printk(KERN_CONT ".. ftraced failed .. ");
		return ret;
	}

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	return ret;
}
#endif /* CONFIG_FTRACE */

#ifdef CONFIG_IRQSOFF_TRACER
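/*
 * The irqsoff tracer records the longest span with interrupts
 * disabled, copying the worst-case trace into max_tr.  Disabling
 * interrupts around a udelay(100) should therefore leave entries
 * in both the live buffer and the max-latency buffer.
 */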
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
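/*
 * Same pattern as the irqsoff test above, but the latency window is
 * created by disabling preemption instead of interrupts.
 */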
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
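/*
 * The preemptirqsoff tracer measures the time that either preemption
 * or interrupts (or both) are disabled.  Each overlapping
 * preempt_disable()/local_irq_disable() section below should be
 * recorded as one combined latency, and the test runs twice to make
 * sure a second pass with a reset max latency also produces entries.
 */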
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);
	if (ret)
		goto out;

	if (!count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* run the test a second time, with the max latency reset */
	tracing_max_latency = 0;
	tr->ctrl = 1;
	trace->ctrl_update(tr);
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	struct completion *x = data;

	/* Make this an RT thread, doesn't need to be too high */

	rt_mutex_setprio(current, MAX_RT_PRIO - 5);

	/* Let the test know we are running at our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

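/*
 * The wakeup tracer measures the latency from the wakeup of the
 * highest-priority task to the moment it actually gets the CPU.
 * Waking the sleeping RT thread created above should therefore
 * produce a max-latency trace in max_tr.
 */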
int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that already is awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
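/*
 * The context-switch tracer logs every sched switch; sleeping for
 * 100ms guarantees at least one switch away from this task, so the
 * buffer should not come back empty.
 */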
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	tr->ctrl = 1;
	trace->init(tr);
	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tr->ctrl = 0;
	trace->ctrl_update(tr);
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */