/* Include in trace.c */

#include <linux/kthread.h>
#include <linux/delay.h>

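/*
 * The entry types accepted by trace_valid_entry() below come from
 * enum trace_type in trace.h; anything else found in the ring buffer
 * after a selftest run is treated as corruption.
 */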
static inline int trace_valid_entry(struct trace_entry *entry)
{
	switch (entry->type) {
	case TRACE_FN:
	case TRACE_CTX:
	case TRACE_WAKE:
	case TRACE_STACK:
	case TRACE_PRINT:
	case TRACE_SPECIAL:
	case TRACE_BRANCH:
	case TRACE_GRAPH_ENT:
	case TRACE_GRAPH_RET:
		return 1;
	}
	return 0;
}

static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
{
	struct ring_buffer_event *event;
	struct trace_entry *entry;
	unsigned int loops = 0;

	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
		entry = ring_buffer_event_data(event);

		/*
		 * The ring buffer is of size trace_buf_size; if we
		 * loop more times than that, something is wrong
		 * with the ring buffer.
		 */
		if (loops++ > trace_buf_size) {
			printk(KERN_CONT ".. bad ring buffer ");
			goto failed;
		}
		if (!trace_valid_entry(entry)) {
			printk(KERN_CONT ".. invalid entry %d ",
				entry->type);
			goto failed;
		}
	}
	return 0;

 failed:
	/* disable tracing */
	tracing_disabled = 1;
	printk(KERN_CONT ".. corrupted trace buffer .. ");
	return -1;
}

/*
 * Test the trace buffer to see if all the elements
 * are still sane.
 */
static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
{
	unsigned long flags, cnt = 0;
	int cpu, ret = 0;

	/* Don't allow flipping of max traces now */
	local_irq_save(flags);
	__raw_spin_lock(&ftrace_max_lock);

	cnt = ring_buffer_entries(tr->buffer);

	/*
	 * The trace_test_buffer_cpu runs a while loop to consume all data.
	 * If the calling tracer is broken, and is constantly filling
	 * the buffer, this will run forever, and hard lock the box.
	 * We disable the ring buffer while we do this test to prevent
	 * a hard lock up.
	 */
	tracing_off();
	for_each_possible_cpu(cpu) {
		ret = trace_test_buffer_cpu(tr, cpu);
		if (ret)
			break;
	}
	tracing_on();
	__raw_spin_unlock(&ftrace_max_lock);
	local_irq_restore(flags);

	if (count)
		*count = cnt;

	return ret;
}

static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
{
	printk(KERN_WARNING "Failed to init %s tracer, init returned %d\n",
	       trace->name, init_ret);
}
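
/*
 * For reference, a minimal sketch (not part of this file, loosely
 * following trace_functions.c) of how a tracer hooks up one of the
 * trace_selftest_startup_*() routines below. When the tracer is
 * registered, register_tracer() in trace.c runs the ->selftest()
 * callback and refuses to register the tracer if the test fails:
 *
 *	static struct tracer function_trace __read_mostly = {
 *		.name		= "function",
 *		.init		= function_trace_init,
 *		.reset		= function_trace_reset,
 *	#ifdef CONFIG_FTRACE_STARTUP_TEST
 *		.selftest	= trace_selftest_startup_function,
 *	#endif
 *	};
 */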
#ifdef CONFIG_FUNCTION_TRACER

#ifdef CONFIG_DYNAMIC_FTRACE

#define __STR(x) #x
#define STR(x) __STR(x)
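
/*
 * The two-level macro above is the usual stringification idiom: the
 * extra __STR() pass expands the argument before '#' turns it into a
 * string. For example, assuming DYN_FTRACE_TEST_NAME is defined as
 * trace_selftest_dynamic_test_func (the actual name comes from
 * trace.h), then
 *
 *	STR(DYN_FTRACE_TEST_NAME)
 *
 * yields the literal "trace_selftest_dynamic_test_func", whereas
 * __STR(DYN_FTRACE_TEST_NAME) would yield "DYN_FTRACE_TEST_NAME".
 */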

/* Test dynamic code modification and ftrace filters */
int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
					   struct trace_array *tr,
					   int (*func)(void))
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	char *func_name;
	int ret;

	/* The ftrace test PASSED */
	printk(KERN_CONT "PASSED\n");
	pr_info("Testing dynamic ftrace: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* passed in as a parameter to keep gcc from optimizing the call away */
	func();

	/*
	 * Some archs *cough*PowerPC*cough* add characters to the
	 * start of the function names. We simply put a '*' to
	 * accommodate them.
	 */
	func_name = "*" STR(DYN_FTRACE_TEST_NAME);
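
	/*
	 * For reference, a minimal sketch of the test function itself.
	 * It lives outside this file, in trace_selftest_dynamic.c, which
	 * is built with -pg so that it keeps its mcount call even though
	 * the rest of the tracing code is not instrumented (assuming
	 * roughly this shape):
	 *
	 *	int DYN_FTRACE_TEST_NAME(void)
	 *	{
	 *		return 0;	// exists only so mcount is called
	 *	}
	 */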

	/* filter only on our function */
	ftrace_set_filter(func_name, strlen(func_name), 1);

	/* enable tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	/* we should have nothing in the buffer */
	ret = trace_test_buffer(tr, &count);
	if (ret)
		goto out;

	if (count) {
		ret = -1;
		printk(KERN_CONT ".. filter did not filter .. ");
		goto out;
	}

	/* call our function again */
	func();

	/* sleep again */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	/* we should only have one item */
	if (!ret && count != 1) {
		printk(KERN_CONT ".. filter failed count=%ld ..", count);
		ret = -1;
		goto out;
	}

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* Enable tracing on all functions again */
	ftrace_set_filter(NULL, 0, 1);

	return ret;
}
#else
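/*
 * With dynamic ftrace disabled there is nothing to test; the statement
 * expression below simply evaluates to 0 so callers can treat it as a
 * passing selftest.
 */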
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */
/*
 * Simple verification test of ftrace function tracer.
 * Enable ftrace, sleep 1/10 second, and then read the trace
 * buffer to see if all is in order.
 */
int
trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	unsigned long count;
	int ret;

	/* make sure msleep has been recorded */
	msleep(1);

	/* start the tracing */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	ftrace_enabled = 0;

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	ret = trace_selftest_startup_dynamic_tracing(trace, tr,
						     DYN_FTRACE_TEST_NAME);

 out:
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	/* kill ftrace totally if we failed */
	if (ret)
		ftrace_kill();

	return ret;
}
#endif /* CONFIG_FUNCTION_TRACER */


#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Pretty much the same as the function tracer selftest above, from
 * which this one has been borrowed.
 */
int
trace_selftest_startup_function_graph(struct tracer *trace,
				      struct trace_array *tr)
{
	int ret;
	unsigned long count;

	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);

	tracing_stop();

	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);

	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

	/* Don't test dynamic tracing, the function tracer already did */

 out:
	/* Stop it if we failed */
	if (ret)
		ftrace_graph_stop();

	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */


#ifdef CONFIG_IRQSOFF_TRACER
int
trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable interrupts for a bit */
	local_irq_disable();
	udelay(100);
	local_irq_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER */

#ifdef CONFIG_PREEMPT_TRACER
int
trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;
	/* disable preemption for a bit */
	preempt_disable();
	udelay(100);
	preempt_enable();
	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_PREEMPT_TRACER */

#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
int
trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	unsigned long count;
	int ret;

	/*
	 * Now that the big kernel lock is no longer preemptible,
	 * and this is called with the BKL held, it will always
	 * fail. If preemption is already disabled, simply
	 * pass the test. When the BKL is removed, or becomes
	 * preemptible again, we will once again test this,
	 * so keep it in.
	 */
	if (preempt_count()) {
		printk(KERN_CONT "can not test ... force ");
		return 0;
	}

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		goto out;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* disable preemption and interrupts for a bit */
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret) {
		tracing_start();
		goto out;
	}

	ret = trace_test_buffer(&max_tr, &count);
	if (ret) {
		tracing_start();
		goto out;
	}

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		tracing_start();
		goto out;
	}

	/* do the test by disabling interrupts first this time */
	tracing_max_latency = 0;
	tracing_start();
	preempt_disable();
	local_irq_disable();
	udelay(100);
	preempt_enable();
	/* reverse the order of preempt vs irqs */
	local_irq_enable();

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (ret)
		goto out;

	ret = trace_test_buffer(&max_tr, &count);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
		goto out;
	}

 out:
	trace->reset(tr);
	tracing_start();
	tracing_max_latency = save_max;

	return ret;
}
#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */

#ifdef CONFIG_NOP_TRACER
int
trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
{
	/* What could possibly go wrong? */
	return 0;
}
#endif

#ifdef CONFIG_SCHED_TRACER
static int trace_wakeup_test_thread(void *data)
{
	/* Make this an RT thread, doesn't need to be too high */
	struct sched_param param = { .sched_priority = 5 };
	struct completion *x = data;

	sched_setscheduler(current, SCHED_FIFO, &param);

	/* Let the test know we have our new prio */
	complete(x);

	/* now go to sleep and let the test wake us up */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule();

	/* we are awake, now wait to disappear */
	while (!kthread_should_stop()) {
		/*
		 * This is an RT task, do short sleeps to let
		 * others run.
		 */
		msleep(100);
	}

	return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
	unsigned long save_max = tracing_max_latency;
	struct task_struct *p;
	struct completion isrt;
	unsigned long count;
	int ret;

	init_completion(&isrt);

	/* create a high prio thread */
	p = kthread_run(trace_wakeup_test_thread, &isrt, "ftrace-test");
	if (IS_ERR(p)) {
		printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
		return -1;
	}

	/* make sure the thread is running at an RT prio */
	wait_for_completion(&isrt);

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* reset the max latency */
	tracing_max_latency = 0;

	/* sleep to let the RT thread sleep too */
	msleep(100);

	/*
	 * Yes, this is slightly racy. It is possible that for some
	 * strange reason the RT thread we created did not call
	 * schedule for 100ms after doing the completion, and we do
	 * a wakeup on a task that is already awake. But that is
	 * extremely unlikely, and the worst thing that happens in
	 * such a case is that we disable tracing. Honestly, if this
	 * race does happen, something is horribly wrong with the
	 * system.
	 */

	wake_up_process(p);

	/* give a little time to let the thread wake up */
	msleep(100);

	/* stop the tracing. */
	tracing_stop();
	/* check both trace buffers */
	ret = trace_test_buffer(tr, NULL);
	if (!ret)
		ret = trace_test_buffer(&max_tr, &count);

	trace->reset(tr);
	tracing_start();

	tracing_max_latency = save_max;

	/* kill the thread */
	kthread_stop(p);

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SCHED_TRACER */

#ifdef CONFIG_CONTEXT_SWITCH_TRACER
int
trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_CONTEXT_SWITCH_TRACER */

#ifdef CONFIG_SYSPROF_TRACER
int
trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_SYSPROF_TRACER */

#ifdef CONFIG_BRANCH_TRACER
int
trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	/* start the tracing */
	ret = tracer_init(trace, tr);
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;
	}

	/* Sleep for 1/10 of a second */
	msleep(100);
	/* stop the tracing. */
	tracing_stop();
	/* check the trace buffer */
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}

	return ret;
}
#endif /* CONFIG_BRANCH_TRACER */