tracing/filters: add run-time field descriptions to TRACE_EVENT_FORMAT events
kernel/trace/kmemtrace.c
/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/tracepoint.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/dcache.h>
#include <linux/fs.h>

#include <linux/kmemtrace.h>

#include "trace_output.h"
#include "trace.h"

/* Select an alternative, minimalistic output format instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL  0x1

static struct tracer_opt kmem_opts[] = {
        /* The minimalistic output is disabled by default */
        { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
        { }
};

static struct tracer_flags kmem_tracer_flags = {
        .val                    = 0,
        .opts                   = kmem_opts
};

static struct trace_array *kmemtrace_array;

/*
 * Trace allocations: reserve an entry in the ring buffer, fill it in,
 * give the event filter a chance to discard it, then commit and wake
 * up any waiting readers.
 */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        struct ftrace_event_call *call = &event_kmem_alloc;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_alloc_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type         = TRACE_KMEM_ALLOC;
        entry->type_id          = type_id;
        entry->call_site        = call_site;
        entry->ptr              = ptr;
        entry->bytes_req        = bytes_req;
        entry->bytes_alloc      = bytes_alloc;
        entry->gfp_flags        = gfp_flags;
        entry->node             = node;

        filter_check_discard(call, entry, event);

        ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}

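/*
 * Trace frees: same reserve/fill/filter/commit sequence as above, but
 * only the type, call site and pointer are recorded.
 */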
static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
                                  unsigned long call_site,
                                  const void *ptr)
{
        struct ftrace_event_call *call = &event_kmem_free;
        struct trace_array *tr = kmemtrace_array;
        struct kmemtrace_free_entry *entry;
        struct ring_buffer_event *event;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type         = TRACE_KMEM_FREE;
        entry->type_id          = type_id;
        entry->call_site        = call_site;
        entry->ptr              = ptr;

        filter_check_discard(call, entry, event);

        ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}

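/*
 * Probe callbacks attached to the kmalloc/kmem_cache tracepoints.
 * They funnel all variants into kmemtrace_alloc()/kmemtrace_free();
 * the non-node variants pass node = -1 ("no node information").
 */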
static void kmemtrace_kmalloc(unsigned long call_site,
                              const void *ptr,
                              size_t bytes_req,
                              size_t bytes_alloc,
                              gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
                                       const void *ptr,
                                       size_t bytes_req,
                                       size_t bytes_alloc,
                                       gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
                                            const void *ptr,
                                            size_t bytes_req,
                                            size_t bytes_alloc,
                                            gfp_t gfp_flags,
                                            int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}

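/*
 * Attach a probe to every kmemtrace tracepoint. If a registration
 * fails, the probes registered so far stay in place;
 * kmemtrace_stop_probes() unregisters all of them unconditionally.
 */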
static int kmemtrace_start_probes(void)
{
        int err;

        err = register_trace_kmalloc(kmemtrace_kmalloc);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
        if (err)
                return err;
        err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
        if (err)
                return err;
        err = register_trace_kfree(kmemtrace_kfree);
        if (err)
                return err;
        err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

        return err;
}

static void kmemtrace_stop_probes(void)
{
        unregister_trace_kmalloc(kmemtrace_kmalloc);
        unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
        unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
        unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
        unregister_trace_kfree(kmemtrace_kfree);
        unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}

static int kmem_trace_init(struct trace_array *tr)
{
        int cpu;

        kmemtrace_array = tr;

        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);

        /* Propagate probe-registration failure to the tracing core */
        return kmemtrace_start_probes();
}

static void kmem_trace_reset(struct trace_array *tr)
{
        kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
        /* Don't need headers for the original kmemtrace output */
        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return;

        seq_printf(s, "#\n");
        seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
                        "      POINTER         NODE    CALLER\n");
        seq_printf(s, "# FREE   |      |     |       |       "
                        "       |   |            |        |\n");
        seq_printf(s, "# |\n\n");
}

/*
 * The following functions reproduce the original binary output of
 * kmemtrace, plus the origin CPU, since event reordering now happens
 * in-kernel.
 */

#define KMEMTRACE_USER_ALLOC    0
#define KMEMTRACE_USER_FREE     1

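/*
 * Binary record layout handed to userspace: every event begins with a
 * struct kmemtrace_user_event; allocation events are followed by a
 * struct kmemtrace_user_event_alloc, and event_size covers both.
 */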
struct kmemtrace_user_event {
        u8                      event_id;
        u8                      type_id;
        u16                     event_size;
        u32                     cpu;
        u64                     timestamp;
        unsigned long           call_site;
        unsigned long           ptr;
};

struct kmemtrace_user_event_alloc {
        size_t                  bytes_req;
        size_t                  bytes_alloc;
        unsigned                gfp_flags;
        int                     node;
};

static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter,
                           struct kmemtrace_alloc_entry *entry)
{
        struct kmemtrace_user_event_alloc *ev_alloc;
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_user_event *ev;

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id            = KMEMTRACE_USER_ALLOC;
        ev->type_id             = entry->type_id;
        ev->event_size          = sizeof(*ev) + sizeof(*ev_alloc);
        ev->cpu                 = iter->cpu;
        ev->timestamp           = iter->ts;
        ev->call_site           = entry->call_site;
        ev->ptr                 = (unsigned long)entry->ptr;

        ev_alloc = trace_seq_reserve(s, sizeof(*ev_alloc));
        if (!ev_alloc)
                return TRACE_TYPE_PARTIAL_LINE;

        ev_alloc->bytes_req     = entry->bytes_req;
        ev_alloc->bytes_alloc   = entry->bytes_alloc;
        ev_alloc->gfp_flags     = entry->gfp_flags;
        ev_alloc->node          = entry->node;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter,
                          struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        struct kmemtrace_user_event *ev;

        ev = trace_seq_reserve(s, sizeof(*ev));
        if (!ev)
                return TRACE_TYPE_PARTIAL_LINE;

        ev->event_id            = KMEMTRACE_USER_FREE;
        ev->type_id             = entry->type_id;
        ev->event_size          = sizeof(*ev);
        ev->cpu                 = iter->cpu;
        ev->timestamp           = iter->ts;
        ev->call_site           = entry->call_site;
        ev->ptr                 = (unsigned long)entry->ptr;

        return TRACE_TYPE_HANDLED;
}

/* The two following functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
                               struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Alloc entry */
        ret = trace_seq_printf(s, "  +      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K   ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C   ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P   ");
                break;
        default:
                ret = trace_seq_printf(s, "?   ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Requested */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Allocated */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Flags
         * TODO: it would be better to print the names of the GFP flags
         */
        ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Node */
        ret = trace_seq_printf(s, "%4d   ", entry->node);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
                              struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Free entry */
        ret = trace_seq_printf(s, "  -      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K     ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C     ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P     ");
                break;
        default:
                ret = trace_seq_printf(s, "?     ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip requested/allocated/flags */
        ret = trace_seq_printf(s, "                       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip node */
        ret = trace_seq_printf(s, "       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

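/*
 * Top-level output dispatcher: select by entry type, then emit either
 * the minimalistic text format or the binary user-event format,
 * depending on the kmem_minimalistic tracer option.
 */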
static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_KMEM_ALLOC: {
                struct kmemtrace_alloc_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_alloc_compress(iter, field);
                else
                        return kmemtrace_print_alloc_user(iter, field);
        }

        case TRACE_KMEM_FREE: {
                struct kmemtrace_free_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_free_compress(iter, field);
                else
                        return kmemtrace_print_free_user(iter, field);
        }

        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

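/*
 * Hook into the tracing core: .init/.reset run when kmemtrace is
 * selected or deselected as the current tracer, and .print_line
 * takes over all output formatting.
 */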
static struct tracer kmem_tracer __read_mostly = {
        .name                   = "kmemtrace",
        .init                   = kmem_trace_init,
        .reset                  = kmem_trace_reset,
        .print_line             = kmemtrace_print_line,
        .print_header           = kmemtrace_headers,
        .flags                  = &kmem_tracer_flags
};

void kmemtrace_init(void)
{
        /* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
        return register_tracer(&kmem_tracer);
}
device_initcall(init_kmem_tracer);
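
/*
 * Usage sketch (illustrative, not part of this file's interface),
 * assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   # echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *   # echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 *   # cat /sys/kernel/debug/tracing/trace
 *
 * Without the kmem_minimalistic option, the trace file carries the
 * binary kmemtrace_user_event records described above.
 */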