kmemtrace: kmemtrace_alloc() must fill type_id
[linux-2.6.git] kernel/trace/kmemtrace.c
/*
 * Memory allocator tracing
 *
 * Copyright (C) 2008 Eduard - Gabriel Munteanu
 * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/tracepoint.h>
#include <trace/kmemtrace.h>

#include "trace.h"
#include "trace_output.h"

/* Select an alternative, minimalistic output instead of the original one */
#define TRACE_KMEM_OPT_MINIMAL  0x1

static struct tracer_opt kmem_opts[] = {
        /* The minimalistic output is disabled by default */
        { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
        { }
};

static struct tracer_flags kmem_tracer_flags = {
        .val = 0,
        .opts = kmem_opts
};

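/*
 * Note: as with other per-tracer flags, the kmem_minimalistic option can
 * normally be toggled at run time through the generic ftrace interface
 * once this tracer is selected, e.g. (path assumed, with debugfs mounted
 * at /sys/kernel/debug):
 *
 *      echo kmem_minimalistic > /sys/kernel/debug/tracing/trace_options
 */
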
static struct trace_array *kmemtrace_array;

/* Trace allocations */
static inline void kmemtrace_alloc(enum kmemtrace_type_id type_id,
                                   unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        struct ring_buffer_event *event;
        struct kmemtrace_alloc_entry *entry;
        struct trace_array *tr = kmemtrace_array;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type = TRACE_KMEM_ALLOC;
        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;
        entry->bytes_req = bytes_req;
        entry->bytes_alloc = bytes_alloc;
        entry->gfp_flags = gfp_flags;
        entry->node = node;

        ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}

static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
                                  unsigned long call_site,
                                  const void *ptr)
{
        struct ring_buffer_event *event;
        struct kmemtrace_free_entry *entry;
        struct trace_array *tr = kmemtrace_array;

        event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry));
        if (!event)
                return;
        entry = ring_buffer_event_data(event);
        tracing_generic_entry_update(&entry->ent, 0, 0);

        entry->ent.type = TRACE_KMEM_FREE;
        entry->type_id = type_id;
        entry->call_site = call_site;
        entry->ptr = ptr;

        ring_buffer_unlock_commit(tr->buffer, event);

        trace_wake_up();
}

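/*
 * The probes below match the tracepoint prototypes declared in
 * <trace/kmemtrace.h> (included above).  Each one funnels its tracepoint
 * into the common alloc/free helpers here; the variants that carry no
 * node information pass -1 to mean that no particular node was requested.
 */
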
static void kmemtrace_kmalloc(unsigned long call_site,
                              const void *ptr,
                              size_t bytes_req,
                              size_t bytes_alloc,
                              gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
                                       const void *ptr,
                                       size_t bytes_req,
                                       size_t bytes_alloc,
                                       gfp_t gfp_flags)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, -1);
}

static void kmemtrace_kmalloc_node(unsigned long call_site,
                                   const void *ptr,
                                   size_t bytes_req,
                                   size_t bytes_alloc,
                                   gfp_t gfp_flags,
                                   int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_KMALLOC, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
                                            const void *ptr,
                                            size_t bytes_req,
                                            size_t bytes_alloc,
                                            gfp_t gfp_flags,
                                            int node)
{
        kmemtrace_alloc(KMEMTRACE_TYPE_CACHE, call_site, ptr,
                        bytes_req, bytes_alloc, gfp_flags, node);
}

static void kmemtrace_kfree(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
}

static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr)
{
        kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
}

static int kmemtrace_start_probes(void)
{
        int err;

        err = register_trace_kmalloc(kmemtrace_kmalloc);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
        if (err)
                return err;
        err = register_trace_kmalloc_node(kmemtrace_kmalloc_node);
        if (err)
                return err;
        err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
        if (err)
                return err;
        err = register_trace_kfree(kmemtrace_kfree);
        if (err)
                return err;
        err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free);

        return err;
}

static void kmemtrace_stop_probes(void)
{
        unregister_trace_kmalloc(kmemtrace_kmalloc);
        unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc);
        unregister_trace_kmalloc_node(kmemtrace_kmalloc_node);
        unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node);
        unregister_trace_kfree(kmemtrace_kfree);
        unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free);
}

static int kmem_trace_init(struct trace_array *tr)
{
        int cpu;

        kmemtrace_array = tr;

        for_each_cpu_mask(cpu, cpu_possible_map)
                tracing_reset(tr, cpu);

        kmemtrace_start_probes();

        return 0;
}

static void kmem_trace_reset(struct trace_array *tr)
{
        kmemtrace_stop_probes();
}

static void kmemtrace_headers(struct seq_file *s)
{
        /* Don't need headers for the original kmemtrace output */
        if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
                return;

        seq_printf(s, "#\n");
        seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
                        "      POINTER         NODE    CALLER\n");
        seq_printf(s, "# FREE   |      |     |       |       "
                        "       |   |            |        |\n");
        seq_printf(s, "# |\n\n");
}

/*
 * The following two functions give the original kmemtrace output, or
 * something close to it; a few details may still be missing.
 */
static enum print_line_t
kmemtrace_print_alloc_original(struct trace_iterator *iter,
                                struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Taken from the old linux/kmemtrace.h */
        ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
          "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
           entry->type_id, entry->call_site, (unsigned long) entry->ptr,
           (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
           (unsigned long) entry->gfp_flags, entry->node);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_original(struct trace_iterator *iter,
                                struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Taken from the old linux/kmemtrace.h */
        ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
           entry->type_id, entry->call_site, (unsigned long) entry->ptr);

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

/* The following two functions provide a more minimalistic output */
static enum print_line_t
kmemtrace_print_alloc_compress(struct trace_iterator *iter,
                                        struct kmemtrace_alloc_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Alloc entry */
        ret = trace_seq_printf(s, "  +      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K   ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C   ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P   ");
                break;
        default:
                ret = trace_seq_printf(s, "?   ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Requested */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_req);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Allocated */
        ret = trace_seq_printf(s, "%4zu   ", entry->bytes_alloc);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * Flags
         * TODO: it would be better to print the names of the GFP flags
         */
        ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Node */
        ret = trace_seq_printf(s, "%4d   ", entry->node);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
kmemtrace_print_free_compress(struct trace_iterator *iter,
                                struct kmemtrace_free_entry *entry)
{
        struct trace_seq *s = &iter->seq;
        int ret;

        /* Free entry */
        ret = trace_seq_printf(s, "  -      ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Type */
        switch (entry->type_id) {
        case KMEMTRACE_TYPE_KMALLOC:
                ret = trace_seq_printf(s, "K     ");
                break;
        case KMEMTRACE_TYPE_CACHE:
                ret = trace_seq_printf(s, "C     ");
                break;
        case KMEMTRACE_TYPE_PAGES:
                ret = trace_seq_printf(s, "P     ");
                break;
        default:
                ret = trace_seq_printf(s, "?     ");
        }

        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip requested/allocated/flags */
        ret = trace_seq_printf(s, "                       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Pointer to allocated */
        ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Skip node */
        ret = trace_seq_printf(s, "       ");
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        /* Call site */
        ret = seq_print_ip_sym(s, entry->call_site, 0);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;

        if (!trace_seq_printf(s, "\n"))
                return TRACE_TYPE_PARTIAL_LINE;

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
{
        struct trace_entry *entry = iter->ent;

        switch (entry->type) {
        case TRACE_KMEM_ALLOC: {
                struct kmemtrace_alloc_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_alloc_compress(iter, field);
                else
                        return kmemtrace_print_alloc_original(iter, field);
        }

        case TRACE_KMEM_FREE: {
                struct kmemtrace_free_entry *field;

                trace_assign_type(field, entry);
                if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
                        return kmemtrace_print_free_compress(iter, field);
                else
                        return kmemtrace_print_free_original(iter, field);
        }

        default:
                return TRACE_TYPE_UNHANDLED;
        }
}

static struct tracer kmem_tracer __read_mostly = {
        .name           = "kmemtrace",
        .init           = kmem_trace_init,
        .reset          = kmem_trace_reset,
        .print_line     = kmemtrace_print_line,
        .print_header   = kmemtrace_headers,
        .flags          = &kmem_tracer_flags
};
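
/*
 * Once registered, the tracer is selected through the usual ftrace control
 * files; for example (path assumed, with debugfs mounted at
 * /sys/kernel/debug):
 *
 *      echo kmemtrace > /sys/kernel/debug/tracing/current_tracer
 *      cat /sys/kernel/debug/tracing/trace
 */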

void kmemtrace_init(void)
{
        /* earliest opportunity to start kmem tracing */
}

static int __init init_kmem_tracer(void)
{
        return register_tracer(&kmem_tracer);
}

device_initcall(init_kmem_tracer);