tracing/kmemtrace: normalize the raw tracer event to the unified tracing API
Frederic Weisbecker [Mon, 29 Dec 2008 21:42:23 +0000 (13:42 -0800)]
Impact: new tracer plugin

This patch adapts kmemtrace raw events tracing to the unified tracing API.

To enable and use this tracer, just do the following:

 echo kmemtrace > /debugfs/tracing/current_tracer
 cat /debugfs/tracing/trace

You will have the following output:

 # tracer: kmemtrace
 #
 #
 # ALLOC  TYPE  REQ   GIVEN  FLAGS           POINTER         NODE    CALLER
 # FREE   |      |     |       |              |   |            |        |
 # |

type_id 1 call_site 18446744071565527833 ptr 18446612134395152256
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 0 call_site 18446744071565636711 ptr 18446612134345164672 bytes_req 240 bytes_alloc 240 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 0 call_site 18446744071565636711 ptr 18446612134345164912 bytes_req 240 bytes_alloc 240 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 0 call_site 18446744071565636711 ptr 18446612134345165152 bytes_req 240 bytes_alloc 240 gfp_flags 208 node -1
type_id 0 call_site 18446744071566144042 ptr 18446612134346191680 bytes_req 1304 bytes_alloc 1312 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584
type_id 0 call_site 18446744071565585597 ptr 18446612134405955584 bytes_req 4096 bytes_alloc 4096 gfp_flags 208 node -1
type_id 1 call_site 18446744071565585534 ptr 18446612134405955584

That was to stay backward compatible with the format output produced by
the old trace_mark() calls in linux/kmemtrace.h.

This is the default output, but note that I tried something else.

If you change an option:

echo kmem_minimalistic > /debugfs/tracing/trace_options

and then cat /debugfs/tracing/trace, you will have the following output:

 # tracer: kmemtrace
 #
 #
 # ALLOC  TYPE  REQ   GIVEN  FLAGS           POINTER         NODE    CALLER
 # FREE   |      |     |       |              |   |            |        |
 # |

   -      C                            0xffff88007c088780          file_free_rcu
   +      K   4096   4096   000000d0   0xffff88007cad6000     -1   getname
   -      C                            0xffff88007cad6000          putname
   +      K   4096   4096   000000d0   0xffff88007cad6000     -1   getname
   +      K    240    240   000000d0   0xffff8800790dc780     -1   d_alloc
   -      C                            0xffff88007cad6000          putname
   +      K   4096   4096   000000d0   0xffff88007cad6000     -1   getname
   +      K    240    240   000000d0   0xffff8800790dc870     -1   d_alloc
   -      C                            0xffff88007cad6000          putname
   +      K   4096   4096   000000d0   0xffff88007cad6000     -1   getname
   +      K    240    240   000000d0   0xffff8800790dc960     -1   d_alloc
   +      K   1304   1312   000000d0   0xffff8800791d7340     -1   reiserfs_alloc_inode
   -      C                            0xffff88007cad6000          putname
   +      K   4096   4096   000000d0   0xffff88007cad6000     -1   getname
   -      C                            0xffff88007cad6000          putname
   +      K    992   1000   000000d0   0xffff880079045b58     -1   alloc_inode
   +      K    768   1024   000080d0   0xffff88007c096400     -1   alloc_pipe_info
   +      K    240    240   000000d0   0xffff8800790dca50     -1   d_alloc
   +      K    272    320   000080d0   0xffff88007c088780     -1   get_empty_filp
   +      K    272    320   000080d0   0xffff88007c088000     -1   get_empty_filp

Yeah I shall confess kmem_minimalistic should be: kmem_alternative.

Whatever, I find it more readable but this a personal opinion of course.
We can drop it if you want.

On the ALLOC/FREE column, + means an allocation and - a free.

On the type column, you have K = kmalloc, C = cache, P = page

I would like the flags to be shown as GFP_* strings, but it would not be easy
to do that without breaking the column alignment.

About the node...it seems to always be -1. I don't know why but that shouldn't
be difficult to find.

I moved linux/kmemtrace.h to trace/kmemtrace.h as well. I think it would
be easier to find the tracer headers if they are all in their common
directory.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

12 files changed:
include/linux/slab_def.h
include/linux/slub_def.h
include/trace/kmemtrace.h [moved from include/linux/kmemtrace.h with 72% similarity]
init/main.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/kmemtrace.c [new file with mode: 0644]
kernel/trace/trace.h
lib/Kconfig.debug
mm/kmemtrace.c
mm/slob.c
mm/slub.c

index 7555ce9..455f9af 100644 (file)
@@ -14,7 +14,7 @@
 #include <asm/page.h>          /* kmalloc_sizes.h needs PAGE_SIZE */
 #include <asm/cache.h>         /* kmalloc_sizes.h needs L1_CACHE_BYTES */
 #include <linux/compiler.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 
 /* Size description struct for general caches. */
 struct cache_sizes {
index dc28432..6b657f7 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/gfp.h>
 #include <linux/workqueue.h>
 #include <linux/kobject.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 
 enum stat_item {
        ALLOC_FASTPATH,         /* Allocation from cpu slab */
similarity index 72%
rename from include/linux/kmemtrace.h
rename to include/trace/kmemtrace.h
index 5bea8ea..ad8b785 100644 (file)
@@ -22,28 +22,17 @@ enum kmemtrace_type_id {
 
 extern void kmemtrace_init(void);
 
-static inline void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+extern void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
                                             unsigned long call_site,
                                             const void *ptr,
                                             size_t bytes_req,
                                             size_t bytes_alloc,
                                             gfp_t gfp_flags,
-                                            int node)
-{
-       trace_mark(kmemtrace_alloc, "type_id %d call_site %lu ptr %lu "
-                  "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d",
-                  type_id, call_site, (unsigned long) ptr,
-                  (unsigned long) bytes_req, (unsigned long) bytes_alloc,
-                  (unsigned long) gfp_flags, node);
-}
+                                            int node);
 
-static inline void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+extern void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
                                       unsigned long call_site,
-                                      const void *ptr)
-{
-       trace_mark(kmemtrace_free, "type_id %d call_site %lu ptr %lu",
-                  type_id, call_site, (unsigned long) ptr);
-}
+                                      const void *ptr);
 
 #else /* CONFIG_KMEMTRACE */
 
index 9711586..beca7aa 100644 (file)
@@ -70,7 +70,7 @@
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/cacheflush.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/smp.h>
index e2a4ff6..27fb74b 100644 (file)
@@ -264,6 +264,28 @@ config HW_BRANCH_TRACER
          This tracer records all branches on the system in a circular
          buffer giving access to the last N branches for each cpu.
 
+config KMEMTRACE
+       bool "Trace SLAB allocations"
+       select TRACING
+       depends on RELAY
+       help
+         kmemtrace provides tracing for slab allocator functions, such as
+         kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
+         data is then fed to the userspace application in order to analyse
+         allocation hotspots, internal fragmentation and so on, making it
+         possible to see how well an allocator performs, as well as debug
+         and profile kernel code.
+
+         This requires an userspace application to use. See
+         Documentation/vm/kmemtrace.txt for more information.
+
+         Saying Y will make the kernel somewhat larger and slower. However,
+         if you disable kmemtrace at run-time or boot-time, the performance
+         impact is minimal (depending on the arch the kernel is built for).
+
+         If unsure, say N.
+
+
 config DYNAMIC_FTRACE
        bool "enable/disable ftrace tracepoints dynamically"
        depends on FUNCTION_TRACER
index 349d5a9..513dc86 100644 (file)
@@ -33,5 +33,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
 obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
 obj-$(CONFIG_POWER_TRACER) += trace_power.o
+obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
 
 libftrace-y := ftrace.o
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c
new file mode 100644 (file)
index 0000000..d69cbe3
--- /dev/null
@@ -0,0 +1,343 @@
+/*
+ * Memory allocator tracing
+ *
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi>
+ * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
+ */
+
+#include <linux/dcache.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/seq_file.h>
+#include <trace/kmemtrace.h>
+
+#include "trace.h"
+#include "trace_output.h"
+
+/* Select an alternative, minimalistic output than the original one */
+#define TRACE_KMEM_OPT_MINIMAL 0x1
+
+static struct tracer_opt kmem_opts[] = {
+       /* Default disable the minimalistic output */
+       { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) },
+       { }
+};
+
+static struct tracer_flags kmem_tracer_flags = {
+       .val = 0,
+       .opts = kmem_opts
+};
+
+
+static bool kmem_tracing_enabled __read_mostly;
+static struct trace_array *kmemtrace_array;
+
+static int kmem_trace_init(struct trace_array *tr)
+{
+       int cpu;
+       kmemtrace_array = tr;
+
+       for_each_cpu_mask(cpu, cpu_possible_map)
+               tracing_reset(tr, cpu);
+
+       kmem_tracing_enabled = true;
+
+       return 0;
+}
+
+static void kmem_trace_reset(struct trace_array *tr)
+{
+       kmem_tracing_enabled = false;
+}
+
+static void kmemtrace_headers(struct seq_file *s)
+{
+       /* Don't need headers for the original kmemtrace output */
+       if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL))
+               return;
+
+       seq_printf(s, "#\n");
+       seq_printf(s, "# ALLOC  TYPE  REQ   GIVEN  FLAGS     "
+                       "      POINTER         NODE    CALLER\n");
+       seq_printf(s, "# FREE   |      |     |       |       "
+                       "       |   |            |        |\n");
+       seq_printf(s, "# |\n\n");
+}
+
+/*
+ * The two following functions give the original output from kmemtrace,
+ * or something close to....perhaps they need some missing things
+ */
+static enum print_line_t
+kmemtrace_print_alloc_original(struct trace_iterator *iter,
+                               struct kmemtrace_alloc_entry *entry)
+{
+       struct trace_seq *s = &iter->seq;
+       int ret;
+
+       /* Taken from the old linux/kmemtrace.h */
+       ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu "
+         "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n",
+          entry->type_id, entry->call_site, (unsigned long) entry->ptr,
+          (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc,
+          (unsigned long) entry->gfp_flags, entry->node);
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_original(struct trace_iterator *iter,
+                               struct kmemtrace_free_entry *entry)
+{
+       struct trace_seq *s = &iter->seq;
+       int ret;
+
+       /* Taken from the old linux/kmemtrace.h */
+       ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n",
+          entry->type_id, entry->call_site, (unsigned long) entry->ptr);
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+
+/* The two other following provide a more minimalistic output */
+static enum print_line_t
+kmemtrace_print_alloc_compress(struct trace_iterator *iter,
+                                       struct kmemtrace_alloc_entry *entry)
+{
+       struct trace_seq *s = &iter->seq;
+       int ret;
+
+       /* Alloc entry */
+       ret = trace_seq_printf(s, "  +      ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Type */
+       switch (entry->type_id) {
+       case KMEMTRACE_TYPE_KMALLOC:
+               ret = trace_seq_printf(s, "K   ");
+               break;
+       case KMEMTRACE_TYPE_CACHE:
+               ret = trace_seq_printf(s, "C   ");
+               break;
+       case KMEMTRACE_TYPE_PAGES:
+               ret = trace_seq_printf(s, "P   ");
+               break;
+       default:
+               ret = trace_seq_printf(s, "?   ");
+       }
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Requested */
+       ret = trace_seq_printf(s, "%4d   ", entry->bytes_req);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Allocated */
+       ret = trace_seq_printf(s, "%4d   ", entry->bytes_alloc);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Flags
+        * TODO: would be better to see the name of the GFP flag names
+        */
+       ret = trace_seq_printf(s, "%08x   ", entry->gfp_flags);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Pointer to allocated */
+       ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Node */
+       ret = trace_seq_printf(s, "%4d   ", entry->node);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Call site */
+       ret = seq_print_ip_sym(s, entry->call_site, 0);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!trace_seq_printf(s, "\n"))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t
+kmemtrace_print_free_compress(struct trace_iterator *iter,
+                               struct kmemtrace_free_entry *entry)
+{
+       struct trace_seq *s = &iter->seq;
+       int ret;
+
+       /* Free entry */
+       ret = trace_seq_printf(s, "  -      ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Type */
+       switch (entry->type_id) {
+       case KMEMTRACE_TYPE_KMALLOC:
+               ret = trace_seq_printf(s, "K     ");
+               break;
+       case KMEMTRACE_TYPE_CACHE:
+               ret = trace_seq_printf(s, "C     ");
+               break;
+       case KMEMTRACE_TYPE_PAGES:
+               ret = trace_seq_printf(s, "P     ");
+               break;
+       default:
+               ret = trace_seq_printf(s, "?     ");
+       }
+
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Skip requested/allocated/flags */
+       ret = trace_seq_printf(s, "                       ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Pointer to allocated */
+       ret = trace_seq_printf(s, "0x%tx   ", (ptrdiff_t)entry->ptr);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Skip node */
+       ret = trace_seq_printf(s, "       ");
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       /* Call site */
+       ret = seq_print_ip_sym(s, entry->call_site, 0);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       if (!trace_seq_printf(s, "\n"))
+               return TRACE_TYPE_PARTIAL_LINE;
+
+       return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
+{
+       struct trace_entry *entry = iter->ent;
+
+       switch (entry->type) {
+       case TRACE_KMEM_ALLOC: {
+               struct kmemtrace_alloc_entry *field;
+               trace_assign_type(field, entry);
+               if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
+                       return kmemtrace_print_alloc_compress(iter, field);
+               else
+                       return kmemtrace_print_alloc_original(iter, field);
+       }
+
+       case TRACE_KMEM_FREE: {
+               struct kmemtrace_free_entry *field;
+               trace_assign_type(field, entry);
+               if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)
+                       return kmemtrace_print_free_compress(iter, field);
+               else
+                       return kmemtrace_print_free_original(iter, field);
+       }
+
+       default:
+               return TRACE_TYPE_UNHANDLED;
+       }
+}
+
+/* Trace allocations */
+void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id,
+                            unsigned long call_site,
+                            const void *ptr,
+                            size_t bytes_req,
+                            size_t bytes_alloc,
+                            gfp_t gfp_flags,
+                            int node)
+{
+       struct ring_buffer_event *event;
+       struct kmemtrace_alloc_entry *entry;
+       struct trace_array *tr = kmemtrace_array;
+       unsigned long irq_flags;
+
+       if (!kmem_tracing_enabled)
+               return;
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, 0, 0);
+
+       entry->ent.type = TRACE_KMEM_ALLOC;
+       entry->call_site = call_site;
+       entry->ptr = ptr;
+       entry->bytes_req = bytes_req;
+       entry->bytes_alloc = bytes_alloc;
+       entry->gfp_flags = gfp_flags;
+       entry->node     =       node;
+
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+       trace_wake_up();
+}
+
+void kmemtrace_mark_free(enum kmemtrace_type_id type_id,
+                      unsigned long call_site,
+                      const void *ptr)
+{
+       struct ring_buffer_event *event;
+       struct kmemtrace_free_entry *entry;
+       struct trace_array *tr = kmemtrace_array;
+       unsigned long irq_flags;
+
+       if (!kmem_tracing_enabled)
+               return;
+
+       event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
+                                        &irq_flags);
+       if (!event)
+               return;
+       entry   = ring_buffer_event_data(event);
+       tracing_generic_entry_update(&entry->ent, 0, 0);
+
+       entry->ent.type = TRACE_KMEM_FREE;
+       entry->type_id  = type_id;
+       entry->call_site = call_site;
+       entry->ptr = ptr;
+
+       ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
+
+       trace_wake_up();
+}
+
+static struct tracer kmem_tracer __read_mostly = {
+       .name           = "kmemtrace",
+       .init           = kmem_trace_init,
+       .reset          = kmem_trace_reset,
+       .print_line     = kmemtrace_print_line,
+       .print_header = kmemtrace_headers,
+       .flags          = &kmem_tracer_flags
+};
+
+static int __init init_kmem_tracer(void)
+{
+       return register_tracer(&kmem_tracer);
+}
+
+device_initcall(init_kmem_tracer);
index cc7a4f8..534505b 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/mmiotrace.h>
 #include <linux/ftrace.h>
 #include <trace/boot.h>
+#include <trace/kmemtrace.h>
 
 enum trace_type {
        __TRACE_FIRST_TYPE = 0,
@@ -29,6 +30,8 @@ enum trace_type {
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_HW_BRANCHES,
+       TRACE_KMEM_ALLOC,
+       TRACE_KMEM_FREE,
        TRACE_POWER,
 
        __TRACE_LAST_TYPE
@@ -170,6 +173,24 @@ struct trace_power {
        struct power_trace      state_data;
 };
 
+struct kmemtrace_alloc_entry {
+       struct trace_entry      ent;
+       enum kmemtrace_type_id type_id;
+       unsigned long call_site;
+       const void *ptr;
+       size_t bytes_req;
+       size_t bytes_alloc;
+       gfp_t gfp_flags;
+       int node;
+};
+
+struct kmemtrace_free_entry {
+       struct trace_entry      ent;
+       enum kmemtrace_type_id type_id;
+       unsigned long call_site;
+       const void *ptr;
+};
+
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
@@ -280,6 +301,10 @@ extern void __ftrace_bad_type(void);
                          TRACE_GRAPH_RET);             \
                IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
                IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
+               IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,       \
+                         TRACE_KMEM_ALLOC);    \
+               IF_ASSIGN(var, ent, struct kmemtrace_free_entry,        \
+                         TRACE_KMEM_FREE);     \
                __ftrace_bad_type();                                    \
        } while (0)
 
index b5417e2..b0f239e 100644 (file)
@@ -803,26 +803,6 @@ config FIREWIRE_OHCI_REMOTE_DMA
 
          If unsure, say N.
 
-config KMEMTRACE
-       bool "Kernel memory tracer (kmemtrace)"
-       depends on RELAY && DEBUG_FS && MARKERS
-       help
-         kmemtrace provides tracing for slab allocator functions, such as
-         kmalloc, kfree, kmem_cache_alloc, kmem_cache_free etc.. Collected
-         data is then fed to the userspace application in order to analyse
-         allocation hotspots, internal fragmentation and so on, making it
-         possible to see how well an allocator performs, as well as debug
-         and profile kernel code.
-
-         This requires an userspace application to use. See
-         Documentation/vm/kmemtrace.txt for more information.
-
-         Saying Y will make the kernel somewhat larger and slower. However,
-         if you disable kmemtrace at run-time or boot-time, the performance
-         impact is minimal (depending on the arch the kernel is built for).
-
-         If unsure, say N.
-
 menuconfig BUILD_DOCSRC
        bool "Build targets in Documentation/ tree"
        depends on HEADERS_CHECK
index 2a70a80..0573b50 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/module.h>
 #include <linux/marker.h>
 #include <linux/gfp.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 
 #define KMEMTRACE_SUBBUF_SIZE          524288
 #define KMEMTRACE_DEF_N_SUBBUFS                20
index 0f1a49f..4d1c0fc 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -65,7 +65,7 @@
 #include <linux/module.h>
 #include <linux/rcupdate.h>
 #include <linux/list.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 #include <asm/atomic.h>
 
 /*
index cc4001f..7bf8cf8 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -16,7 +16,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
-#include <linux/kmemtrace.h>
+#include <trace/kmemtrace.h>
 #include <linux/cpu.h>
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>