Merge branch 'linus' into tracing/mmiotrace
Ingo Molnar [Mon, 7 Jul 2008 06:07:35 +0000 (08:07 +0200)]
85 files changed:
Documentation/tracers/mmiotrace.txt [new file with mode: 0644]
Makefile
arch/powerpc/Kconfig
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/ftrace.c [new file with mode: 0644]
arch/powerpc/kernel/io.c
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/platforms/powermac/Makefile
arch/sparc64/Kconfig
arch/sparc64/Kconfig.debug
arch/sparc64/kernel/Makefile
arch/sparc64/kernel/ftrace.c [new file with mode: 0644]
arch/sparc64/lib/mcount.S
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/kernel/Makefile
arch/x86/kernel/alternative.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/ftrace.c [new file with mode: 0644]
arch/x86/kernel/i386_ksyms_32.c
arch/x86/kernel/machine_kexec_32.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/vsyscall_64.c
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/lib/Makefile
arch/x86/lib/thunk_32.S [new file with mode: 0644]
arch/x86/lib/thunk_64.S
arch/x86/mm/Makefile
arch/x86/mm/fault.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/kmmio.c [new file with mode: 0644]
arch/x86/mm/mmio-mod.c [new file with mode: 0644]
arch/x86/mm/pageattr.c
arch/x86/mm/pf_in.c [new file with mode: 0644]
arch/x86/mm/pf_in.h [new file with mode: 0644]
arch/x86/mm/testmmiotrace.c [new file with mode: 0644]
arch/x86/vdso/vclock_gettime.c
arch/x86/vdso/vgetcpu.c
include/asm-powerpc/hw_irq.h
include/asm-x86/alternative.h
include/asm-x86/irqflags.h
include/asm-x86/vsyscall.h
include/linux/ftrace.h [new file with mode: 0644]
include/linux/irqflags.h
include/linux/linkage.h
include/linux/marker.h
include/linux/mmiotrace.h [new file with mode: 0644]
include/linux/preempt.h
include/linux/sched.h
include/linux/writeback.h
kernel/Makefile
kernel/fork.c
kernel/lockdep.c
kernel/marker.c
kernel/printk.c
kernel/sched.c
kernel/semaphore.c
kernel/spinlock.c
kernel/sysctl.c
kernel/trace/Kconfig [new file with mode: 0644]
kernel/trace/Makefile [new file with mode: 0644]
kernel/trace/ftrace.c [new file with mode: 0644]
kernel/trace/trace.c [new file with mode: 0644]
kernel/trace/trace.h [new file with mode: 0644]
kernel/trace/trace_functions.c [new file with mode: 0644]
kernel/trace/trace_irqsoff.c [new file with mode: 0644]
kernel/trace/trace_mmiotrace.c [new file with mode: 0644]
kernel/trace/trace_sched_switch.c [new file with mode: 0644]
kernel/trace/trace_sched_wakeup.c [new file with mode: 0644]
kernel/trace/trace_selftest.c [new file with mode: 0644]
kernel/trace/trace_selftest_dynamic.c [new file with mode: 0644]
lib/Kconfig.debug
lib/Makefile
lib/smp_processor_id.c
mm/page-writeback.c
scripts/Makefile.lib

diff --git a/Documentation/tracers/mmiotrace.txt b/Documentation/tracers/mmiotrace.txt
new file mode 100644 (file)
index 0000000..a4afb56
--- /dev/null
@@ -0,0 +1,164 @@
+               In-kernel memory-mapped I/O tracing
+
+
+Home page and links to optional user space tools:
+
+       http://nouveau.freedesktop.org/wiki/MmioTrace
+
+MMIO tracing was originally developed by Intel around 2003 for their Fault
+Injection Test Harness. In Dec 2006 - Jan 2007, using the code from Intel,
+Jeff Muizelaar created a tool for tracing MMIO accesses with the Nouveau
+project in mind. Since then many people have contributed.
+
+Mmiotrace was built for reverse engineering any memory-mapped IO device with
+the Nouveau project as the first real user. Only x86 and x86_64 architectures
+are supported.
+
+Out-of-tree mmiotrace was originally modified for mainline inclusion and
+ftrace framework by Pekka Paalanen <pq@iki.fi>.
+
+
+Preparation
+-----------
+
+Mmiotrace feature is compiled in by the CONFIG_MMIOTRACE option. Tracing is
+disabled by default, so it is safe to have this set to yes. SMP systems are
+supported, but tracing is unreliable and may miss events if more than one CPU
+is on-line, therefore mmiotrace takes all but one CPU off-line during run-time
+activation. You can re-enable CPUs by hand, but you have been warned, there
+is no way to automatically detect if you are losing events due to CPUs racing.
+
+
+Usage Quick Reference
+---------------------
+
+$ mount -t debugfs debugfs /debug
+$ echo mmiotrace > /debug/tracing/current_tracer
+$ cat /debug/tracing/trace_pipe > mydump.txt &
+Start X or whatever.
+$ echo "X is up" > /debug/tracing/marker
+$ echo none > /debug/tracing/current_tracer
+Check for lost events.
+
+
+Usage
+-----
+
+Make sure debugfs is mounted to /debug. If not, (requires root privileges)
+$ mount -t debugfs debugfs /debug
+
+Check that the driver you are about to trace is not loaded.
+
+Activate mmiotrace (requires root privileges):
+$ echo mmiotrace > /debug/tracing/current_tracer
+
+Start storing the trace:
+$ cat /debug/tracing/trace_pipe > mydump.txt &
+The 'cat' process should stay running (sleeping) in the background.
+
+Load the driver you want to trace and use it. Mmiotrace will only catch MMIO
+accesses to areas that are ioremapped while mmiotrace is active.
+
+[Unimplemented feature:]
+During tracing you can place comments (markers) into the trace by
+$ echo "X is up" > /debug/tracing/marker
+This makes it easier to see which part of the (huge) trace corresponds to
+which action. It is recommended to place descriptive markers about what you
+do.
+
+Shut down mmiotrace (requires root privileges):
+$ echo none > /debug/tracing/current_tracer
+The 'cat' process exits. If it does not, kill it by issuing 'fg' command and
+pressing ctrl+c.
+
+Check that mmiotrace did not lose events due to a buffer filling up. Either
+$ grep -i lost mydump.txt
+which tells you exactly how many events were lost, or use
+$ dmesg
+to view your kernel log and look for "mmiotrace has lost events" warning. If
+events were lost, the trace is incomplete. You should enlarge the buffers and
+try again. Buffers are enlarged by first seeing how large the current buffers
+are:
+$ cat /debug/tracing/trace_entries
+gives you a number. Approximately double this number and write it back, for
+instance:
+$ echo 128000 > /debug/tracing/trace_entries
+Then start again from the top.
+
+If you are doing a trace for a driver project, e.g. Nouveau, you should also
+do the following before sending your results:
+$ lspci -vvv > lspci.txt
+$ dmesg > dmesg.txt
+$ tar zcf pciid-nick-mmiotrace.tar.gz mydump.txt lspci.txt dmesg.txt
+and then send the .tar.gz file. The trace compresses considerably. Replace
+"pciid" and "nick" with the PCI ID or model name of your piece of hardware
+under investigation and your nick name.
+
+
+How Mmiotrace Works
+-------------------
+
+Access to hardware IO-memory is gained by mapping addresses from PCI bus by
+calling one of the ioremap_*() functions. Mmiotrace is hooked into the
+__ioremap() function and gets called whenever a mapping is created. Mapping is
+an event that is recorded into the trace log. Note, that ISA range mappings
+are not caught, since the mapping always exists and is returned directly.
+
+MMIO accesses are recorded via page faults. Just before __ioremap() returns,
+the mapped pages are marked as not present. Any access to the pages causes a
+fault. The page fault handler calls mmiotrace to handle the fault. Mmiotrace
+marks the page present, sets TF flag to achieve single stepping and exits the
+fault handler. The instruction that faulted is executed and debug trap is
+entered. Here mmiotrace again marks the page as not present. The instruction
+is decoded to get the type of operation (read/write), data width and the value
+read or written. These are stored to the trace log.
+
+Setting the page present in the page fault handler has a race condition on SMP
+machines. During the single stepping other CPUs may run freely on that page
+and events can be missed without a notice. Re-enabling other CPUs during
+tracing is discouraged.
+
+
+Trace Log Format
+----------------
+
+The raw log is text and easily filtered with e.g. grep and awk. One record is
+one line in the log. A record starts with a keyword, followed by keyword
+dependent arguments. Arguments are separated by a space, or continue until the
+end of line. The format for version 20070824 is as follows:
+
+Explanation    Keyword Space separated arguments
+---------------------------------------------------------------------------
+
+read event     R       width, timestamp, map id, physical, value, PC, PID
+write event    W       width, timestamp, map id, physical, value, PC, PID
+ioremap event  MAP     timestamp, map id, physical, virtual, length, PC, PID
+iounmap event  UNMAP   timestamp, map id, PC, PID
+marker         MARK    timestamp, text
+version                VERSION the string "20070824"
+info for reader        LSPCI   one line from lspci -v
+PCI address map        PCIDEV  space separated /proc/bus/pci/devices data
+unk. opcode    UNKNOWN timestamp, map id, physical, data, PC, PID
+
+Timestamp is in seconds with decimals. Physical is a PCI bus address, virtual
+is a kernel virtual address. Width is the data width in bytes and value is the
+data value. Map id is an arbitrary id number identifying the mapping that was
+used in an operation. PC is the program counter and PID is process id. PC is
+zero if it is not recorded. PID is always zero as tracing MMIO accesses
+originating in user space memory is not yet supported.
+
+For instance, the following awk filter will pass all 32-bit writes that target
+physical addresses in the range [0xfb73ce40, 0xfb800000[
+
+$ awk '/W 4 / { adr=strtonum($5); if (adr >= 0xfb73ce40 &&
+adr < 0xfb800000) print; }'
+
+
+Tools for Developers
+--------------------
+
+The user space tools include utilities for:
+- replacing numeric addresses and values with hardware register names
+- replaying MMIO logs, i.e., re-executing the recorded writes
+
+
index 6315424..8e51995 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -528,6 +528,10 @@ KBUILD_CFLAGS      += -g
 KBUILD_AFLAGS  += -gdwarf-2
 endif
 
+ifdef CONFIG_FTRACE
+KBUILD_CFLAGS  += -pg
+endif
+
 # We trigger additional mismatches with less inlining
 ifdef CONFIG_DEBUG_SECTION_MISMATCH
 KBUILD_CFLAGS += $(call cc-option, -fno-inline-functions-called-once)
index 3934e26..a5e9912 100644 (file)
@@ -105,11 +105,13 @@ config ARCH_NO_VIRT_TO_BUS
 config PPC
        bool
        default y
+       select HAVE_DYNAMIC_FTRACE
+       select HAVE_FTRACE
        select HAVE_IDE
-       select HAVE_OPROFILE
        select HAVE_KPROBES
        select HAVE_KRETPROBES
        select HAVE_LMB
+       select HAVE_OPROFILE
 
 config EARLY_PRINTK
        bool
index 2346d27..f3f5e26 100644 (file)
@@ -12,6 +12,18 @@ CFLAGS_prom_init.o      += -fPIC
 CFLAGS_btext.o         += -fPIC
 endif
 
+ifdef CONFIG_FTRACE
+# Do not trace early boot code
+CFLAGS_REMOVE_cputable.o = -pg
+CFLAGS_REMOVE_prom_init.o = -pg
+
+ifdef CONFIG_DYNAMIC_FTRACE
+# dynamic ftrace setup.
+CFLAGS_REMOVE_ftrace.o = -pg
+endif
+
+endif
+
 obj-y                          := cputable.o ptrace.o syscalls.o \
                                   irq.o align.o signal_32.o pmc.o vdso.o \
                                   init_task.o process.o systbl.o idle.o \
@@ -78,6 +90,8 @@ obj-$(CONFIG_KEXEC)           += machine_kexec.o crash.o \
 obj-$(CONFIG_AUDIT)            += audit.o
 obj64-$(CONFIG_AUDIT)          += compat_audit.o
 
+obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
+
 obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
 
 ifneq ($(CONFIG_PPC_INDIRECT_IO),y)
index 0c8614d..0e62218 100644 (file)
@@ -1035,3 +1035,133 @@ machine_check_in_rtas:
        /* XXX load up BATs and panic */
 
 #endif /* CONFIG_PPC_RTAS */
+
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+       stwu    r1,-48(r1)
+       stw     r3, 12(r1)
+       stw     r4, 16(r1)
+       stw     r5, 20(r1)
+       stw     r6, 24(r1)
+       mflr    r3
+       stw     r7, 28(r1)
+       mfcr    r5
+       stw     r8, 32(r1)
+       stw     r9, 36(r1)
+       stw     r10,40(r1)
+       stw     r3, 44(r1)
+       stw     r5, 8(r1)
+       .globl mcount_call
+mcount_call:
+       bl      ftrace_stub
+       nop
+       lwz     r6, 8(r1)
+       lwz     r0, 44(r1)
+       lwz     r3, 12(r1)
+       mtctr   r0
+       lwz     r4, 16(r1)
+       mtcr    r6
+       lwz     r5, 20(r1)
+       lwz     r6, 24(r1)
+       lwz     r0, 52(r1)
+       lwz     r7, 28(r1)
+       lwz     r8, 32(r1)
+       mtlr    r0
+       lwz     r9, 36(r1)
+       lwz     r10,40(r1)
+       addi    r1, r1, 48
+       bctr
+
+_GLOBAL(ftrace_caller)
+       /* Based off of objdump output from glibc */
+       stwu    r1,-48(r1)
+       stw     r3, 12(r1)
+       stw     r4, 16(r1)
+       stw     r5, 20(r1)
+       stw     r6, 24(r1)
+       mflr    r3
+       lwz     r4, 52(r1)
+       mfcr    r5
+       stw     r7, 28(r1)
+       stw     r8, 32(r1)
+       stw     r9, 36(r1)
+       stw     r10,40(r1)
+       stw     r3, 44(r1)
+       stw     r5, 8(r1)
+.globl ftrace_call
+ftrace_call:
+       bl      ftrace_stub
+       nop
+       lwz     r6, 8(r1)
+       lwz     r0, 44(r1)
+       lwz     r3, 12(r1)
+       mtctr   r0
+       lwz     r4, 16(r1)
+       mtcr    r6
+       lwz     r5, 20(r1)
+       lwz     r6, 24(r1)
+       lwz     r0, 52(r1)
+       lwz     r7, 28(r1)
+       lwz     r8, 32(r1)
+       mtlr    r0
+       lwz     r9, 36(r1)
+       lwz     r10,40(r1)
+       addi    r1, r1, 48
+       bctr
+#else
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+       stwu    r1,-48(r1)
+       stw     r3, 12(r1)
+       stw     r4, 16(r1)
+       stw     r5, 20(r1)
+       stw     r6, 24(r1)
+       mflr    r3
+       lwz     r4, 52(r1)
+       mfcr    r5
+       stw     r7, 28(r1)
+       stw     r8, 32(r1)
+       stw     r9, 36(r1)
+       stw     r10,40(r1)
+       stw     r3, 44(r1)
+       stw     r5, 8(r1)
+
+       LOAD_REG_ADDR(r5, ftrace_trace_function)
+#if 0
+       mtctr   r3
+       mr      r1, r5
+       bctrl
+#endif
+       lwz     r5,0(r5)
+#if 1
+       mtctr   r5
+       bctrl
+#else
+       bl      ftrace_stub
+#endif
+       nop
+
+       lwz     r6, 8(r1)
+       lwz     r0, 44(r1)
+       lwz     r3, 12(r1)
+       mtctr   r0
+       lwz     r4, 16(r1)
+       mtcr    r6
+       lwz     r5, 20(r1)
+       lwz     r6, 24(r1)
+       lwz     r0, 52(r1)
+       lwz     r7, 28(r1)
+       lwz     r8, 32(r1)
+       mtlr    r0
+       lwz     r9, 36(r1)
+       lwz     r10,40(r1)
+       addi    r1, r1, 48
+       bctr
+#endif
+
+_GLOBAL(ftrace_stub)
+       blr
+
+#endif /* CONFIG_MCOUNT */
index c0db5b7..2c4d9e0 100644 (file)
@@ -870,3 +870,65 @@ _GLOBAL(enter_prom)
        ld      r0,16(r1)
        mtlr    r0
         blr
+
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+_GLOBAL(mcount)
+_GLOBAL(_mcount)
+       /* Taken from output of objdump from lib64/glibc */
+       mflr    r3
+       stdu    r1, -112(r1)
+       std     r3, 128(r1)
+       .globl mcount_call
+mcount_call:
+       bl      ftrace_stub
+       nop
+       ld      r0, 128(r1)
+       mtlr    r0
+       addi    r1, r1, 112
+       blr
+
+_GLOBAL(ftrace_caller)
+       /* Taken from output of objdump from lib64/glibc */
+       mflr    r3
+       ld      r11, 0(r1)
+       stdu    r1, -112(r1)
+       std     r3, 128(r1)
+       ld      r4, 16(r11)
+.globl ftrace_call
+ftrace_call:
+       bl      ftrace_stub
+       nop
+       ld      r0, 128(r1)
+       mtlr    r0
+       addi    r1, r1, 112
+_GLOBAL(ftrace_stub)
+       blr
+#else
+_GLOBAL(mcount)
+       blr
+
+_GLOBAL(_mcount)
+       /* Taken from output of objdump from lib64/glibc */
+       mflr    r3
+       ld      r11, 0(r1)
+       stdu    r1, -112(r1)
+       std     r3, 128(r1)
+       ld      r4, 16(r11)
+
+
+       LOAD_REG_ADDR(r5,ftrace_trace_function)
+       ld      r5,0(r5)
+       ld      r5,0(r5)
+       mtctr   r5
+       bctrl
+
+       nop
+       ld      r0, 128(r1)
+       mtlr    r0
+       addi    r1, r1, 112
+_GLOBAL(ftrace_stub)
+       blr
+
+#endif
+#endif
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..5a4993f
--- /dev/null
@@ -0,0 +1,165 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/cacheflush.h>
+
+#define CALL_BACK              4
+
+static unsigned int ftrace_nop = 0x60000000;
+
+#ifdef CONFIG_PPC32
+# define GET_ADDR(addr) addr
+#else
+/* PowerPC64's functions are data that points to the functions */
+# define GET_ADDR(addr) *(unsigned long *)addr
+#endif
+
+notrace int ftrace_ip_converted(unsigned long ip)
+{
+       unsigned int save;
+
+       ip -= CALL_BACK;
+       save = *(unsigned int *)ip;
+
+       return save == ftrace_nop;
+}
+
+static unsigned int notrace ftrace_calc_offset(long ip, long addr)
+{
+       return (int)((addr + CALL_BACK) - ip);
+}
+
+notrace unsigned char *ftrace_nop_replace(void)
+{
+       return (char *)&ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+       static unsigned int op;
+
+       addr = GET_ADDR(addr);
+
+       /* Set to "bl addr" */
+       op = 0x48000001 | (ftrace_calc_offset(ip, addr) & 0x03fffffe);
+
+       /*
+        * No locking needed, this must be called via kstop_machine
+        * which in essence is like running on a uniprocessor machine.
+        */
+       return (unsigned char *)&op;
+}
+
+#ifdef CONFIG_PPC64
+# define _ASM_ALIGN    " .align 3 "
+# define _ASM_PTR      " .llong "
+#else
+# define _ASM_ALIGN    " .align 2 "
+# define _ASM_PTR      " .long "
+#endif
+
+notrace int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+                  unsigned char *new_code)
+{
+       unsigned replaced;
+       unsigned old = *(unsigned *)old_code;
+       unsigned new = *(unsigned *)new_code;
+       int faulted = 0;
+
+       /* move the IP back to the start of the call */
+       ip -= CALL_BACK;
+
+       /*
+        * Note: Due to modules and __init, code can
+        *  disappear and change, we need to protect against faulting
+        *  as well as code changing.
+        *
+        * No real locking needed, this code is run through
+        * kstop_machine.
+        */
+       asm volatile (
+               "1: lwz         %1, 0(%2)\n"
+               "   cmpw        %1, %5\n"
+               "   bne         2f\n"
+               "   stwu        %3, 0(%2)\n"
+               "2:\n"
+               ".section .fixup, \"ax\"\n"
+               "3:     li %0, 1\n"
+               "       b 2b\n"
+               ".previous\n"
+               ".section __ex_table,\"a\"\n"
+               _ASM_ALIGN "\n"
+               _ASM_PTR "1b, 3b\n"
+               ".previous"
+               : "=r"(faulted), "=r"(replaced)
+               : "r"(ip), "r"(new),
+                 "0"(faulted), "r"(old)
+               : "memory");
+
+       if (replaced != old && replaced != new)
+               faulted = 2;
+
+       if (!faulted)
+               flush_icache_range(ip, ip + 8);
+
+       return faulted;
+}
+
+notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long ip = (unsigned long)(&ftrace_call);
+       unsigned char old[4], *new;
+       int ret;
+
+       ip += CALL_BACK;
+
+       memcpy(old, &ftrace_call, 4);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       ret = ftrace_modify_code(ip, old, new);
+
+       return ret;
+}
+
+notrace int ftrace_mcount_set(unsigned long *data)
+{
+       unsigned long ip = (long)(&mcount_call);
+       unsigned long *addr = data;
+       unsigned char old[4], *new;
+
+       /* ip is at the location, but modify code will subtract this */
+       ip += CALL_BACK;
+
+       /*
+        * Replace the mcount stub with a pointer to the
+        * ip recorder function.
+        */
+       memcpy(old, &mcount_call, 4);
+       new = ftrace_call_replace(ip, *addr);
+       *addr = ftrace_modify_code(ip, old, new);
+
+       return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+       /* This is running in kstop_machine */
+
+       ftrace_mcount_set(data);
+
+       return 0;
+}
+
index e31aca9..1882bf4 100644 (file)
@@ -120,7 +120,8 @@ EXPORT_SYMBOL(_outsl_ns);
 
 #define IO_CHECK_ALIGN(v,a) ((((unsigned long)(v)) & ((a) - 1)) == 0)
 
-void _memset_io(volatile void __iomem *addr, int c, unsigned long n)
+notrace void
+_memset_io(volatile void __iomem *addr, int c, unsigned long n)
 {
        void *p = (void __force *)addr;
        u32 lc = c;
index bcc249d..dcc946e 100644 (file)
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(irq_desc);
 
 int distribute_irqs = 1;
 
-static inline unsigned long get_hard_enabled(void)
+static inline notrace unsigned long get_hard_enabled(void)
 {
        unsigned long enabled;
 
@@ -108,13 +108,13 @@ static inline unsigned long get_hard_enabled(void)
        return enabled;
 }
 
-static inline void set_soft_enabled(unsigned long enable)
+static inline notrace void set_soft_enabled(unsigned long enable)
 {
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
 }
 
-void raw_local_irq_restore(unsigned long en)
+notrace void raw_local_irq_restore(unsigned long en)
 {
        /*
         * get_paca()->soft_enabled = en;
index 5112a4a..22f8e2b 100644 (file)
 #include <asm/kgdb.h>
 #endif
 
+#ifdef CONFIG_FTRACE
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
 extern void bootx_init(unsigned long r4, unsigned long phys);
 
 int boot_cpuid;
@@ -81,7 +86,7 @@ int ucache_bsize;
  * from the address that it was linked at, so we must use RELOC/PTRRELOC
  * to access static data (including strings).  -- paulus
  */
-unsigned long __init early_init(unsigned long dt_ptr)
+notrace unsigned long __init early_init(unsigned long dt_ptr)
 {
        unsigned long offset = reloc_offset();
        struct cpu_spec *spec;
@@ -111,7 +116,7 @@ unsigned long __init early_init(unsigned long dt_ptr)
  * This is called very early on the boot process, after a minimal
  * MMU environment has been set up but before MMU_init is called.
  */
-void __init machine_init(unsigned long dt_ptr, unsigned long phys)
+notrace void __init machine_init(unsigned long dt_ptr, unsigned long phys)
 {
        /* Enable early debugging if any specified (see udbg.h) */
        udbg_early_init();
@@ -133,7 +138,7 @@ void __init machine_init(unsigned long dt_ptr, unsigned long phys)
 
 #ifdef CONFIG_BOOKE_WDT
 /* Checks wdt=x and wdt_period=xx command-line option */
-int __init early_parse_wdt(char *p)
+notrace int __init early_parse_wdt(char *p)
 {
        if (p && strncmp(p, "0", 1) != 0)
               booke_wdt_enabled = 1;
index 098fd96..277bf18 100644 (file)
@@ -85,6 +85,11 @@ struct ppc64_caches ppc64_caches = {
 };
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
+#ifdef CONFIG_FTRACE
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
 /*
  * These are used in binfmt_elf.c to put aux entries on the stack
  * for each elf executable being started.
index 4d72c8f..8977417 100644 (file)
@@ -1,5 +1,10 @@
 CFLAGS_bootx_init.o            += -fPIC
 
+ifdef CONFIG_FTRACE
+# Do not trace early boot code
+CFLAGS_REMOVE_bootx_init.o = -pg
+endif
+
 obj-y                          += pic.o setup.o time.o feature.o pci.o \
                                   sleep.o low_i2c.o cache.o pfunc_core.o \
                                   pfunc_base.o
index eb36f3b..fca9246 100644 (file)
@@ -11,6 +11,8 @@ config SPARC
 config SPARC64
        bool
        default y
+       select HAVE_DYNAMIC_FTRACE
+       select HAVE_FTRACE
        select HAVE_IDE
        select HAVE_LMB
        select HAVE_ARCH_KGDB
index 6a4d28a..d6d32d1 100644 (file)
@@ -33,7 +33,7 @@ config DEBUG_PAGEALLOC
 
 config MCOUNT
        bool
-       depends on STACK_DEBUG
+       depends on STACK_DEBUG || FTRACE
        default y
 
 config FRAME_POINTER
index ec4f5eb..418b578 100644 (file)
@@ -14,6 +14,7 @@ obj-y         := process.o setup.o cpu.o idprom.o \
                   power.o sbus.o sparc64_ksyms.o chmc.o \
                   visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
 
+obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-$(CONFIG_PCI)       += ebus.o pci_common.o \
                            pci_psycho.o pci_sabre.o pci_schizo.o \
diff --git a/arch/sparc64/kernel/ftrace.c b/arch/sparc64/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..f449e6d
--- /dev/null
@@ -0,0 +1,99 @@
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+static const u32 ftrace_nop = 0x01000000;
+
+notrace int ftrace_ip_converted(unsigned long ip)
+{
+       u32 insn = *(u32 *) ip;
+
+       return (insn == ftrace_nop);
+}
+
+notrace unsigned char *ftrace_nop_replace(void)
+{
+       return (char *)&ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+       static u32 call;
+       s32 off;
+
+       off = ((s32)addr - (s32)ip);
+       call = 0x40000000 | ((u32)off >> 2);
+
+       return (unsigned char *) &call;
+}
+
+notrace int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+                  unsigned char *new_code)
+{
+       u32 old = *(u32 *)old_code;
+       u32 new = *(u32 *)new_code;
+       u32 replaced;
+       int faulted;
+
+       __asm__ __volatile__(
+       "1:     cas     [%[ip]], %[old], %[new]\n"
+       "       flush   %[ip]\n"
+       "       mov     0, %[faulted]\n"
+       "2:\n"
+       "       .section .fixup,#alloc,#execinstr\n"
+       "       .align  4\n"
+       "3:     sethi   %%hi(2b), %[faulted]\n"
+       "       jmpl    %[faulted] + %%lo(2b), %%g0\n"
+       "        mov    1, %[faulted]\n"
+       "       .previous\n"
+       "       .section __ex_table,\"a\"\n"
+       "       .align  4\n"
+       "       .word   1b, 3b\n"
+       "       .previous\n"
+       : "=r" (replaced), [faulted] "=r" (faulted)
+       : [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
+       : "memory");
+
+       if (replaced != old && replaced != new)
+               faulted = 2;
+
+       return faulted;
+}
+
+notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long ip = (unsigned long)(&ftrace_call);
+       unsigned char old[4], *new;
+
+       memcpy(old, &ftrace_call, 4);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       return ftrace_modify_code(ip, old, new);
+}
+
+notrace int ftrace_mcount_set(unsigned long *data)
+{
+       unsigned long ip = (long)(&mcount_call);
+       unsigned long *addr = data;
+       unsigned char old[4], *new;
+
+       /*
+        * Replace the mcount stub with a pointer to the
+        * ip recorder function.
+        */
+       memcpy(old, &mcount_call, 4);
+       new = ftrace_call_replace(ip, *addr);
+       *addr = ftrace_modify_code(ip, old, new);
+
+       return 0;
+}
+
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+       ftrace_mcount_set(data);
+       return 0;
+}
index 9e4534b..7735a7a 100644 (file)
@@ -28,10 +28,13 @@ ovstack:
        .skip           OVSTACKSIZE
 #endif
        .text
-       .align 32
-       .globl mcount, _mcount
-mcount:
+       .align          32
+       .globl          _mcount
+       .type           _mcount,#function
+       .globl          mcount
+       .type           mcount,#function
 _mcount:
+mcount:
 #ifdef CONFIG_STACK_DEBUG
        /*
         * Check whether %sp is dangerously low.
@@ -55,6 +58,53 @@ _mcount:
         or             %g3, %lo(panicstring), %o0
        call            prom_halt
         nop
+1:
+#endif
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+       mov             %o7, %o0
+       .globl          mcount_call
+mcount_call:
+       call            ftrace_stub
+        mov            %o0, %o7
+#else
+       sethi           %hi(ftrace_trace_function), %g1
+       sethi           %hi(ftrace_stub), %g2
+       ldx             [%g1 + %lo(ftrace_trace_function)], %g1
+       or              %g2, %lo(ftrace_stub), %g2
+       cmp             %g1, %g2
+       be,pn           %icc, 1f
+        mov            %i7, %o1
+       jmpl            %g1, %g0
+        mov            %o7, %o0
+       /* not reached */
+1:
 #endif
-1:     retl
+#endif
+       retl
         nop
+       .size           _mcount,.-_mcount
+       .size           mcount,.-mcount
+
+#ifdef CONFIG_FTRACE
+       .globl          ftrace_stub
+       .type           ftrace_stub,#function
+ftrace_stub:
+       retl
+        nop
+       .size           ftrace_stub,.-ftrace_stub
+#ifdef CONFIG_DYNAMIC_FTRACE
+       .globl          ftrace_caller
+       .type           ftrace_caller,#function
+ftrace_caller:
+       mov             %i7, %o1
+       mov             %o7, %o0
+       .globl          ftrace_call
+ftrace_call:
+       call            ftrace_stub
+        mov            %o0, %o7
+       retl
+        nop
+       .size           ftrace_caller,.-ftrace_caller
+#endif
+#endif
index bf07b6f..c3a4c03 100644 (file)
@@ -23,6 +23,8 @@ config X86
        select HAVE_OPROFILE
        select HAVE_KPROBES
        select HAVE_KRETPROBES
+       select HAVE_DYNAMIC_FTRACE
+       select HAVE_FTRACE
        select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
        select HAVE_ARCH_KGDB if !X86_VOYAGER
 
index 1836337..f7169ed 100644 (file)
@@ -172,6 +172,34 @@ config IOMMU_LEAK
          Add a simple leak tracer to the IOMMU code. This is useful when you
          are debugging a buggy device driver that leaks IOMMU mappings.
 
+config MMIOTRACE_HOOKS
+       bool
+
+config MMIOTRACE
+       bool "Memory mapped IO tracing"
+       depends on DEBUG_KERNEL && PCI
+       select TRACING
+       select MMIOTRACE_HOOKS
+       default y
+       help
+         Mmiotrace traces Memory Mapped I/O access and is meant for
+         debugging and reverse engineering. It is called from the ioremap
+         implementation and works via page faults. Tracing is disabled by
+         default and can be enabled at run-time.
+
+         See Documentation/tracers/mmiotrace.txt.
+         If you are not helping to develop drivers, say N.
+
+config MMIOTRACE_TEST
+       tristate "Test module for mmiotrace"
+       depends on MMIOTRACE && m
+       help
+         This is a dumb module for testing mmiotrace. It is very dangerous
+         as it will write garbage to IO memory starting at a given address.
+         However, it should be safe to use on e.g. unused portion of VRAM.
+
+         Say N, unless you absolutely know what you are doing.
+
 #
 # IO delay types:
 #
index 77807d4..5ff6720 100644 (file)
@@ -6,6 +6,13 @@ extra-y                := head_$(BITS).o head$(BITS).o init_task.o vmlinux.lds
 
 CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE)
 
+ifdef CONFIG_FTRACE
+# Do not profile debug utilities
+CFLAGS_REMOVE_tsc_64.o = -pg
+CFLAGS_REMOVE_tsc_32.o = -pg
+CFLAGS_REMOVE_rtc.o = -pg
+endif
+
 #
 # vsyscalls (which work on the user stack) should have
 # no stack-protector checks:
@@ -56,6 +63,7 @@ obj-$(CONFIG_X86_MPPARSE)     += mpparse.o
 obj-$(CONFIG_X86_LOCAL_APIC)   += apic_$(BITS).o nmi_$(BITS).o
 obj-$(CONFIG_X86_IO_APIC)      += io_apic_$(BITS).o
 obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups_32.o
+obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_KEXEC)            += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)            += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump_$(BITS).o
index 65c7857..2763cb3 100644 (file)
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/list.h>
 #include <linux/kprobes.h>
 #include <linux/mm.h>
@@ -143,7 +143,7 @@ static const unsigned char *const p6_nops[ASM_NOP_MAX+1] = {
 #ifdef CONFIG_X86_64
 
 extern char __vsyscall_0;
-static inline const unsigned char*const * find_nop_table(void)
+const unsigned char *const *find_nop_table(void)
 {
        return boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
               boot_cpu_data.x86 < 6 ? k8_nops : p6_nops;
@@ -162,7 +162,7 @@ static const struct nop {
        { -1, NULL }
 };
 
-static const unsigned char*const * find_nop_table(void)
+const unsigned char *const *find_nop_table(void)
 {
        const unsigned char *const *noptable = intel_nops;
        int i;
@@ -279,7 +279,7 @@ struct smp_alt_module {
        struct list_head next;
 };
 static LIST_HEAD(smp_alt_modules);
-static DEFINE_SPINLOCK(smp_alt);
+static DEFINE_MUTEX(smp_alt);
 static int smp_mode = 1;       /* protected by smp_alt */
 
 void alternatives_smp_module_add(struct module *mod, char *name,
@@ -312,12 +312,12 @@ void alternatives_smp_module_add(struct module *mod, char *name,
                __func__, smp->locks, smp->locks_end,
                smp->text, smp->text_end, smp->name);
 
-       spin_lock(&smp_alt);
+       mutex_lock(&smp_alt);
        list_add_tail(&smp->next, &smp_alt_modules);
        if (boot_cpu_has(X86_FEATURE_UP))
                alternatives_smp_unlock(smp->locks, smp->locks_end,
                                        smp->text, smp->text_end);
-       spin_unlock(&smp_alt);
+       mutex_unlock(&smp_alt);
 }
 
 void alternatives_smp_module_del(struct module *mod)
@@ -327,17 +327,17 @@ void alternatives_smp_module_del(struct module *mod)
        if (smp_alt_once || noreplace_smp)
                return;
 
-       spin_lock(&smp_alt);
+       mutex_lock(&smp_alt);
        list_for_each_entry(item, &smp_alt_modules, next) {
                if (mod != item->mod)
                        continue;
                list_del(&item->next);
-               spin_unlock(&smp_alt);
+               mutex_unlock(&smp_alt);
                DPRINTK("%s: %s\n", __func__, item->name);
                kfree(item);
                return;
        }
-       spin_unlock(&smp_alt);
+       mutex_unlock(&smp_alt);
 }
 
 void alternatives_smp_switch(int smp)
@@ -359,7 +359,7 @@ void alternatives_smp_switch(int smp)
                return;
        BUG_ON(!smp && (num_online_cpus() > 1));
 
-       spin_lock(&smp_alt);
+       mutex_lock(&smp_alt);
 
        /*
         * Avoid unnecessary switches because it forces JIT based VMs to
@@ -383,7 +383,7 @@ void alternatives_smp_switch(int smp)
                                                mod->text, mod->text_end);
        }
        smp_mode = smp;
-       spin_unlock(&smp_alt);
+       mutex_unlock(&smp_alt);
 }
 
 #endif
index c778e4f..04ea83c 100644 (file)
@@ -1110,6 +1110,74 @@ ENDPROC(xen_failsafe_callback)
 
 #endif /* CONFIG_XEN */
 
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(mcount)
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+       movl 0xc(%esp), %eax
+
+.globl mcount_call
+mcount_call:
+       call ftrace_stub
+
+       popl %edx
+       popl %ecx
+       popl %eax
+
+       ret
+END(mcount)
+
+ENTRY(ftrace_caller)
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+       movl 0xc(%esp), %eax
+       movl 0x4(%ebp), %edx
+
+.globl ftrace_call
+ftrace_call:
+       call ftrace_stub
+
+       popl %edx
+       popl %ecx
+       popl %eax
+
+.globl ftrace_stub
+ftrace_stub:
+       ret
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(mcount)
+       cmpl $ftrace_stub, ftrace_trace_function
+       jnz trace
+.globl ftrace_stub
+ftrace_stub:
+       ret
+
+       /* taken from glibc */
+trace:
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+       movl 0xc(%esp), %eax
+       movl 0x4(%ebp), %edx
+
+       call *ftrace_trace_function
+
+       popl %edx
+       popl %ecx
+       popl %eax
+
+       jmp ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
+
 .section .rodata,"a"
 #include "syscall_table_32.S"
 
index 556a8df..fe25e5f 100644 (file)
 
        .code64
 
+#ifdef CONFIG_FTRACE
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(mcount)
+
+       subq $0x38, %rsp
+       movq %rax, (%rsp)
+       movq %rcx, 8(%rsp)
+       movq %rdx, 16(%rsp)
+       movq %rsi, 24(%rsp)
+       movq %rdi, 32(%rsp)
+       movq %r8, 40(%rsp)
+       movq %r9, 48(%rsp)
+
+       movq 0x38(%rsp), %rdi
+
+.globl mcount_call
+mcount_call:
+       call ftrace_stub
+
+       movq 48(%rsp), %r9
+       movq 40(%rsp), %r8
+       movq 32(%rsp), %rdi
+       movq 24(%rsp), %rsi
+       movq 16(%rsp), %rdx
+       movq 8(%rsp), %rcx
+       movq (%rsp), %rax
+       addq $0x38, %rsp
+
+       retq
+END(mcount)
+
+ENTRY(ftrace_caller)
+
+       /* taken from glibc */
+       subq $0x38, %rsp
+       movq %rax, (%rsp)
+       movq %rcx, 8(%rsp)
+       movq %rdx, 16(%rsp)
+       movq %rsi, 24(%rsp)
+       movq %rdi, 32(%rsp)
+       movq %r8, 40(%rsp)
+       movq %r9, 48(%rsp)
+
+       movq 0x38(%rsp), %rdi
+       movq 8(%rbp), %rsi
+
+.globl ftrace_call
+ftrace_call:
+       call ftrace_stub
+
+       movq 48(%rsp), %r9
+       movq 40(%rsp), %r8
+       movq 32(%rsp), %rdi
+       movq 24(%rsp), %rsi
+       movq 16(%rsp), %rdx
+       movq 8(%rsp), %rcx
+       movq (%rsp), %rax
+       addq $0x38, %rsp
+
+.globl ftrace_stub
+ftrace_stub:
+       retq
+END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+ENTRY(mcount)
+       cmpq $ftrace_stub, ftrace_trace_function
+       jnz trace
+.globl ftrace_stub
+ftrace_stub:
+       retq
+
+trace:
+       /* taken from glibc */
+       subq $0x38, %rsp
+       movq %rax, (%rsp)
+       movq %rcx, 8(%rsp)
+       movq %rdx, 16(%rsp)
+       movq %rsi, 24(%rsp)
+       movq %rdi, 32(%rsp)
+       movq %r8, 40(%rsp)
+       movq %r9, 48(%rsp)
+
+       movq 0x38(%rsp), %rdi
+       movq 8(%rbp), %rsi
+
+       call   *ftrace_trace_function
+
+       movq 48(%rsp), %r9
+       movq 40(%rsp), %r8
+       movq 32(%rsp), %rdi
+       movq 24(%rsp), %rsi
+       movq 16(%rsp), %rdx
+       movq 8(%rsp), %rcx
+       movq (%rsp), %rax
+       addq $0x38, %rsp
+
+       jmp ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FTRACE */
+
 #ifndef CONFIG_PREEMPT
 #define retint_kernel retint_restore_args
 #endif 
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
new file mode 100644 (file)
index 0000000..498608c
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ *
+ * Thanks goes to Ingo Molnar, for suggesting the idea.
+ * Mathieu Desnoyers, for suggesting postponing the modifications.
+ * Arjan van de Ven, for keeping me straight, and explaining to me
+ * the dangers of modifying code on the run.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/hardirq.h>
+#include <linux/ftrace.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/list.h>
+
+#include <asm/alternative.h>
+
+#define CALL_BACK              5
+
+/* Long is fine, even if it is only 4 bytes ;-) */
+static long *ftrace_nop;
+
+union ftrace_code_union {
+       char code[5];
+       struct {
+               char e8;
+               int offset;
+       } __attribute__((packed));
+};
+
+notrace int ftrace_ip_converted(unsigned long ip)
+{
+       unsigned long save;
+
+       ip -= CALL_BACK;
+       save = *(long *)ip;
+
+       return save == *ftrace_nop;
+}
+
+static int notrace ftrace_calc_offset(long ip, long addr)
+{
+       return (int)(addr - ip);
+}
+
+notrace unsigned char *ftrace_nop_replace(void)
+{
+       return (char *)ftrace_nop;
+}
+
+notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
+{
+       static union ftrace_code_union calc;
+
+       calc.e8         = 0xe8;
+       calc.offset     = ftrace_calc_offset(ip, addr);
+
+       /*
+        * No locking needed, this must be called via kstop_machine
+        * which in essence is like running on a uniprocessor machine.
+        */
+       return calc.code;
+}
+
+notrace int
+ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+                  unsigned char *new_code)
+{
+       unsigned replaced;
+       unsigned old = *(unsigned *)old_code; /* 4 bytes */
+       unsigned new = *(unsigned *)new_code; /* 4 bytes */
+       unsigned char newch = new_code[4];
+       int faulted = 0;
+
+       /* move the IP back to the start of the call */
+       ip -= CALL_BACK;
+
+       /*
+        * Note: Due to modules and __init, code can
+        *  disappear and change, we need to protect against faulting
+        *  as well as code changing.
+        *
+        * No real locking needed, this code is run through
+        * kstop_machine.
+        */
+       asm volatile (
+               "1: lock\n"
+               "   cmpxchg %3, (%2)\n"
+               "   jnz 2f\n"
+               "   movb %b4, 4(%2)\n"
+               "2:\n"
+               ".section .fixup, \"ax\"\n"
+               "3:     movl $1, %0\n"
+               "       jmp 2b\n"
+               ".previous\n"
+               _ASM_EXTABLE(1b, 3b)
+               : "=r"(faulted), "=a"(replaced)
+               : "r"(ip), "r"(new), "r"(newch),
+                 "0"(faulted), "a"(old)
+               : "memory");
+       sync_core();
+
+       if (replaced != old && replaced != new)
+               faulted = 2;
+
+       return faulted;
+}
+
+notrace int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+       unsigned long ip = (unsigned long)(&ftrace_call);
+       unsigned char old[5], *new;
+       int ret;
+
+       ip += CALL_BACK;
+
+       memcpy(old, &ftrace_call, 5);
+       new = ftrace_call_replace(ip, (unsigned long)func);
+       ret = ftrace_modify_code(ip, old, new);
+
+       return ret;
+}
+
+notrace int ftrace_mcount_set(unsigned long *data)
+{
+       unsigned long ip = (long)(&mcount_call);
+       unsigned long *addr = data;
+       unsigned char old[5], *new;
+
+       /* ip is at the location, but modify code will subtract this */
+       ip += CALL_BACK;
+
+       /*
+        * Replace the mcount stub with a pointer to the
+        * ip recorder function.
+        */
+       memcpy(old, &mcount_call, 5);
+       new = ftrace_call_replace(ip, *addr);
+       *addr = ftrace_modify_code(ip, old, new);
+
+       return 0;
+}
+
+int __init ftrace_dyn_arch_init(void *data)
+{
+       const unsigned char *const *noptable = find_nop_table();
+
+       /* This is running in kstop_machine */
+
+       ftrace_mcount_set(data);
+
+       ftrace_nop = (unsigned long *)noptable[CALL_BACK];
+
+       return 0;
+}
+
index deb4378..29999db 100644 (file)
@@ -1,7 +1,14 @@
+#include <linux/ftrace.h>
 #include <linux/module.h>
+
 #include <asm/checksum.h>
-#include <asm/desc.h>
 #include <asm/pgtable.h>
+#include <asm/desc.h>
+
+#ifdef CONFIG_FTRACE
+/* mcount is defined in assembly */
+EXPORT_SYMBOL(mcount);
+#endif
 
 /* Networking helper routines. */
 EXPORT_SYMBOL(csum_partial_copy_generic);
index d0b234c..88923fd 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/numa.h>
+#include <linux/ftrace.h>
+
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
@@ -107,6 +109,8 @@ NORET_TYPE void machine_kexec(struct kimage *image)
        unsigned long page_list[PAGES_NR];
        void *control_page;
 
+       tracer_disable();
+
        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();
 
index 576a03d..1558fdc 100644 (file)
@@ -11,6 +11,8 @@
 #include <linux/string.h>
 #include <linux/reboot.h>
 #include <linux/numa.h>
+#include <linux/ftrace.h>
+
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
@@ -184,6 +186,8 @@ NORET_TYPE void machine_kexec(struct kimage *image)
        unsigned long page_list[PAGES_NR];
        void *control_page;
 
+       tracer_disable();
+
        /* Interrupts aren't acceptable while we reboot */
        local_irq_disable();
 
index e2db9ac..347a7ab 100644 (file)
@@ -185,7 +185,10 @@ void cpu_idle(void)
 
                        local_irq_disable();
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+                       /* Don't trace irqs off for idle */
+                       stop_critical_timings();
                        idle();
+                       start_critical_timings();
                }
                tick_nohz_restart_sched_tick();
                preempt_enable_no_resched();
index c6eb5c9..ea090e6 100644 (file)
@@ -165,7 +165,10 @@ void cpu_idle(void)
                         */
                        local_irq_disable();
                        enter_idle();
+                       /* Don't trace irqs off for idle */
+                       stop_critical_timings();
                        idle();
+                       start_critical_timings();
                        /* In many cases the interrupt that ended idle
                           has already called exit_idle. But some idle
                           loops can be woken up without interrupt. */
index 61efa2f..4063dfa 100644 (file)
@@ -42,7 +42,8 @@
 #include <asm/topology.h>
 #include <asm/vgtod.h>
 
-#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
+#define __vsyscall(nr) \
+               __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
 #define __syscall_clobber "r11","cx","memory"
 
 /*
index f6c05d0..122885b 100644 (file)
@@ -1,15 +1,22 @@
 /* Exports for assembly files.
    All C exports should go in the respective C files. */
 
+#include <linux/ftrace.h>
 #include <linux/module.h>
-#include <net/checksum.h>
 #include <linux/smp.h>
 
+#include <net/checksum.h>
+
 #include <asm/processor.h>
-#include <asm/uaccess.h>
 #include <asm/pgtable.h>
+#include <asm/uaccess.h>
 #include <asm/desc.h>
 
+#ifdef CONFIG_FTRACE
+/* mcount is defined in assembly */
+EXPORT_SYMBOL(mcount);
+#endif
+
 EXPORT_SYMBOL(kernel_thread);
 
 EXPORT_SYMBOL(__get_user_1);
index 76f60f5..84aa288 100644 (file)
@@ -5,6 +5,7 @@
 obj-$(CONFIG_SMP) := msr-on-cpu.o
 
 lib-y := delay_$(BITS).o
+lib-y += thunk_$(BITS).o
 lib-y += usercopy_$(BITS).o getuser_$(BITS).o putuser_$(BITS).o
 lib-y += memcpy_$(BITS).o
 
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
new file mode 100644 (file)
index 0000000..650b11e
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Trampoline to trace irqs off. (otherwise CALLER_ADDR1 might crash)
+ * Copyright 2008 by Steven Rostedt, Red Hat, Inc
+ *  (inspired by Andi Kleen's thunk_64.S)
+ * Subject to the GNU public license, v.2. No warranty of any kind.
+ */
+
+       #include <linux/linkage.h>
+
+#define ARCH_TRACE_IRQS_ON                     \
+       pushl %eax;                             \
+       pushl %ecx;                             \
+       pushl %edx;                             \
+       call trace_hardirqs_on;                 \
+       popl %edx;                              \
+       popl %ecx;                              \
+       popl %eax;
+
+#define ARCH_TRACE_IRQS_OFF                    \
+       pushl %eax;                             \
+       pushl %ecx;                             \
+       pushl %edx;                             \
+       call trace_hardirqs_off;                \
+       popl %edx;                              \
+       popl %ecx;                              \
+       popl %eax;
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+       /* put return address in eax (arg1) */
+       .macro thunk_ra name,func
+       .globl \name
+\name:
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+       /* Place EIP in the arg1 */
+       movl 3*4(%esp), %eax
+       call \func
+       popl %edx
+       popl %ecx
+       popl %eax
+       ret
+       .endm
+
+       thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller
+       thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller
+#endif
index e009251..bf9a7d5 100644 (file)
@@ -2,6 +2,7 @@
  * Save registers before calling assembly functions. This avoids
  * disturbance of register allocation in some inline assembly constructs.
  * Copyright 2001,2002 by Andi Kleen, SuSE Labs.
+ * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
  * Subject to the GNU public license, v.2. No warranty of any kind.
  */
 
 #endif 
        
 #ifdef CONFIG_TRACE_IRQFLAGS
-       thunk trace_hardirqs_on_thunk,trace_hardirqs_on
-       thunk trace_hardirqs_off_thunk,trace_hardirqs_off
+       /* put return address in rdi (arg1) */
+       .macro thunk_ra name,func
+       .globl \name
+\name:
+       CFI_STARTPROC
+       SAVE_ARGS
+       /* SAVE_ARGS pushes 9 elements */
+       /* the next element would be the rip */
+       movq 9*8(%rsp), %rdi
+       call \func
+       jmp  restore
+       CFI_ENDPROC
+       .endm
+
+       thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller
+       thunk_ra trace_hardirqs_off_thunk,trace_hardirqs_off_caller
 #endif
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
index b7b3e4c..07dab50 100644 (file)
@@ -8,6 +8,11 @@ obj-$(CONFIG_X86_PTDUMP)       += dump_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)          += highmem_32.o
 
+obj-$(CONFIG_MMIOTRACE_HOOKS)  += kmmio.o
+obj-$(CONFIG_MMIOTRACE)                += mmiotrace.o
+mmiotrace-y                    := pf_in.o mmio-mod.o
+obj-$(CONFIG_MMIOTRACE_TEST)   += testmmiotrace.o
+
 ifeq ($(CONFIG_X86_32),y)
 obj-$(CONFIG_NUMA)             += discontig_32.o
 else
index 8bcb6f4..0a778e3 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/mmiotrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
 #define PF_RSVD                (1<<3)
 #define PF_INSTR       (1<<4)
 
+static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
+{
+#ifdef CONFIG_MMIOTRACE_HOOKS
+       if (unlikely(is_kmmio_active()))
+               if (kmmio_handler(regs, addr) == 1)
+                       return -1;
+#endif
+       return 0;
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
@@ -606,6 +617,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
        if (notify_page_fault(regs))
                return;
+       if (unlikely(kmmio_fault(regs, address)))
+               return;
 
        /*
         * We fault-in kernel-space virtual memory on-demand. The
index ec30d10..f96eca2 100644 (file)
@@ -710,6 +710,8 @@ void mark_rodata_ro(void)
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;
 
+#ifndef CONFIG_DYNAMIC_FTRACE
+       /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);
@@ -722,6 +724,8 @@ void mark_rodata_ro(void)
        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
 #endif
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
index 819dad9..17c0a61 100644 (file)
@@ -767,6 +767,13 @@ EXPORT_SYMBOL_GPL(rodata_test_data);
 void mark_rodata_ro(void)
 {
        unsigned long start = PFN_ALIGN(_stext), end = PFN_ALIGN(__end_rodata);
+       unsigned long rodata_start =
+               ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+       /* Dynamic tracing modifies the kernel text section */
+       start = rodata_start;
+#endif
 
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
@@ -776,8 +783,7 @@ void mark_rodata_ro(void)
         * The rodata section (but not the kernel text!) should also be
         * not-executable.
         */
-       start = ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
-       set_memory_nx(start, (end - start) >> PAGE_SHIFT);
+       set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);
 
        rodata_test();
 
index 2b2bb3f..e92aa46 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/mmiotrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
@@ -122,10 +123,13 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 {
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
+       const resource_size_t unaligned_phys_addr = phys_addr;
+       const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
+       void __iomem *ret_addr;
 
        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
@@ -233,7 +237,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                return NULL;
        }
 
-       return (void __iomem *) (vaddr + offset);
+       ret_addr = (void __iomem *) (vaddr + offset);
+       mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
+
+       return ret_addr;
 }
 
 /**
@@ -325,6 +332,8 @@ void iounmap(volatile void __iomem *addr)
        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);
 
+       mmiotrace_iounmap(addr);
+
        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
new file mode 100644 (file)
index 0000000..b65871e
--- /dev/null
@@ -0,0 +1,509 @@
+/* Support for MMIO probes.
+ * Benefits from much code borrowed from kprobes
+ * (C) 2002 Louis Zhuang <louis.zhuang@intel.com>.
+ *     2007 Alexander Eichner
+ *     2008 Pekka Paalanen <pq@iki.fi>
+ */
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/uaccess.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/percpu.h>
+#include <linux/kdebug.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <linux/errno.h>
+#include <asm/debugreg.h>
+#include <linux/mmiotrace.h>
+
+#define KMMIO_PAGE_HASH_BITS 4
+#define KMMIO_PAGE_TABLE_SIZE (1 << KMMIO_PAGE_HASH_BITS)
+
+struct kmmio_fault_page {
+       struct list_head list;
+       struct kmmio_fault_page *release_next;
+       unsigned long page; /* location of the fault page */
+
+       /*
+        * Number of times this page has been registered as a part
+        * of a probe. If zero, page is disarmed and this may be freed.
+        * Used only by writers (RCU).
+        */
+       int count;
+};
+
+struct kmmio_delayed_release {
+       struct rcu_head rcu;
+       struct kmmio_fault_page *release_list;
+};
+
+struct kmmio_context {
+       struct kmmio_fault_page *fpage;
+       struct kmmio_probe *probe;
+       unsigned long saved_flags;
+       unsigned long addr;
+       int active;
+};
+
+static DEFINE_SPINLOCK(kmmio_lock);
+
+/* Protected by kmmio_lock */
+unsigned int kmmio_count;
+
+/* Read-protected by RCU, write-protected by kmmio_lock. */
+static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
+static LIST_HEAD(kmmio_probes);
+
+static struct list_head *kmmio_page_list(unsigned long page)
+{
+       return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+}
+
+/* Accessed per-cpu */
+static DEFINE_PER_CPU(struct kmmio_context, kmmio_ctx);
+
+/*
+ * this is basically a dynamic stabbing problem:
+ * Could use the existing prio tree code or
+ * Possible better implementations:
+ * The Interval Skip List: A Data Structure for Finding All Intervals That
+ * Overlap a Point (might be simple)
+ * Space Efficient Dynamic Stabbing with Fast Queries - Mikkel Thorup
+ */
+/* Get the kmmio at this addr (if any). You must be holding RCU read lock. */
+static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
+{
+       struct kmmio_probe *p;
+       list_for_each_entry_rcu(p, &kmmio_probes, list) {
+               if (addr >= p->addr && addr <= (p->addr + p->len))
+                       return p;
+       }
+       return NULL;
+}
+
+/* You must be holding RCU read lock. */
+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
+{
+       struct list_head *head;
+       struct kmmio_fault_page *p;
+
+       page &= PAGE_MASK;
+       head = kmmio_page_list(page);
+       list_for_each_entry_rcu(p, head, list) {
+               if (p->page == page)
+                       return p;
+       }
+       return NULL;
+}
+
+static void set_page_present(unsigned long addr, bool present,
+                                                       unsigned int *pglevel)
+{
+       pteval_t pteval;
+       pmdval_t pmdval;
+       unsigned int level;
+       pmd_t *pmd;
+       pte_t *pte = lookup_address(addr, &level);
+
+       if (!pte) {
+               pr_err("kmmio: no pte for page 0x%08lx\n", addr);
+               return;
+       }
+
+       if (pglevel)
+               *pglevel = level;
+
+       switch (level) {
+       case PG_LEVEL_2M:
+               pmd = (pmd_t *)pte;
+               pmdval = pmd_val(*pmd) & ~_PAGE_PRESENT;
+               if (present)
+                       pmdval |= _PAGE_PRESENT;
+               set_pmd(pmd, __pmd(pmdval));
+               break;
+
+       case PG_LEVEL_4K:
+               pteval = pte_val(*pte) & ~_PAGE_PRESENT;
+               if (present)
+                       pteval |= _PAGE_PRESENT;
+               set_pte_atomic(pte, __pte(pteval));
+               break;
+
+       default:
+               pr_err("kmmio: unexpected page level 0x%x.\n", level);
+               return;
+       }
+
+       __flush_tlb_one(addr);
+}
+
+/** Mark the given page as not present. Access to it will trigger a fault. */
+static void arm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+{
+       set_page_present(page & PAGE_MASK, false, pglevel);
+}
+
+/** Mark the given page as present. */
+static void disarm_kmmio_fault_page(unsigned long page, unsigned int *pglevel)
+{
+       set_page_present(page & PAGE_MASK, true, pglevel);
+}
+
+/*
+ * This is being called from do_page_fault().
+ *
+ * We may be in an interrupt or a critical section. Also prefetching may
+ * trigger a page fault. We may be in the middle of process switch.
+ * We cannot take any locks, because we could be executing especially
+ * within a kmmio critical section.
+ *
+ * Local interrupts are disabled, so preemption cannot happen.
+ * Do not enable interrupts, do not sleep, and watch out for other CPUs.
+ */
+/*
+ * Interrupts are disabled on entry as trap3 is an interrupt gate
+ * and they remain disabled throughout this function.
+ */
+int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+{
+       struct kmmio_context *ctx;
+       struct kmmio_fault_page *faultpage;
+       int ret = 0; /* default to fault not handled */
+
+       /*
+        * Preemption is now disabled to prevent process switch during
+        * single stepping. We can only handle one active kmmio trace
+        * per cpu, so ensure that we finish it before something else
+        * gets to run. We also hold the RCU read lock over single
+        * stepping to avoid looking up the probe and kmmio_fault_page
+        * again.
+        */
+       preempt_disable();
+       rcu_read_lock();
+
+       faultpage = get_kmmio_fault_page(addr);
+       if (!faultpage) {
+               /*
+                * Either this page fault is not caused by kmmio, or
+                * another CPU just pulled the kmmio probe from under
+                * our feet. The latter case should not be possible.
+                */
+               goto no_kmmio;
+       }
+
+       ctx = &get_cpu_var(kmmio_ctx);
+       if (ctx->active) {
+               disarm_kmmio_fault_page(faultpage->page, NULL);
+               if (addr == ctx->addr) {
+                       /*
+                        * On SMP we sometimes get recursive probe hits on the
+                        * same address. Context is already saved, fall out.
+                        */
+                       pr_debug("kmmio: duplicate probe hit on CPU %d, for "
+                                               "address 0x%08lx.\n",
+                                               smp_processor_id(), addr);
+                       ret = 1;
+                       goto no_kmmio_ctx;
+               }
+               /*
+                * Prevent overwriting already in-flight context.
+                * This should not happen, let's hope disarming at least
+                * prevents a panic.
+                */
+               pr_emerg("kmmio: recursive probe hit on CPU %d, "
+                                       "for address 0x%08lx. Ignoring.\n",
+                                       smp_processor_id(), addr);
+               pr_emerg("kmmio: previous hit was at 0x%08lx.\n",
+                                       ctx->addr);
+               goto no_kmmio_ctx;
+       }
+       ctx->active++;
+
+       ctx->fpage = faultpage;
+       ctx->probe = get_kmmio_probe(addr);
+       ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
+       ctx->addr = addr;
+
+       if (ctx->probe && ctx->probe->pre_handler)
+               ctx->probe->pre_handler(ctx->probe, regs, addr);
+
+       /*
+        * Enable single-stepping and disable interrupts for the faulting
+        * context. Local interrupts must not get enabled during stepping.
+        */
+       regs->flags |= X86_EFLAGS_TF;
+       regs->flags &= ~X86_EFLAGS_IF;
+
+       /* Now we set present bit in PTE and single step. */
+       disarm_kmmio_fault_page(ctx->fpage->page, NULL);
+
+       /*
+        * If another cpu accesses the same page while we are stepping,
+        * the access will not be caught. It will simply succeed and the
+        * only downside is we lose the event. If this becomes a problem,
+        * the user should drop to single cpu before tracing.
+        */
+
+       put_cpu_var(kmmio_ctx);
+       return 1; /* fault handled */
+
+no_kmmio_ctx:
+       put_cpu_var(kmmio_ctx);
+no_kmmio:
+       rcu_read_unlock();
+       preempt_enable_no_resched();
+       return ret;
+}
+
+/*
+ * Interrupts are disabled on entry as trap1 is an interrupt gate
+ * and they remain disabled throughout this function.
+ * This must always get called as the pair to kmmio_handler().
+ */
+static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
+{
+       int ret = 0;
+       struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
+
+       if (!ctx->active) {
+               pr_debug("kmmio: spurious debug trap on CPU %d.\n",
+                                                       smp_processor_id());
+               goto out;
+       }
+
+       if (ctx->probe && ctx->probe->post_handler)
+               ctx->probe->post_handler(ctx->probe, condition, regs);
+
+       arm_kmmio_fault_page(ctx->fpage->page, NULL);
+
+       regs->flags &= ~X86_EFLAGS_TF;
+       regs->flags |= ctx->saved_flags;
+
+       /* These were acquired in kmmio_handler(). */
+       ctx->active--;
+       BUG_ON(ctx->active);
+       rcu_read_unlock();
+       preempt_enable_no_resched();
+
+       /*
+        * if somebody else is singlestepping across a probe point, flags
+        * will have TF set, in which case, continue the remaining processing
+        * of do_debug, as if this is not a probe hit.
+        */
+       if (!(regs->flags & X86_EFLAGS_TF))
+               ret = 1;
+out:
+       put_cpu_var(kmmio_ctx);
+       return ret;
+}
+
+/* You must be holding kmmio_lock. */
+static int add_kmmio_fault_page(unsigned long page)
+{
+       struct kmmio_fault_page *f;
+
+       page &= PAGE_MASK;
+       f = get_kmmio_fault_page(page);
+       if (f) {
+               if (!f->count)
+                       arm_kmmio_fault_page(f->page, NULL);
+               f->count++;
+               return 0;
+       }
+
+       f = kmalloc(sizeof(*f), GFP_ATOMIC);
+       if (!f)
+               return -1;
+
+       f->count = 1;
+       f->page = page;
+       list_add_rcu(&f->list, kmmio_page_list(f->page));
+
+       arm_kmmio_fault_page(f->page, NULL);
+
+       return 0;
+}
+
+/* You must be holding kmmio_lock. */
+static void release_kmmio_fault_page(unsigned long page,
+                               struct kmmio_fault_page **release_list)
+{
+       struct kmmio_fault_page *f;
+
+       page &= PAGE_MASK;
+       f = get_kmmio_fault_page(page);
+       if (!f)
+               return;
+
+       f->count--;
+       BUG_ON(f->count < 0);
+       if (!f->count) {
+               disarm_kmmio_fault_page(f->page, NULL);
+               f->release_next = *release_list;
+               *release_list = f;
+       }
+}
+
+/*
+ * With page-unaligned ioremaps, one or two armed pages may contain
+ * addresses from outside the intended mapping. Events for these addresses
+ * are currently silently dropped. The events may result only from programming
+ * mistakes by accessing addresses before the beginning or past the end of a
+ * mapping.
+ */
+int register_kmmio_probe(struct kmmio_probe *p)
+{
+       unsigned long flags;
+       int ret = 0;
+       unsigned long size = 0;
+       const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+
+       spin_lock_irqsave(&kmmio_lock, flags);
+       if (get_kmmio_probe(p->addr)) {
+               ret = -EEXIST;
+               goto out;
+       }
+       kmmio_count++;
+       list_add_rcu(&p->list, &kmmio_probes);
+       while (size < size_lim) {
+               if (add_kmmio_fault_page(p->addr + size))
+                       pr_err("kmmio: Unable to set page fault.\n");
+               size += PAGE_SIZE;
+       }
+out:
+       spin_unlock_irqrestore(&kmmio_lock, flags);
+       /*
+        * XXX: What should I do here?
+        * Here was a call to global_flush_tlb(), but it does not exist
+        * anymore. It seems it's not needed after all.
+        */
+       return ret;
+}
+EXPORT_SYMBOL(register_kmmio_probe);
+
+static void rcu_free_kmmio_fault_pages(struct rcu_head *head)
+{
+       struct kmmio_delayed_release *dr = container_of(
+                                               head,
+                                               struct kmmio_delayed_release,
+                                               rcu);
+       struct kmmio_fault_page *p = dr->release_list;
+       while (p) {
+               struct kmmio_fault_page *next = p->release_next;
+               BUG_ON(p->count);
+               kfree(p);
+               p = next;
+       }
+       kfree(dr);
+}
+
+static void remove_kmmio_fault_pages(struct rcu_head *head)
+{
+       struct kmmio_delayed_release *dr = container_of(
+                                               head,
+                                               struct kmmio_delayed_release,
+                                               rcu);
+       struct kmmio_fault_page *p = dr->release_list;
+       struct kmmio_fault_page **prevp = &dr->release_list;
+       unsigned long flags;
+       spin_lock_irqsave(&kmmio_lock, flags);
+       while (p) {
+               if (!p->count)
+                       list_del_rcu(&p->list);
+               else
+                       *prevp = p->release_next;
+               prevp = &p->release_next;
+               p = p->release_next;
+       }
+       spin_unlock_irqrestore(&kmmio_lock, flags);
+       /* This is the real RCU destroy call. */
+       call_rcu(&dr->rcu, rcu_free_kmmio_fault_pages);
+}
+
+/*
+ * Remove a kmmio probe. You have to synchronize_rcu() before you can be
+ * sure that the callbacks will not be called anymore. Only after that
+ * you may actually release your struct kmmio_probe.
+ *
+ * Unregistering a kmmio fault page has three steps:
+ * 1. release_kmmio_fault_page()
+ *    Disarm the page, wait a grace period to let all faults finish.
+ * 2. remove_kmmio_fault_pages()
+ *    Remove the pages from kmmio_page_table.
+ * 3. rcu_free_kmmio_fault_pages()
+ *    Actually free the kmmio_fault_page structs with RCU.
+ */
+void unregister_kmmio_probe(struct kmmio_probe *p)
+{
+       unsigned long flags;
+       unsigned long size = 0;
+       const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+       struct kmmio_fault_page *release_list = NULL;
+       struct kmmio_delayed_release *drelease;
+
+       spin_lock_irqsave(&kmmio_lock, flags);
+       while (size < size_lim) {
+               release_kmmio_fault_page(p->addr + size, &release_list);
+               size += PAGE_SIZE;
+       }
+       list_del_rcu(&p->list);
+       kmmio_count--;
+       spin_unlock_irqrestore(&kmmio_lock, flags);
+
+       drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
+       if (!drelease) {
+               pr_crit("kmmio: leaking kmmio_fault_page objects.\n");
+               return;
+       }
+       drelease->release_list = release_list;
+
+       /*
+        * This is not really RCU here. We have just disarmed a set of
+        * pages so that they cannot trigger page faults anymore. However,
+        * we cannot remove the pages from kmmio_page_table,
+        * because a probe hit might be in flight on another CPU. The
+        * pages are collected into a list, and they will be removed from
+        * kmmio_page_table when it is certain that no probe hit related to
+        * these pages can be in flight. RCU grace period sounds like a
+        * good choice.
+        *
+        * If we removed the pages too early, kmmio page fault handler might
+        * not find the respective kmmio_fault_page and determine it's not
+        * a kmmio fault, when it actually is. This would lead to madness.
+        */
+       call_rcu(&drelease->rcu, remove_kmmio_fault_pages);
+}
+EXPORT_SYMBOL(unregister_kmmio_probe);
+
+static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
+                                                               void *args)
+{
+       struct die_args *arg = args;
+
+       if (val == DIE_DEBUG && (arg->err & DR_STEP))
+               if (post_kmmio_handler(arg->err, arg->regs) == 1)
+                       return NOTIFY_STOP;
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block nb_die = {
+       .notifier_call = kmmio_die_notifier
+};
+
+static int __init init_kmmio(void)
+{
+       int i;
+       for (i = 0; i < KMMIO_PAGE_TABLE_SIZE; i++)
+               INIT_LIST_HEAD(&kmmio_page_table[i]);
+       return register_die_notifier(&nb_die);
+}
+fs_initcall(init_kmmio); /* should be before device_initcall() */
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
new file mode 100644 (file)
index 0000000..e7397e1
--- /dev/null
@@ -0,0 +1,515 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2005
+ *               Jeff Muizelaar, 2006, 2007
+ *               Pekka Paalanen, 2008 <pq@iki.fi>
+ *
+ * Derived from the read-mod example from relay-examples by Tom Zanussi.
+ */
+#define DEBUG 1
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/version.h>
+#include <linux/kallsyms.h>
+#include <asm/pgtable.h>
+#include <linux/mmiotrace.h>
+#include <asm/e820.h> /* for ISA_START_ADDRESS */
+#include <asm/atomic.h>
+#include <linux/percpu.h>
+#include <linux/cpu.h>
+
+#include "pf_in.h"
+
+#define NAME "mmiotrace: "
+
+struct trap_reason {
+       unsigned long addr;
+       unsigned long ip;
+       enum reason_type type;
+       int active_traces;
+};
+
+struct remap_trace {
+       struct list_head list;
+       struct kmmio_probe probe;
+       resource_size_t phys;
+       unsigned long id;
+};
+
+/* Accessed per-cpu. */
+static DEFINE_PER_CPU(struct trap_reason, pf_reason);
+static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);
+
+#if 0 /* XXX: no way to gather this info anymore */
+/* Access to this is not per-cpu. */
+static DEFINE_PER_CPU(atomic_t, dropped);
+#endif
+
+static struct dentry *marker_file;
+
+static DEFINE_MUTEX(mmiotrace_mutex);
+static DEFINE_SPINLOCK(trace_lock);
+static atomic_t mmiotrace_enabled;
+static LIST_HEAD(trace_list);          /* struct remap_trace */
+
+/*
+ * Locking in this file:
+ * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
+ * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
+ *   and trace_lock.
+ * - Routines depending on is_enabled() must take trace_lock.
+ * - trace_list users must hold trace_lock.
+ * - is_enabled() guarantees that mmio_trace_record is allowed.
+ * - pre/post callbacks assume the effect of is_enabled() being true.
+ */
+
+/* module parameters */
+static unsigned long   filter_offset;
+static int             nommiotrace;
+static int             trace_pc;
+
+module_param(filter_offset, ulong, 0);
+module_param(nommiotrace, bool, 0);
+module_param(trace_pc, bool, 0);
+
+MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
+MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
+MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");
+
+static bool is_enabled(void)
+{
+       return atomic_read(&mmiotrace_enabled);
+}
+
+#if 0 /* XXX: needs rewrite */
+/*
+ * Write callback for the debugfs entry:
+ * Read a marker and write it to the mmio trace log
+ */
+static ssize_t write_marker(struct file *file, const char __user *buffer,
+                                               size_t count, loff_t *ppos)
+{
+       char *event = NULL;
+       struct mm_io_header *headp;
+       ssize_t len = (count > 65535) ? 65535 : count;
+
+       event = kzalloc(sizeof(*headp) + len, GFP_KERNEL);
+       if (!event)
+               return -ENOMEM;
+
+       headp = (struct mm_io_header *)event;
+       headp->type = MMIO_MAGIC | (MMIO_MARKER << MMIO_OPCODE_SHIFT);
+       headp->data_len = len;
+
+       if (copy_from_user(event + sizeof(*headp), buffer, len)) {
+               kfree(event);
+               return -EFAULT;
+       }
+
+       spin_lock_irq(&trace_lock);
+#if 0 /* XXX: convert this to use tracing */
+       if (is_enabled())
+               relay_write(chan, event, sizeof(*headp) + len);
+       else
+#endif
+               len = -EINVAL;
+       spin_unlock_irq(&trace_lock);
+       kfree(event);
+       return len;
+}
+#endif
+
+static void print_pte(unsigned long address)
+{
+       unsigned int level;
+       pte_t *pte = lookup_address(address, &level);
+
+       if (!pte) {
+               pr_err(NAME "Error in %s: no pte for page 0x%08lx\n",
+                                                       __func__, address);
+               return;
+       }
+
+       if (level == PG_LEVEL_2M) {
+               pr_emerg(NAME "4MB pages are not currently supported: "
+                                                       "0x%08lx\n", address);
+               BUG();
+       }
+       pr_info(NAME "pte for 0x%lx: 0x%llx 0x%llx\n", address,
+               (unsigned long long)pte_val(*pte),
+               (unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
+}
+
+/*
+ * For some reason the pre/post pairs have been called in an
+ * unmatched order. Report and die.
+ */
+static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
+{
+       const struct trap_reason *my_reason = &get_cpu_var(pf_reason);
+       pr_emerg(NAME "unexpected fault for address: 0x%08lx, "
+                                       "last fault for address: 0x%08lx\n",
+                                       addr, my_reason->addr);
+       print_pte(addr);
+       print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip);
+       print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip);
+#ifdef __i386__
+       pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
+                       regs->ax, regs->bx, regs->cx, regs->dx);
+       pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
+                       regs->si, regs->di, regs->bp, regs->sp);
+#else
+       pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
+                                       regs->ax, regs->cx, regs->dx);
+       pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
+                               regs->si, regs->di, regs->bp, regs->sp);
+#endif
+       put_cpu_var(pf_reason);
+       BUG();
+}
+
+static void pre(struct kmmio_probe *p, struct pt_regs *regs,
+                                               unsigned long addr)
+{
+       struct trap_reason *my_reason = &get_cpu_var(pf_reason);
+       struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
+       const unsigned long instptr = instruction_pointer(regs);
+       const enum reason_type type = get_ins_type(instptr);
+       struct remap_trace *trace = p->private;
+
+       /* it doesn't make sense to have more than one active trace per cpu */
+       if (my_reason->active_traces)
+               die_kmmio_nesting_error(regs, addr);
+       else
+               my_reason->active_traces++;
+
+       my_reason->type = type;
+       my_reason->addr = addr;
+       my_reason->ip = instptr;
+
+       my_trace->phys = addr - trace->probe.addr + trace->phys;
+       my_trace->map_id = trace->id;
+
+       /*
+        * Only record the program counter when requested.
+        * It may taint clean-room reverse engineering.
+        */
+       if (trace_pc)
+               my_trace->pc = instptr;
+       else
+               my_trace->pc = 0;
+
+       /*
+        * XXX: the timestamp recorded will be *after* the tracing has been
+        * done, not at the time we hit the instruction. SMP implications
+        * on event ordering?
+        */
+
+       switch (type) {
+       case REG_READ:
+               my_trace->opcode = MMIO_READ;
+               my_trace->width = get_ins_mem_width(instptr);
+               break;
+       case REG_WRITE:
+               my_trace->opcode = MMIO_WRITE;
+               my_trace->width = get_ins_mem_width(instptr);
+               my_trace->value = get_ins_reg_val(instptr, regs);
+               break;
+       case IMM_WRITE:
+               my_trace->opcode = MMIO_WRITE;
+               my_trace->width = get_ins_mem_width(instptr);
+               my_trace->value = get_ins_imm_val(instptr);
+               break;
+       default:
+               {
+                       unsigned char *ip = (unsigned char *)instptr;
+                       my_trace->opcode = MMIO_UNKNOWN_OP;
+                       my_trace->width = 0;
+                       my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
+                                                               *(ip + 2);
+               }
+       }
+       put_cpu_var(cpu_trace);
+       put_cpu_var(pf_reason);
+}
+
+static void post(struct kmmio_probe *p, unsigned long condition,
+                                                       struct pt_regs *regs)
+{
+       struct trap_reason *my_reason = &get_cpu_var(pf_reason);
+       struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
+
+       /* this should always return the active_trace count to 0 */
+       my_reason->active_traces--;
+       if (my_reason->active_traces) {
+               pr_emerg(NAME "unexpected post handler");
+               BUG();
+       }
+
+       switch (my_reason->type) {
+       case REG_READ:
+               my_trace->value = get_ins_reg_val(my_reason->ip, regs);
+               break;
+       default:
+               break;
+       }
+
+       mmio_trace_rw(my_trace);
+       put_cpu_var(cpu_trace);
+       put_cpu_var(pf_reason);
+}
+
+static void ioremap_trace_core(resource_size_t offset, unsigned long size,
+                                                       void __iomem *addr)
+{
+       static atomic_t next_id;
+       struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
+       /* These are page-unaligned. */
+       struct mmiotrace_map map = {
+               .phys = offset,
+               .virt = (unsigned long)addr,
+               .len = size,
+               .opcode = MMIO_PROBE
+       };
+
+       if (!trace) {
+               pr_err(NAME "kmalloc failed in ioremap\n");
+               return;
+       }
+
+       *trace = (struct remap_trace) {
+               .probe = {
+                       .addr = (unsigned long)addr,
+                       .len = size,
+                       .pre_handler = pre,
+                       .post_handler = post,
+                       .private = trace
+               },
+               .phys = offset,
+               .id = atomic_inc_return(&next_id)
+       };
+       map.map_id = trace->id;
+
+       spin_lock_irq(&trace_lock);
+       if (!is_enabled())
+               goto not_enabled;
+
+       mmio_trace_mapping(&map);
+       list_add_tail(&trace->list, &trace_list);
+       if (!nommiotrace)
+               register_kmmio_probe(&trace->probe);
+
+not_enabled:
+       spin_unlock_irq(&trace_lock);
+}
+
+void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+                                               void __iomem *addr)
+{
+       if (!is_enabled()) /* recheck and proper locking in *_core() */
+               return;
+
+       pr_debug(NAME "ioremap_*(0x%llx, 0x%lx) = %p\n",
+                               (unsigned long long)offset, size, addr);
+       if ((filter_offset) && (offset != filter_offset))
+               return;
+       ioremap_trace_core(offset, size, addr);
+}
+
+static void iounmap_trace_core(volatile void __iomem *addr)
+{
+       struct mmiotrace_map map = {
+               .phys = 0,
+               .virt = (unsigned long)addr,
+               .len = 0,
+               .opcode = MMIO_UNPROBE
+       };
+       struct remap_trace *trace;
+       struct remap_trace *tmp;
+       struct remap_trace *found_trace = NULL;
+
+       pr_debug(NAME "Unmapping %p.\n", addr);
+
+       spin_lock_irq(&trace_lock);
+       if (!is_enabled())
+               goto not_enabled;
+
+       list_for_each_entry_safe(trace, tmp, &trace_list, list) {
+               if ((unsigned long)addr == trace->probe.addr) {
+                       if (!nommiotrace)
+                               unregister_kmmio_probe(&trace->probe);
+                       list_del(&trace->list);
+                       found_trace = trace;
+                       break;
+               }
+       }
+       map.map_id = (found_trace) ? found_trace->id : -1;
+       mmio_trace_mapping(&map);
+
+not_enabled:
+       spin_unlock_irq(&trace_lock);
+       if (found_trace) {
+               synchronize_rcu(); /* unregister_kmmio_probe() requirement */
+               kfree(found_trace);
+       }
+}
+
+void mmiotrace_iounmap(volatile void __iomem *addr)
+{
+       might_sleep();
+       if (is_enabled()) /* recheck and proper locking in *_core() */
+               iounmap_trace_core(addr);
+}
+
+static void clear_trace_list(void)
+{
+       struct remap_trace *trace;
+       struct remap_trace *tmp;
+
+       /*
+        * No locking required, because the caller ensures we are in a
+        * critical section via mutex, and is_enabled() is false,
+        * i.e. nothing can traverse or modify this list.
+        * Caller also ensures is_enabled() cannot change.
+        */
+       list_for_each_entry(trace, &trace_list, list) {
+               pr_notice(NAME "purging non-iounmapped "
+                                       "trace @0x%08lx, size 0x%lx.\n",
+                                       trace->probe.addr, trace->probe.len);
+               if (!nommiotrace)
+                       unregister_kmmio_probe(&trace->probe);
+       }
+       synchronize_rcu(); /* unregister_kmmio_probe() requirement */
+
+       list_for_each_entry_safe(trace, tmp, &trace_list, list) {
+               list_del(&trace->list);
+               kfree(trace);
+       }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static cpumask_t downed_cpus;
+
+static void enter_uniprocessor(void)
+{
+       int cpu;
+       int err;
+
+       get_online_cpus();
+       downed_cpus = cpu_online_map;
+       cpu_clear(first_cpu(cpu_online_map), downed_cpus);
+       if (num_online_cpus() > 1)
+               pr_notice(NAME "Disabling non-boot CPUs...\n");
+       put_online_cpus();
+
+       for_each_cpu_mask(cpu, downed_cpus) {
+               err = cpu_down(cpu);
+               if (!err)
+                       pr_info(NAME "CPU%d is down.\n", cpu);
+               else
+                       pr_err(NAME "Error taking CPU%d down: %d\n", cpu, err);
+       }
+       if (num_online_cpus() > 1)
+               pr_warning(NAME "multiple CPUs still online, "
+                                               "may miss events.\n");
+}
+
+static void leave_uniprocessor(void)
+{
+       int cpu;
+       int err;
+
+       if (cpus_weight(downed_cpus) == 0)
+               return;
+       pr_notice(NAME "Re-enabling CPUs...\n");
+       for_each_cpu_mask(cpu, downed_cpus) {
+               err = cpu_up(cpu);
+               if (!err)
+                       pr_info(NAME "enabled CPU%d.\n", cpu);
+               else
+                       pr_err(NAME "cannot re-enable CPU%d: %d\n", cpu, err);
+       }
+}
+
+#else /* !CONFIG_HOTPLUG_CPU */
+static void enter_uniprocessor(void)
+{
+       if (num_online_cpus() > 1)
+               pr_warning(NAME "multiple CPUs are online, may miss events. "
+                       "Suggest booting with maxcpus=1 kernel argument.\n");
+}
+
+static void leave_uniprocessor(void)
+{
+}
+#endif
+
+#if 0 /* XXX: out of order */
+static struct file_operations fops_marker = {
+       .owner =        THIS_MODULE,
+       .write =        write_marker
+};
+#endif
+
+void enable_mmiotrace(void)
+{
+       mutex_lock(&mmiotrace_mutex);
+       if (is_enabled())
+               goto out;
+
+#if 0 /* XXX: tracing does not support text entries */
+       marker_file = debugfs_create_file("marker", 0660, dir, NULL,
+                                                               &fops_marker);
+       if (!marker_file)
+               pr_err(NAME "marker file creation failed.\n");
+#endif
+
+       if (nommiotrace)
+               pr_info(NAME "MMIO tracing disabled.\n");
+       enter_uniprocessor();
+       spin_lock_irq(&trace_lock);
+       atomic_inc(&mmiotrace_enabled);
+       spin_unlock_irq(&trace_lock);
+       pr_info(NAME "enabled.\n");
+out:
+       mutex_unlock(&mmiotrace_mutex);
+}
+
+void disable_mmiotrace(void)
+{
+       mutex_lock(&mmiotrace_mutex);
+       if (!is_enabled())
+               goto out;
+
+       spin_lock_irq(&trace_lock);
+       atomic_dec(&mmiotrace_enabled);
+       BUG_ON(is_enabled());
+       spin_unlock_irq(&trace_lock);
+
+       clear_trace_list(); /* guarantees: no more kmmio callbacks */
+       leave_uniprocessor();
+       if (marker_file) {
+               debugfs_remove(marker_file);
+               marker_file = NULL;
+       }
+
+       pr_info(NAME "disabled.\n");
+out:
+       mutex_unlock(&mmiotrace_mutex);
+}
index 60bcb5b..57970f2 100644 (file)
@@ -227,6 +227,7 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
 
        return pte_offset_kernel(pmd, address);
 }
+EXPORT_SYMBOL_GPL(lookup_address);
 
 /*
  * Set the new pmd in all the pgds we know about:
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
new file mode 100644 (file)
index 0000000..efa1911
--- /dev/null
@@ -0,0 +1,489 @@
+/*
+ *  Fault Injection Test harness (FI)
+ *  Copyright (C) Intel Corp.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307,
+ *  USA.
+ *
+ */
+
+/*  Id: pf_in.c,v 1.1.1.1 2002/11/12 05:56:32 brlock Exp
+ *  Copyright by Intel Corp., 2002
+ *  Louis Zhuang (louis.zhuang@intel.com)
+ *
+ *  Bjorn Steinbrink (B.Steinbrink@gmx.de), 2007
+ */
+
+#include <linux/module.h>
+#include <linux/ptrace.h> /* struct pt_regs */
+#include "pf_in.h"
+
+#ifdef __i386__
+/* IA32 Manual 3, 2-1 */
+static unsigned char prefix_codes[] = {
+       0xF0, 0xF2, 0xF3, 0x2E, 0x36, 0x3E, 0x26, 0x64,
+       0x65, 0x2E, 0x3E, 0x66, 0x67
+};
+/* IA32 Manual 3, 3-432*/
+static unsigned int reg_rop[] = {
+       0x8A, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
+};
+static unsigned int reg_wop[] = { 0x88, 0x89 };
+static unsigned int imm_wop[] = { 0xC6, 0xC7 };
+/* IA32 Manual 3, 3-432*/
+static unsigned int rw8[] = { 0x88, 0x8A, 0xC6 };
+static unsigned int rw32[] = {
+       0x89, 0x8B, 0xC7, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
+};
+static unsigned int mw8[] = { 0x88, 0x8A, 0xC6, 0xB60F, 0xBE0F };
+static unsigned int mw16[] = { 0xB70F, 0xBF0F };
+static unsigned int mw32[] = { 0x89, 0x8B, 0xC7 };
+static unsigned int mw64[] = {};
+#else /* not __i386__ */
+static unsigned char prefix_codes[] = {
+       0x66, 0x67, 0x2E, 0x3E, 0x26, 0x64, 0x65, 0x36,
+       0xF0, 0xF3, 0xF2,
+       /* REX Prefixes */
+       0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
+       0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f
+};
+/* AMD64 Manual 3, Appendix A*/
+static unsigned int reg_rop[] = {
+       0x8A, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
+};
+static unsigned int reg_wop[] = { 0x88, 0x89 };
+static unsigned int imm_wop[] = { 0xC6, 0xC7 };
+static unsigned int rw8[] = { 0xC6, 0x88, 0x8A };
+static unsigned int rw32[] = {
+       0xC7, 0x89, 0x8B, 0xB60F, 0xB70F, 0xBE0F, 0xBF0F
+};
+/* 8 bit only */
+static unsigned int mw8[] = { 0xC6, 0x88, 0x8A, 0xB60F, 0xBE0F };
+/* 16 bit only */
+static unsigned int mw16[] = { 0xB70F, 0xBF0F };
+/* 16 or 32 bit */
+static unsigned int mw32[] = { 0xC7 };
+/* 16, 32 or 64 bit */
+static unsigned int mw64[] = { 0x89, 0x8B };
+#endif /* not __i386__ */
+
+static int skip_prefix(unsigned char *addr, int *shorted, int *enlarged,
+                                                               int *rexr)
+{
+       int i;
+       unsigned char *p = addr;
+       *shorted = 0;
+       *enlarged = 0;
+       *rexr = 0;
+
+restart:
+       for (i = 0; i < ARRAY_SIZE(prefix_codes); i++) {
+               if (*p == prefix_codes[i]) {
+                       if (*p == 0x66)
+                               *shorted = 1;
+#ifdef __amd64__
+                       if ((*p & 0xf8) == 0x48)
+                               *enlarged = 1;
+                       if ((*p & 0xf4) == 0x44)
+                               *rexr = 1;
+#endif
+                       p++;
+                       goto restart;
+               }
+       }
+
+       return (p - addr);
+}
+
+static int get_opcode(unsigned char *addr, unsigned int *opcode)
+{
+       int len;
+
+       if (*addr == 0x0F) {
+               /* 0x0F is extension instruction */
+               *opcode = *(unsigned short *)addr;
+               len = 2;
+       } else {
+               *opcode = *addr;
+               len = 1;
+       }
+
+       return len;
+}
+
+#define CHECK_OP_TYPE(opcode, array, type) \
+       for (i = 0; i < ARRAY_SIZE(array); i++) { \
+               if (array[i] == opcode) { \
+                       rv = type; \
+                       goto exit; \
+               } \
+       }
+
+enum reason_type get_ins_type(unsigned long ins_addr)
+{
+       unsigned int opcode;
+       unsigned char *p;
+       int shorted, enlarged, rexr;
+       int i;
+       enum reason_type rv = OTHERS;
+
+       p = (unsigned char *)ins_addr;
+       p += skip_prefix(p, &shorted, &enlarged, &rexr);
+       p += get_opcode(p, &opcode);
+
+       CHECK_OP_TYPE(opcode, reg_rop, REG_READ);
+       CHECK_OP_TYPE(opcode, reg_wop, REG_WRITE);
+       CHECK_OP_TYPE(opcode, imm_wop, IMM_WRITE);
+
+exit:
+       return rv;
+}
+#undef CHECK_OP_TYPE
+
+static unsigned int get_ins_reg_width(unsigned long ins_addr)
+{
+       unsigned int opcode;
+       unsigned char *p;
+       int i, shorted, enlarged, rexr;
+
+       p = (unsigned char *)ins_addr;
+       p += skip_prefix(p, &shorted, &enlarged, &rexr);
+       p += get_opcode(p, &opcode);
+
+       for (i = 0; i < ARRAY_SIZE(rw8); i++)
+               if (rw8[i] == opcode)
+                       return 1;
+
+       for (i = 0; i < ARRAY_SIZE(rw32); i++)
+               if (rw32[i] == opcode)
+                       return (shorted ? 2 : (enlarged ? 8 : 4));
+
+       printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode);
+       return 0;
+}
+
+unsigned int get_ins_mem_width(unsigned long ins_addr)
+{
+       unsigned int opcode;
+       unsigned char *p;
+       int i, shorted, enlarged, rexr;
+
+       p = (unsigned char *)ins_addr;
+       p += skip_prefix(p, &shorted, &enlarged, &rexr);
+       p += get_opcode(p, &opcode);
+
+       for (i = 0; i < ARRAY_SIZE(mw8); i++)
+               if (mw8[i] == opcode)
+                       return 1;
+
+       for (i = 0; i < ARRAY_SIZE(mw16); i++)
+               if (mw16[i] == opcode)
+                       return 2;
+
+       for (i = 0; i < ARRAY_SIZE(mw32); i++)
+               if (mw32[i] == opcode)
+                       return shorted ? 2 : 4;
+
+       for (i = 0; i < ARRAY_SIZE(mw64); i++)
+               if (mw64[i] == opcode)
+                       return shorted ? 2 : (enlarged ? 8 : 4);
+
+       printk(KERN_ERR "mmiotrace: Unknown opcode 0x%02x\n", opcode);
+       return 0;
+}
+
+/*
+ * Define register ident in mod/rm byte.
+ * Note: these are NOT the same as in ptrace-abi.h.
+ */
+enum {
+       arg_AL = 0,
+       arg_CL = 1,
+       arg_DL = 2,
+       arg_BL = 3,
+       arg_AH = 4,
+       arg_CH = 5,
+       arg_DH = 6,
+       arg_BH = 7,
+
+       arg_AX = 0,
+       arg_CX = 1,
+       arg_DX = 2,
+       arg_BX = 3,
+       arg_SP = 4,
+       arg_BP = 5,
+       arg_SI = 6,
+       arg_DI = 7,
+#ifdef __amd64__
+       arg_R8  = 8,
+       arg_R9  = 9,
+       arg_R10 = 10,
+       arg_R11 = 11,
+       arg_R12 = 12,
+       arg_R13 = 13,
+       arg_R14 = 14,
+       arg_R15 = 15
+#endif
+};
+
+static unsigned char *get_reg_w8(int no, struct pt_regs *regs)
+{
+       unsigned char *rv = NULL;
+
+       switch (no) {
+       case arg_AL:
+               rv = (unsigned char *)&regs->ax;
+               break;
+       case arg_BL:
+               rv = (unsigned char *)&regs->bx;
+               break;
+       case arg_CL:
+               rv = (unsigned char *)&regs->cx;
+               break;
+       case arg_DL:
+               rv = (unsigned char *)&regs->dx;
+               break;
+       case arg_AH:
+               rv = 1 + (unsigned char *)&regs->ax;
+               break;
+       case arg_BH:
+               rv = 1 + (unsigned char *)&regs->bx;
+               break;
+       case arg_CH:
+               rv = 1 + (unsigned char *)&regs->cx;
+               break;
+       case arg_DH:
+               rv = 1 + (unsigned char *)&regs->dx;
+               break;
+#ifdef __amd64__
+       case arg_R8:
+               rv = (unsigned char *)&regs->r8;
+               break;
+       case arg_R9:
+               rv = (unsigned char *)&regs->r9;
+               break;
+       case arg_R10:
+               rv = (unsigned char *)&regs->r10;
+               break;
+       case arg_R11:
+               rv = (unsigned char *)&regs->r11;
+               break;
+       case arg_R12:
+               rv = (unsigned char *)&regs->r12;
+               break;
+       case arg_R13:
+               rv = (unsigned char *)&regs->r13;
+               break;
+       case arg_R14:
+               rv = (unsigned char *)&regs->r14;
+               break;
+       case arg_R15:
+               rv = (unsigned char *)&regs->r15;
+               break;
+#endif
+       default:
+               printk(KERN_ERR "mmiotrace: Error reg no# %d\n", no);
+               break;
+       }
+       return rv;
+}
+
+static unsigned long *get_reg_w32(int no, struct pt_regs *regs)
+{
+       unsigned long *rv = NULL;
+
+       switch (no) {
+       case arg_AX:
+               rv = &regs->ax;
+               break;
+       case arg_BX:
+               rv = &regs->bx;
+               break;
+       case arg_CX:
+               rv = &regs->cx;
+               break;
+       case arg_DX:
+               rv = &regs->dx;
+               break;
+       case arg_SP:
+               rv = &regs->sp;
+               break;
+       case arg_BP:
+               rv = &regs->bp;
+               break;
+       case arg_SI:
+               rv = &regs->si;
+               break;
+       case arg_DI:
+               rv = &regs->di;
+               break;
+#ifdef __amd64__
+       case arg_R8:
+               rv = &regs->r8;
+               break;
+       case arg_R9:
+               rv = &regs->r9;
+               break;
+       case arg_R10:
+               rv = &regs->r10;
+               break;
+       case arg_R11:
+               rv = &regs->r11;
+               break;
+       case arg_R12:
+               rv = &regs->r12;
+               break;
+       case arg_R13:
+               rv = &regs->r13;
+               break;
+       case arg_R14:
+               rv = &regs->r14;
+               break;
+       case arg_R15:
+               rv = &regs->r15;
+               break;
+#endif
+       default:
+               printk(KERN_ERR "mmiotrace: Error reg no# %d\n", no);
+       }
+
+       return rv;
+}
+
+unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
+{
+       unsigned int opcode;
+       unsigned char mod_rm;
+       int reg;
+       unsigned char *p;
+       int i, shorted, enlarged, rexr;
+       unsigned long rv;
+
+       p = (unsigned char *)ins_addr;
+       p += skip_prefix(p, &shorted, &enlarged, &rexr);
+       p += get_opcode(p, &opcode);
+       for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
+               if (reg_rop[i] == opcode) {
+                       rv = REG_READ;
+                       goto do_work;
+               }
+
+       for (i = 0; i < ARRAY_SIZE(reg_wop); i++)
+               if (reg_wop[i] == opcode) {
+                       rv = REG_WRITE;
+                       goto do_work;
+               }
+
+       printk(KERN_ERR "mmiotrace: Not a register instruction, opcode "
+                                                       "0x%02x\n", opcode);
+       goto err;
+
+do_work:
+       mod_rm = *p;
+       reg = ((mod_rm >> 3) & 0x7) | (rexr << 3);
+       switch (get_ins_reg_width(ins_addr)) {
+       case 1:
+               return *get_reg_w8(reg, regs);
+
+       case 2:
+               return *(unsigned short *)get_reg_w32(reg, regs);
+
+       case 4:
+               return *(unsigned int *)get_reg_w32(reg, regs);
+
+#ifdef __amd64__
+       case 8:
+               return *(unsigned long *)get_reg_w32(reg, regs);
+#endif
+
+       default:
+               printk(KERN_ERR "mmiotrace: Error width# %d\n", reg);
+       }
+
+err:
+       return 0;
+}
+
+unsigned long get_ins_imm_val(unsigned long ins_addr)
+{
+       unsigned int opcode;
+       unsigned char mod_rm;
+       unsigned char mod;
+       unsigned char *p;
+       int i, shorted, enlarged, rexr;
+       unsigned long rv;
+
+       p = (unsigned char *)ins_addr;
+       p += skip_prefix(p, &shorted, &enlarged, &rexr);
+       p += get_opcode(p, &opcode);
+       for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
+               if (imm_wop[i] == opcode) {
+                       rv = IMM_WRITE;
+                       goto do_work;
+               }
+
+       printk(KERN_ERR "mmiotrace: Not an immediate instruction, opcode "
+                                                       "0x%02x\n", opcode);
+       goto err;
+
+do_work:
+       mod_rm = *p;
+       mod = mod_rm >> 6;
+       p++;
+       switch (mod) {
+       case 0:
+               /* if r/m is 5 we have a 32 disp (IA32 Manual 3, Table 2-2)  */
+               /* AMD64: XXX Check for address size prefix? */
+               if ((mod_rm & 0x7) == 0x5)
+                       p += 4;
+               break;
+
+       case 1:
+               p += 1;
+               break;
+
+       case 2:
+               p += 4;
+               break;
+
+       case 3:
+       default:
+               printk(KERN_ERR "mmiotrace: not a memory access instruction "
+                                               "at 0x%lx, rm_mod=0x%02x\n",
+                                               ins_addr, mod_rm);
+       }
+
+       switch (get_ins_reg_width(ins_addr)) {
+       case 1:
+               return *(unsigned char *)p;
+
+       case 2:
+               return *(unsigned short *)p;
+
+       case 4:
+               return *(unsigned int *)p;
+
+#ifdef __amd64__
+       case 8:
+               return *(unsigned long *)p;
+#endif
+
+       default:
+               printk(KERN_ERR "mmiotrace: Error: width.\n");
+       }
+
+err:
+       return 0;
+}
diff --git a/arch/x86/mm/pf_in.h b/arch/x86/mm/pf_in.h
new file mode 100644 (file)
index 0000000..e05341a
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ *  Fault Injection Test harness (FI)
+ *  Copyright (C) Intel Crop.
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307,
+ *  USA.
+ *
+ */
+
+#ifndef __PF_H_
+#define __PF_H_
+
+enum reason_type {
+       NOT_ME, /* page fault is not in regions */
+       NOTHING,        /* access others point in regions */
+       REG_READ,       /* read from addr to reg */
+       REG_WRITE,      /* write from reg to addr */
+       IMM_WRITE,      /* write from imm to addr */
+       OTHERS  /* Other instructions can not intercept */
+};
+
+enum reason_type get_ins_type(unsigned long ins_addr);
+unsigned int get_ins_mem_width(unsigned long ins_addr);
+unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs);
+unsigned long get_ins_imm_val(unsigned long ins_addr);
+
+#endif /* __PF_H_ */
diff --git a/arch/x86/mm/testmmiotrace.c b/arch/x86/mm/testmmiotrace.c
new file mode 100644 (file)
index 0000000..d877c5b
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Written by Pekka Paalanen, 2008 <pq@iki.fi>
+ */
+#include <linux/module.h>
+#include <linux/io.h>
+
+#define MODULE_NAME "testmmiotrace"
+
+static unsigned long mmio_address;
+module_param(mmio_address, ulong, 0);
+MODULE_PARM_DESC(mmio_address, "Start address of the mapping of 16 kB.");
+
+static void do_write_test(void __iomem *p)
+{
+       unsigned int i;
+       for (i = 0; i < 256; i++)
+               iowrite8(i, p + i);
+       for (i = 1024; i < (5 * 1024); i += 2)
+               iowrite16(i * 12 + 7, p + i);
+       for (i = (5 * 1024); i < (16 * 1024); i += 4)
+               iowrite32(i * 212371 + 13, p + i);
+}
+
+static void do_read_test(void __iomem *p)
+{
+       unsigned int i;
+       for (i = 0; i < 256; i++)
+               ioread8(p + i);
+       for (i = 1024; i < (5 * 1024); i += 2)
+               ioread16(p + i);
+       for (i = (5 * 1024); i < (16 * 1024); i += 4)
+               ioread32(p + i);
+}
+
+static void do_test(void)
+{
+       void __iomem *p = ioremap_nocache(mmio_address, 0x4000);
+       if (!p) {
+               pr_err(MODULE_NAME ": could not ioremap, aborting.\n");
+               return;
+       }
+       do_write_test(p);
+       do_read_test(p);
+       iounmap(p);
+}
+
+static int __init init(void)
+{
+       if (mmio_address == 0) {
+               pr_err(MODULE_NAME ": you have to use the module argument "
+                                                       "mmio_address.\n");
+               pr_err(MODULE_NAME ": DO NOT LOAD THIS MODULE UNLESS"
+                               " YOU REALLY KNOW WHAT YOU ARE DOING!\n");
+               return -ENXIO;
+       }
+
+       pr_warning(MODULE_NAME ": WARNING: mapping 16 kB @ 0x%08lx "
+                                       "in PCI address space, and writing "
+                                       "rubbish in there.\n", mmio_address);
+       do_test();
+       return 0;
+}
+
+static void __exit cleanup(void)
+{
+       pr_debug(MODULE_NAME ": unloaded.\n");
+}
+
+module_init(init);
+module_exit(cleanup);
+MODULE_LICENSE("GPL");
index efa2ba7..1ef0f90 100644 (file)
@@ -23,7 +23,7 @@
 
 #define gtod vdso_vsyscall_gtod_data
 
-static long vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
        long ret;
        asm("syscall" : "=a" (ret) :
@@ -31,7 +31,7 @@ static long vdso_fallback_gettime(long clock, struct timespec *ts)
        return ret;
 }
 
-static inline long vgetns(void)
+notrace static inline long vgetns(void)
 {
        long v;
        cycles_t (*vread)(void);
@@ -40,7 +40,7 @@ static inline long vgetns(void)
        return (v * gtod->clock.mult) >> gtod->clock.shift;
 }
 
-static noinline int do_realtime(struct timespec *ts)
+notrace static noinline int do_realtime(struct timespec *ts)
 {
        unsigned long seq, ns;
        do {
@@ -54,7 +54,8 @@ static noinline int do_realtime(struct timespec *ts)
 }
 
 /* Copy of the version in kernel/time.c which we cannot directly access */
-static void vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
+notrace static void
+vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
 {
        while (nsec >= NSEC_PER_SEC) {
                nsec -= NSEC_PER_SEC;
@@ -68,7 +69,7 @@ static void vset_normalized_timespec(struct timespec *ts, long sec, long nsec)
        ts->tv_nsec = nsec;
 }
 
-static noinline int do_monotonic(struct timespec *ts)
+notrace static noinline int do_monotonic(struct timespec *ts)
 {
        unsigned long seq, ns, secs;
        do {
@@ -82,7 +83,7 @@ static noinline int do_monotonic(struct timespec *ts)
        return 0;
 }
 
-int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 {
        if (likely(gtod->sysctl_enabled && gtod->clock.vread))
                switch (clock) {
@@ -96,7 +97,7 @@ int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
 int clock_gettime(clockid_t, struct timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));
 
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
        long ret;
        if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
index c8097f1..9fbc6b2 100644 (file)
@@ -13,7 +13,8 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+notrace long
+__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
        unsigned int p;
 
index ad8c9f7..f75a5fc 100644 (file)
@@ -59,6 +59,11 @@ extern void iseries_handle_interrupts(void);
                get_paca()->hard_enabled = 0;   \
        } while(0)
 
+static inline int irqs_disabled_flags(unsigned long flags)
+{
+       return flags == 0;
+}
+
 #else
 
 #if defined(CONFIG_BOOKE)
@@ -113,6 +118,11 @@ static inline void local_irq_save_ptr(unsigned long *flags)
 #define hard_irq_enable()      local_irq_enable()
 #define hard_irq_disable()     local_irq_disable()
 
+static inline int irqs_disabled_flags(unsigned long flags)
+{
+       return (flags & MSR_EE) == 0;
+}
+
 #endif /* CONFIG_PPC64 */
 
 /*
index 1f6a9ca..f6aa18e 100644 (file)
@@ -72,6 +72,8 @@ static inline void alternatives_smp_module_del(struct module *mod) {}
 static inline void alternatives_smp_switch(int smp) {}
 #endif /* CONFIG_SMP */
 
+const unsigned char *const *find_nop_table(void);
+
 /*
  * Alternative instructions for different CPU types or capabilities.
  *
index c242527..24d71b1 100644 (file)
@@ -179,8 +179,6 @@ static inline void trace_hardirqs_fixup(void)
  * have a reliable stack. x86_64 only.
  */
 #define SWAPGS_UNSAFE_STACK    swapgs
-#define ARCH_TRACE_IRQS_ON             call trace_hardirqs_on_thunk
-#define ARCH_TRACE_IRQS_OFF            call trace_hardirqs_off_thunk
 #define ARCH_LOCKDEP_SYS_EXIT          call lockdep_sys_exit_thunk
 #define ARCH_LOCKDEP_SYS_EXIT_IRQ      \
        TRACE_IRQS_ON; \
@@ -192,24 +190,6 @@ static inline void trace_hardirqs_fixup(void)
        TRACE_IRQS_OFF;
 
 #else
-#define ARCH_TRACE_IRQS_ON                     \
-       pushl %eax;                             \
-       pushl %ecx;                             \
-       pushl %edx;                             \
-       call trace_hardirqs_on;                 \
-       popl %edx;                              \
-       popl %ecx;                              \
-       popl %eax;
-
-#define ARCH_TRACE_IRQS_OFF                    \
-       pushl %eax;                             \
-       pushl %ecx;                             \
-       pushl %edx;                             \
-       call trace_hardirqs_off;                \
-       popl %edx;                              \
-       popl %ecx;                              \
-       popl %eax;
-
 #define ARCH_LOCKDEP_SYS_EXIT                  \
        pushl %eax;                             \
        pushl %ecx;                             \
@@ -223,8 +203,8 @@ static inline void trace_hardirqs_fixup(void)
 #endif
 
 #ifdef CONFIG_TRACE_IRQFLAGS
-#  define TRACE_IRQS_ON                ARCH_TRACE_IRQS_ON
-#  define TRACE_IRQS_OFF       ARCH_TRACE_IRQS_OFF
+#  define TRACE_IRQS_ON                call trace_hardirqs_on_thunk;
+#  define TRACE_IRQS_OFF       call trace_hardirqs_off_thunk;
 #else
 #  define TRACE_IRQS_ON
 #  define TRACE_IRQS_OFF
index 17b3700..6b66ff9 100644 (file)
@@ -24,7 +24,8 @@ enum vsyscall_num {
        ((unused, __section__ (".vsyscall_gtod_data"),aligned(16)))
 #define __section_vsyscall_clock __attribute__ \
        ((unused, __section__ (".vsyscall_clock"),aligned(16)))
-#define __vsyscall_fn __attribute__ ((unused,__section__(".vsyscall_fn")))
+#define __vsyscall_fn \
+       __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace
 
 #define VGETCPU_RDTSCP 1
 #define VGETCPU_LSL    2
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
new file mode 100644 (file)
index 0000000..922e23d
--- /dev/null
@@ -0,0 +1,132 @@
+#ifndef _LINUX_FTRACE_H
+#define _LINUX_FTRACE_H
+
+#ifdef CONFIG_FTRACE
+
+#include <linux/linkage.h>
+#include <linux/fs.h>
+
+extern int ftrace_enabled;
+extern int
+ftrace_enable_sysctl(struct ctl_table *table, int write,
+                    struct file *filp, void __user *buffer, size_t *lenp,
+                    loff_t *ppos);
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+
+struct ftrace_ops {
+       ftrace_func_t     func;
+       struct ftrace_ops *next;
+};
+
+/*
+ * The ftrace_ops must be a static and should also
+ * be read_mostly.  These functions do modify read_mostly variables
+ * so use them sparely. Never free an ftrace_op or modify the
+ * next pointer after it has been registered. Even after unregistering
+ * it, the next pointer may still be used internally.
+ */
+int register_ftrace_function(struct ftrace_ops *ops);
+int unregister_ftrace_function(struct ftrace_ops *ops);
+void clear_ftrace_function(void);
+
+extern void ftrace_stub(unsigned long a0, unsigned long a1);
+extern void mcount(void);
+
+#else /* !CONFIG_FTRACE */
+# define register_ftrace_function(ops) do { } while (0)
+# define unregister_ftrace_function(ops) do { } while (0)
+# define clear_ftrace_function(ops) do { } while (0)
+#endif /* CONFIG_FTRACE */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define FTRACE_HASHBITS       10
+# define FTRACE_HASHSIZE       (1<<FTRACE_HASHBITS)
+
+enum {
+       FTRACE_FL_FREE          = (1 << 0),
+       FTRACE_FL_FAILED        = (1 << 1),
+       FTRACE_FL_FILTER        = (1 << 2),
+       FTRACE_FL_ENABLED       = (1 << 3),
+};
+
+struct dyn_ftrace {
+       struct hlist_node node;
+       unsigned long     ip;
+       unsigned long     flags;
+};
+
+int ftrace_force_update(void);
+void ftrace_set_filter(unsigned char *buf, int len, int reset);
+
+/* defined in arch */
+extern int ftrace_ip_converted(unsigned long ip);
+extern unsigned char *ftrace_nop_replace(void);
+extern unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr);
+extern int ftrace_dyn_arch_init(void *data);
+extern int ftrace_mcount_set(unsigned long *data);
+extern int ftrace_modify_code(unsigned long ip, unsigned char *old_code,
+                             unsigned char *new_code);
+extern int ftrace_update_ftrace_func(ftrace_func_t func);
+extern void ftrace_caller(void);
+extern void ftrace_call(void);
+extern void mcount_call(void);
+#else
+# define ftrace_force_update()                 ({ 0; })
+# define ftrace_set_filter(buf, len, reset)    do { } while (0)
+#endif
+
+/* totally disable ftrace - can not re-enable after this */
+void ftrace_kill(void);
+
+static inline void tracer_disable(void)
+{
+#ifdef CONFIG_FTRACE
+       ftrace_enabled = 0;
+#endif
+}
+
+#ifdef CONFIG_FRAME_POINTER
+/* TODO: need to fix this for ARM */
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 ((unsigned long)__builtin_return_address(1))
+# define CALLER_ADDR2 ((unsigned long)__builtin_return_address(2))
+# define CALLER_ADDR3 ((unsigned long)__builtin_return_address(3))
+# define CALLER_ADDR4 ((unsigned long)__builtin_return_address(4))
+# define CALLER_ADDR5 ((unsigned long)__builtin_return_address(5))
+# define CALLER_ADDR6 ((unsigned long)__builtin_return_address(6))
+#else
+# define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
+# define CALLER_ADDR1 0UL
+# define CALLER_ADDR2 0UL
+# define CALLER_ADDR3 0UL
+# define CALLER_ADDR4 0UL
+# define CALLER_ADDR5 0UL
+# define CALLER_ADDR6 0UL
+#endif
+
+#ifdef CONFIG_IRQSOFF_TRACER
+  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
+  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
+#else
+# define time_hardirqs_on(a0, a1)              do { } while (0)
+# define time_hardirqs_off(a0, a1)             do { } while (0)
+#endif
+
+#ifdef CONFIG_PREEMPT_TRACER
+  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
+  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
+#else
+# define trace_preempt_on(a0, a1)              do { } while (0)
+# define trace_preempt_off(a0, a1)             do { } while (0)
+#endif
+
+#ifdef CONFIG_CONTEXT_SWITCH_TRACER
+extern void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
+#else
+static inline void
+ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
+#endif
+
+#endif /* _LINUX_FTRACE_H */
index e600c4e..2b1c2e5 100644 (file)
 #define _LINUX_TRACE_IRQFLAGS_H
 
 #ifdef CONFIG_TRACE_IRQFLAGS
-  extern void trace_hardirqs_on(void);
-  extern void trace_hardirqs_off(void);
   extern void trace_softirqs_on(unsigned long ip);
   extern void trace_softirqs_off(unsigned long ip);
+  extern void trace_hardirqs_on(void);
+  extern void trace_hardirqs_off(void);
 # define trace_hardirq_context(p)      ((p)->hardirq_context)
 # define trace_softirq_context(p)      ((p)->softirq_context)
 # define trace_hardirqs_enabled(p)     ((p)->hardirqs_enabled)
 # define INIT_TRACE_IRQFLAGS
 #endif
 
+#if defined(CONFIG_IRQSOFF_TRACER) || \
+       defined(CONFIG_PREEMPT_TRACER)
+ extern void stop_critical_timings(void);
+ extern void start_critical_timings(void);
+#else
+# define stop_critical_timings() do { } while (0)
+# define start_critical_timings() do { } while (0)
+#endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 
 #include <asm/irqflags.h>
index 2119610..14f329c 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <asm/linkage.h>
 
+#define notrace __attribute__((no_instrument_function))
+
 #ifdef __cplusplus
 #define CPP_ASMLINKAGE extern "C"
 #else
index 430f6ad..1290653 100644 (file)
@@ -44,8 +44,8 @@ struct marker {
                                 */
        char state;             /* Marker state. */
        char ptype;             /* probe type : 0 : single, 1 : multi */
-       void (*call)(const struct marker *mdata,        /* Probe wrapper */
-               void *call_private, const char *fmt, ...);
+                               /* Probe wrapper */
+       void (*call)(const struct marker *mdata, void *call_private, ...);
        struct marker_probe_closure single;
        struct marker_probe_closure *multi;
 } __attribute__((aligned(8)));
@@ -58,8 +58,12 @@ struct marker {
  * Make sure the alignment of the structure in the __markers section will
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
+ *
+ * The "generic" argument controls which marker enabling mechanism must be used.
+ * If generic is true, a variable read is used.
+ * If generic is false, immediate values are used.
  */
-#define __trace_mark(name, call_private, format, args...)              \
+#define __trace_mark(generic, name, call_private, format, args...)     \
        do {                                                            \
                static const char __mstrtab_##name[]                    \
                __attribute__((section("__markers_strings")))           \
@@ -72,15 +76,14 @@ struct marker {
                __mark_check_format(format, ## args);                   \
                if (unlikely(__mark_##name.state)) {                    \
                        (*__mark_##name.call)                           \
-                               (&__mark_##name, call_private,          \
-                               format, ## args);                       \
+                               (&__mark_##name, call_private, ## args);\
                }                                                       \
        } while (0)
 
 extern void marker_update_probe_range(struct marker *begin,
        struct marker *end);
 #else /* !CONFIG_MARKERS */
-#define __trace_mark(name, call_private, format, args...) \
+#define __trace_mark(generic, name, call_private, format, args...) \
                __mark_check_format(format, ## args)
 static inline void marker_update_probe_range(struct marker *begin,
        struct marker *end)
@@ -88,15 +91,30 @@ static inline void marker_update_probe_range(struct marker *begin,
 #endif /* CONFIG_MARKERS */
 
 /**
- * trace_mark - Marker
+ * trace_mark - Marker using code patching
  * @name: marker name, not quoted.
  * @format: format string
  * @args...: variable argument list
  *
- * Places a marker.
+ * Places a marker using optimized code patching technique (imv_read())
+ * to be enabled when immediate values are present.
  */
 #define trace_mark(name, format, args...) \
-       __trace_mark(name, NULL, format, ## args)
+       __trace_mark(0, name, NULL, format, ## args)
+
+/**
+ * _trace_mark - Marker using variable read
+ * @name: marker name, not quoted.
+ * @format: format string
+ * @args...: variable argument list
+ *
+ * Places a marker using a standard memory read (_imv_read()) to be
+ * enabled. Should be used for markers in code paths where instruction
+ * modification based enabling is not welcome. (__init and __exit functions,
+ * lockdep, some traps, printk).
+ */
+#define _trace_mark(name, format, args...) \
+       __trace_mark(1, name, NULL, format, ## args)
 
 /**
  * MARK_NOARGS - Format string for a marker with no argument.
@@ -117,9 +135,9 @@ static inline void __printf(1, 2) ___mark_check_format(const char *fmt, ...)
 extern marker_probe_func __mark_empty_function;
 
 extern void marker_probe_cb(const struct marker *mdata,
-       void *call_private, const char *fmt, ...);
+       void *call_private, ...);
 extern void marker_probe_cb_noarg(const struct marker *mdata,
-       void *call_private, const char *fmt, ...);
+       void *call_private, ...);
 
 /*
  * Connect a probe to a marker.
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
new file mode 100644 (file)
index 0000000..61d19e1
--- /dev/null
@@ -0,0 +1,85 @@
+#ifndef MMIOTRACE_H
+#define MMIOTRACE_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+struct kmmio_probe;
+struct pt_regs;
+
+typedef void (*kmmio_pre_handler_t)(struct kmmio_probe *,
+                               struct pt_regs *, unsigned long addr);
+typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
+                               unsigned long condition, struct pt_regs *);
+
+struct kmmio_probe {
+       struct list_head list; /* kmmio internal list */
+       unsigned long addr; /* start location of the probe point */
+       unsigned long len; /* length of the probe region */
+       kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
+       kmmio_post_handler_t post_handler; /* Called after addr is executed */
+       void *private;
+};
+
+/* kmmio is active by some kmmio_probes? */
+static inline int is_kmmio_active(void)
+{
+       extern unsigned int kmmio_count;
+       return kmmio_count;
+}
+
+extern int register_kmmio_probe(struct kmmio_probe *p);
+extern void unregister_kmmio_probe(struct kmmio_probe *p);
+
+/* Called from page fault handler. */
+extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
+
+/* Called from ioremap.c */
+#ifdef CONFIG_MMIOTRACE
+extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
+                                                       void __iomem *addr);
+extern void mmiotrace_iounmap(volatile void __iomem *addr);
+#else
+static inline void mmiotrace_ioremap(resource_size_t offset,
+                                       unsigned long size, void __iomem *addr)
+{
+}
+
+static inline void mmiotrace_iounmap(volatile void __iomem *addr)
+{
+}
+#endif /* CONFIG_MMIOTRACE_HOOKS */
+
+enum mm_io_opcode {
+       MMIO_READ = 0x1,     /* struct mmiotrace_rw */
+       MMIO_WRITE = 0x2,    /* struct mmiotrace_rw */
+       MMIO_PROBE = 0x3,    /* struct mmiotrace_map */
+       MMIO_UNPROBE = 0x4,  /* struct mmiotrace_map */
+       MMIO_MARKER = 0x5,   /* raw char data */
+       MMIO_UNKNOWN_OP = 0x6, /* struct mmiotrace_rw */
+};
+
+struct mmiotrace_rw {
+       resource_size_t phys;   /* PCI address of register */
+       unsigned long value;
+       unsigned long pc;       /* optional program counter */
+       int map_id;
+       unsigned char opcode;   /* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
+       unsigned char width;    /* size of register access in bytes */
+};
+
+struct mmiotrace_map {
+       resource_size_t phys;   /* base address in PCI space */
+       unsigned long virt;     /* base virtual address */
+       unsigned long len;      /* mapping size */
+       int map_id;
+       unsigned char opcode;   /* MMIO_PROBE or MMIO_UNPROBE */
+};
+
+/* in kernel/trace/trace_mmiotrace.c */
+extern void enable_mmiotrace(void);
+extern void disable_mmiotrace(void);
+extern void mmio_trace_rw(struct mmiotrace_rw *rw);
+extern void mmio_trace_mapping(struct mmiotrace_map *map);
+
+#endif /* MMIOTRACE_H */
index 23f0c54..72b1a10 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/linkage.h>
 #include <linux/list.h>
 
-#ifdef CONFIG_DEBUG_PREEMPT
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
   extern void add_preempt_count(int val);
   extern void sub_preempt_count(int val);
 #else
@@ -52,6 +52,34 @@ do { \
        preempt_check_resched(); \
 } while (0)
 
+/* For debugging and tracer internals only! */
+#define add_preempt_count_notrace(val)                 \
+       do { preempt_count() += (val); } while (0)
+#define sub_preempt_count_notrace(val)                 \
+       do { preempt_count() -= (val); } while (0)
+#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
+#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+
+#define preempt_disable_notrace() \
+do { \
+       inc_preempt_count_notrace(); \
+       barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched_notrace() \
+do { \
+       barrier(); \
+       dec_preempt_count_notrace(); \
+} while (0)
+
+/* preempt_check_resched is OK to trace */
+#define preempt_enable_notrace() \
+do { \
+       preempt_enable_no_resched_notrace(); \
+       barrier(); \
+       preempt_check_resched(); \
+} while (0)
+
 #else
 
 #define preempt_disable()              do { } while (0)
@@ -59,6 +87,10 @@ do { \
 #define preempt_enable()               do { } while (0)
 #define preempt_check_resched()                do { } while (0)
 
+#define preempt_disable_notrace()              do { } while (0)
+#define preempt_enable_no_resched_notrace()    do { } while (0)
+#define preempt_enable_notrace()               do { } while (0)
+
 #endif
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
index c5d3f84..aa60985 100644 (file)
@@ -246,6 +246,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev);
 extern void init_idle(struct task_struct *idle, int cpu);
 extern void init_idle_bootup_task(struct task_struct *idle);
 
+extern int runqueue_is_locked(void);
+
 extern cpumask_t nohz_cpu_mask;
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern int select_nohz_load_balancer(int cpu);
@@ -2131,6 +2133,18 @@ static inline void arch_pick_mmap_layout(struct mm_struct *mm)
 }
 #endif
 
+#ifdef CONFIG_TRACING
+extern void
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3);
+#else
+static inline void
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
+#endif
+
 extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
 extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
 
@@ -2225,6 +2239,8 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
 }
 #endif /* CONFIG_MM_OWNER */
 
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
 #endif /* __KERNEL__ */
 
 #endif
index f462439..bd91987 100644 (file)
@@ -105,6 +105,8 @@ extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
 
+extern unsigned long determine_dirtyable_memory(void);
+
 extern int dirty_ratio_handler(struct ctl_table *table, int write,
                struct file *filp, void __user *buffer, size_t *lenp,
                loff_t *ppos);
index 1c9938a..ca2433e 100644 (file)
@@ -11,6 +11,18 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o pm_qos_params.o sched_clock.o
 
+CFLAGS_REMOVE_sched.o = -pg -mno-spe
+
+ifdef CONFIG_FTRACE
+# Do not trace debug files and internal ftrace files
+CFLAGS_REMOVE_lockdep.o = -pg
+CFLAGS_REMOVE_lockdep_proc.o = -pg
+CFLAGS_REMOVE_mutex-debug.o = -pg
+CFLAGS_REMOVE_rtmutex-debug.o = -pg
+CFLAGS_REMOVE_cgroup-debug.o = -pg
+CFLAGS_REMOVE_sched_clock.o = -pg
+endif
+
 obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
@@ -69,6 +81,8 @@ obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
+obj-$(CONFIG_FTRACE) += trace/
+obj-$(CONFIG_TRACING) += trace/
 
 ifneq ($(CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER),y)
 # According to Alan Modra <alan@linuxcare.com.au>, the -fno-omit-frame-pointer is
index 19908b2..d66d676 100644 (file)
@@ -909,7 +909,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        rt_mutex_init_task(p);
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_LOCKDEP)
        DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
        DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
index 81a4e4a..65548ef 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/irqflags.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
+#include <linux/ftrace.h>
 
 #include <asm/sections.h>
 
@@ -81,6 +82,8 @@ static int graph_lock(void)
                __raw_spin_unlock(&lockdep_lock);
                return 0;
        }
+       /* prevent any recursions within lockdep from causing deadlocks */
+       current->lockdep_recursion++;
        return 1;
 }
 
@@ -89,6 +92,7 @@ static inline int graph_unlock(void)
        if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
                return DEBUG_LOCKS_WARN_ON(1);
 
+       current->lockdep_recursion--;
        __raw_spin_unlock(&lockdep_lock);
        return 0;
 }
@@ -982,7 +986,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
        return 1;
 }
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
  * proving that two subgraphs can be connected by a new dependency
@@ -1680,7 +1684,7 @@ valid_state(struct task_struct *curr, struct held_lock *this,
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
                     enum lock_usage_bit new_bit);
 
-#ifdef CONFIG_TRACE_IRQFLAGS
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 
 /*
  * print irq inversion bug:
@@ -2013,11 +2017,13 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on(void)
+void trace_hardirqs_on_caller(unsigned long a0)
 {
        struct task_struct *curr = current;
        unsigned long ip;
 
+       time_hardirqs_on(CALLER_ADDR0, a0);
+
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
@@ -2055,16 +2061,23 @@ void trace_hardirqs_on(void)
        curr->hardirq_enable_event = ++curr->irq_events;
        debug_atomic_inc(&hardirqs_on_events);
 }
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
+void trace_hardirqs_on(void)
+{
+       trace_hardirqs_on_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off(void)
+void trace_hardirqs_off_caller(unsigned long a0)
 {
        struct task_struct *curr = current;
 
+       time_hardirqs_off(CALLER_ADDR0, a0);
+
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
 
@@ -2082,7 +2095,12 @@ void trace_hardirqs_off(void)
        } else
                debug_atomic_inc(&redundant_hardirqs_off);
 }
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
+void trace_hardirqs_off(void)
+{
+       trace_hardirqs_off_caller(CALLER_ADDR0);
+}
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 /*
@@ -2246,7 +2264,7 @@ static inline int separate_irq_context(struct task_struct *curr,
  * Mark a lock with a usage bit, and validate the state transition:
  */
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
-                    enum lock_usage_bit new_bit)
+                            enum lock_usage_bit new_bit)
 {
        unsigned int new_mask = 1 << new_bit, ret = 1;
 
@@ -2686,7 +2704,7 @@ static void check_flags(unsigned long flags)
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-                 int trylock, int read, int check, unsigned long ip)
+                         int trylock, int read, int check, unsigned long ip)
 {
        unsigned long flags;
 
@@ -2708,7 +2726,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 EXPORT_SYMBOL_GPL(lock_acquire);
 
-void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
+void lock_release(struct lockdep_map *lock, int nested,
+                         unsigned long ip)
 {
        unsigned long flags;
 
index b5a9fe1..1abfb92 100644 (file)
@@ -55,8 +55,8 @@ static DEFINE_MUTEX(markers_mutex);
 struct marker_entry {
        struct hlist_node hlist;
        char *format;
-       void (*call)(const struct marker *mdata,        /* Probe wrapper */
-               void *call_private, const char *fmt, ...);
+                       /* Probe wrapper */
+       void (*call)(const struct marker *mdata, void *call_private, ...);
        struct marker_probe_closure single;
        struct marker_probe_closure *multi;
        int refcount;   /* Number of times armed. 0 if disarmed. */
@@ -91,15 +91,13 @@ EXPORT_SYMBOL_GPL(__mark_empty_function);
  * marker_probe_cb Callback that prepares the variable argument list for probes.
  * @mdata: pointer of type struct marker
  * @call_private: caller site private data
- * @fmt: format string
  * @...:  Variable argument list.
  *
  * Since we do not use "typical" pointer based RCU in the 1 argument case, we
  * need to put a full smp_rmb() in this branch. This is why we do not use
  * rcu_dereference() for the pointer read.
  */
-void marker_probe_cb(const struct marker *mdata, void *call_private,
-       const char *fmt, ...)
+void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
 {
        va_list args;
        char ptype;
@@ -120,8 +118,9 @@ void marker_probe_cb(const struct marker *mdata, void *call_private,
                /* Must read the ptr before private data. They are not data
                 * dependant, so we put an explicit smp_rmb() here. */
                smp_rmb();
-               va_start(args, fmt);
-               func(mdata->single.probe_private, call_private, fmt, &args);
+               va_start(args, call_private);
+               func(mdata->single.probe_private, call_private, mdata->format,
+                       &args);
                va_end(args);
        } else {
                struct marker_probe_closure *multi;
@@ -136,9 +135,9 @@ void marker_probe_cb(const struct marker *mdata, void *call_private,
                smp_read_barrier_depends();
                multi = mdata->multi;
                for (i = 0; multi[i].func; i++) {
-                       va_start(args, fmt);
-                       multi[i].func(multi[i].probe_private, call_private, fmt,
-                               &args);
+                       va_start(args, call_private);
+                       multi[i].func(multi[i].probe_private, call_private,
+                               mdata->format, &args);
                        va_end(args);
                }
        }
@@ -150,13 +149,11 @@ EXPORT_SYMBOL_GPL(marker_probe_cb);
  * marker_probe_cb Callback that does not prepare the variable argument list.
  * @mdata: pointer of type struct marker
  * @call_private: caller site private data
- * @fmt: format string
  * @...:  Variable argument list.
  *
  * Should be connected to markers "MARK_NOARGS".
  */
-void marker_probe_cb_noarg(const struct marker *mdata,
-       void *call_private, const char *fmt, ...)
+void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
 {
        va_list args;   /* not initialized */
        char ptype;
@@ -172,7 +169,8 @@ void marker_probe_cb_noarg(const struct marker *mdata,
                /* Must read the ptr before private data. They are not data
                 * dependant, so we put an explicit smp_rmb() here. */
                smp_rmb();
-               func(mdata->single.probe_private, call_private, fmt, &args);
+               func(mdata->single.probe_private, call_private, mdata->format,
+                       &args);
        } else {
                struct marker_probe_closure *multi;
                int i;
@@ -186,8 +184,8 @@ void marker_probe_cb_noarg(const struct marker *mdata,
                smp_read_barrier_depends();
                multi = mdata->multi;
                for (i = 0; multi[i].func; i++)
-                       multi[i].func(multi[i].probe_private, call_private, fmt,
-                               &args);
+                       multi[i].func(multi[i].probe_private, call_private,
+                               mdata->format, &args);
        }
        preempt_enable();
 }
index 8fb01c3..ae7d5b9 100644 (file)
@@ -1041,7 +1041,9 @@ void release_console_sem(void)
                _log_end = log_end;
                con_start = log_end;            /* Flush */
                spin_unlock(&logbuf_lock);
+               stop_critical_timings();        /* don't trace print latency */
                call_console_drivers(_con_start, _log_end);
+               start_critical_timings();
                local_irq_restore(flags);
        }
        console_locked = 0;
index 94ead43..70cb127 100644 (file)
@@ -70,6 +70,7 @@
 #include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/ctype.h>
+#include <linux/ftrace.h>
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
@@ -607,6 +608,24 @@ static inline void update_rq_clock(struct rq *rq)
 # define const_debug static const
 #endif
 
+/**
+ * runqueue_is_locked
+ *
+ * Returns true if the current cpu runqueue is locked.
+ * This interface allows printk to be called with the runqueue lock
+ * held and know whether or not it is OK to wake up the klogd.
+ */
+int runqueue_is_locked(void)
+{
+       int cpu = get_cpu();
+       struct rq *rq = cpu_rq(cpu);
+       int ret;
+
+       ret = spin_is_locked(&rq->lock);
+       put_cpu();
+       return ret;
+}
+
 /*
  * Debugging: various feature bits
  */
@@ -2149,6 +2168,9 @@ out_activate:
        success = 1;
 
 out_running:
+       trace_mark(kernel_sched_wakeup,
+               "pid %d state %ld ## rq %p task %p rq->curr %p",
+               p->pid, p->state, rq, p, rq->curr);
        check_preempt_curr(rq, p);
 
        p->state = TASK_RUNNING;
@@ -2279,6 +2301,9 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
                p->sched_class->task_new(rq, p);
                inc_nr_running(p, rq);
        }
+       trace_mark(kernel_sched_wakeup_new,
+               "pid %d state %ld ## rq %p task %p rq->curr %p",
+               p->pid, p->state, rq, p, rq->curr);
        check_preempt_curr(rq, p);
 #ifdef CONFIG_SMP
        if (p->sched_class->task_wake_up)
@@ -2451,6 +2476,11 @@ context_switch(struct rq *rq, struct task_struct *prev,
        struct mm_struct *mm, *oldmm;
 
        prepare_task_switch(rq, prev, next);
+       trace_mark(kernel_sched_schedule,
+               "prev_pid %d next_pid %d prev_state %ld "
+               "## rq %p prev %p next %p",
+               prev->pid, next->pid, prev->state,
+               rq, prev, next);
        mm = next->mm;
        oldmm = prev->active_mm;
        /*
@@ -4021,26 +4051,44 @@ void scheduler_tick(void)
 #endif
 }
 
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
+#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+                               defined(CONFIG_PREEMPT_TRACER))
+
+static inline unsigned long get_parent_ip(unsigned long addr)
+{
+       if (in_lock_functions(addr)) {
+               addr = CALLER_ADDR2;
+               if (in_lock_functions(addr))
+                       addr = CALLER_ADDR3;
+       }
+       return addr;
+}
 
 void __kprobes add_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Underflow?
         */
        if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
                return;
+#endif
        preempt_count() += val;
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Spinlock count overflowing soon?
         */
        DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
                                PREEMPT_MASK - 10);
+#endif
+       if (preempt_count() == val)
+               trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
 }
 EXPORT_SYMBOL(add_preempt_count);
 
 void __kprobes sub_preempt_count(int val)
 {
+#ifdef CONFIG_DEBUG_PREEMPT
        /*
         * Underflow?
         */
@@ -4052,7 +4100,10 @@ void __kprobes sub_preempt_count(int val)
        if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
                        !(preempt_count() & PREEMPT_MASK)))
                return;
+#endif
 
+       if (preempt_count() == val)
+               trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
        preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
@@ -5384,7 +5435,7 @@ out_unlock:
        return retval;
 }
 
-static const char stat_nam[] = "RSDTtZX";
+static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 
 void sched_show_task(struct task_struct *p)
 {
index 5c2942e..1a064ad 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/sched.h>
 #include <linux/semaphore.h>
 #include <linux/spinlock.h>
+#include <linux/ftrace.h>
 
 static noinline void __down(struct semaphore *sem);
 static noinline int __down_interruptible(struct semaphore *sem);
@@ -53,6 +54,7 @@ void down(struct semaphore *sem)
 {
        unsigned long flags;
 
+       ftrace_special(sem->count, 0, __LINE__);
        spin_lock_irqsave(&sem->lock, flags);
        if (likely(sem->count > 0))
                sem->count--;
index ae28c82..a1fb54c 100644 (file)
@@ -436,7 +436,7 @@ int __lockfunc _spin_trylock_bh(spinlock_t *lock)
 }
 EXPORT_SYMBOL(_spin_trylock_bh);
 
-int in_lock_functions(unsigned long addr)
+notrace int in_lock_functions(unsigned long addr)
 {
        /* Linker adds these: start and end of __lockfunc functions */
        extern char __lock_text_start[], __lock_text_end[];
index 2911665..efaf7c5 100644 (file)
@@ -46,6 +46,7 @@
 #include <linux/nfs_fs.h>
 #include <linux/acpi.h>
 #include <linux/reboot.h>
+#include <linux/ftrace.h>
 
 #include <asm/uaccess.h>
 #include <asm/processor.h>
@@ -455,6 +456,16 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = &proc_dointvec,
        },
+#ifdef CONFIG_FTRACE
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "ftrace_enabled",
+               .data           = &ftrace_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &ftrace_enable_sysctl,
+       },
+#endif
 #ifdef CONFIG_KMOD
        {
                .ctl_name       = KERN_MODPROBE,
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
new file mode 100644 (file)
index 0000000..5c2295b
--- /dev/null
@@ -0,0 +1,127 @@
+#
+# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
+#
+config HAVE_FTRACE
+       bool
+
+config HAVE_DYNAMIC_FTRACE
+       bool
+
+config TRACER_MAX_TRACE
+       bool
+
+config TRACING
+       bool
+       select DEBUG_FS
+       select STACKTRACE
+
+config FTRACE
+       bool "Kernel Function Tracer"
+       depends on HAVE_FTRACE
+       select FRAME_POINTER
+       select TRACING
+       select CONTEXT_SWITCH_TRACER
+       help
+         Enable the kernel to trace every kernel function. This is done
+         by using a compiler feature to insert a small, 5-byte No-Operation
+         instruction to the beginning of every kernel function, which NOP
+         sequence is then dynamically patched into a tracer call when
+         tracing is enabled by the administrator. If it's runtime disabled
+         (the bootup default), then the overhead of the instructions is very
+         small and not measurable even in micro-benchmarks.
+
+config IRQSOFF_TRACER
+       bool "Interrupts-off Latency Tracer"
+       default n
+       depends on TRACE_IRQFLAGS_SUPPORT
+       depends on GENERIC_TIME
+       depends on HAVE_FTRACE
+       select TRACE_IRQFLAGS
+       select TRACING
+       select TRACER_MAX_TRACE
+       help
+         This option measures the time spent in irqs-off critical
+         sections, with microsecond accuracy.
+
+         The default measurement method is a maximum search, which is
+         disabled by default and can be runtime (re-)started
+         via:
+
+             echo 0 > /debugfs/tracing/tracing_max_latency
+
+         (Note that kernel size and overhead increases with this option
+         enabled. This option and the preempt-off timing option can be
+         used together or separately.)
+
+config PREEMPT_TRACER
+       bool "Preemption-off Latency Tracer"
+       default n
+       depends on GENERIC_TIME
+       depends on PREEMPT
+       depends on HAVE_FTRACE
+       select TRACING
+       select TRACER_MAX_TRACE
+       help
+         This option measures the time spent in preemption off critical
+         sections, with microsecond accuracy.
+
+         The default measurement method is a maximum search, which is
+         disabled by default and can be runtime (re-)started
+         via:
+
+             echo 0 > /debugfs/tracing/tracing_max_latency
+
+         (Note that kernel size and overhead increases with this option
+         enabled. This option and the irqs-off timing option can be
+         used together or separately.)
+
+config SCHED_TRACER
+       bool "Scheduling Latency Tracer"
+       depends on HAVE_FTRACE
+       select TRACING
+       select CONTEXT_SWITCH_TRACER
+       select TRACER_MAX_TRACE
+       help
+         This tracer tracks the latency of the highest priority task
+         to be scheduled in, starting from the point it has woken up.
+
+config CONTEXT_SWITCH_TRACER
+       bool "Trace process context switches"
+       depends on HAVE_FTRACE
+       select TRACING
+       select MARKERS
+       help
+         This tracer gets called from the context switch and records
+         all switching of tasks.
+
+config DYNAMIC_FTRACE
+       bool "enable/disable ftrace tracepoints dynamically"
+       depends on FTRACE
+       depends on HAVE_DYNAMIC_FTRACE
+       default y
+       help
+         This option will modify all the calls to ftrace dynamically
+        (will patch them out of the binary image and replaces them
+        with a No-Op instruction) as they are called. A table is
+        created to dynamically enable them again.
+
+        This way a CONFIG_FTRACE kernel is slightly larger, but otherwise
+        has native performance as long as no tracing is active.
+
+        The changes to the code are done by a kernel thread that
+        wakes up once a second and checks to see if any ftrace calls
+        were made. If so, it runs stop_machine (stops all CPUS)
+        and modifies the code to jump over the call to ftrace.
+
+config FTRACE_SELFTEST
+       bool
+
+config FTRACE_STARTUP_TEST
+       bool "Perform a startup test on ftrace"
+       depends on TRACING
+       select FTRACE_SELFTEST
+       help
+         This option performs a series of startup tests on ftrace. On bootup
+         a series of tests are made to verify that the tracer is
+         functioning properly. It will do tests on all the configured
+         tracers of ftrace.
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
new file mode 100644 (file)
index 0000000..c44a7dc
--- /dev/null
@@ -0,0 +1,23 @@
+
+# Do not instrument the tracer itself:
+
+ifdef CONFIG_FTRACE
+ORIG_CFLAGS := $(KBUILD_CFLAGS)
+KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
+
+# selftest needs instrumentation
+CFLAGS_trace_selftest_dynamic.o = -pg
+obj-y += trace_selftest_dynamic.o
+endif
+
+obj-$(CONFIG_FTRACE) += libftrace.o
+
+obj-$(CONFIG_TRACING) += trace.o
+obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
+obj-$(CONFIG_FTRACE) += trace_functions.o
+obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
+obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
+obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
+obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
+
+libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
new file mode 100644 (file)
index 0000000..89bd9a6
--- /dev/null
@@ -0,0 +1,1398 @@
+/*
+ * Infrastructure for profiling code inserted by 'gcc -pg'.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
+ *
+ * Originally ported from the -rt patch by:
+ *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Based on code in the latency_tracer, that is:
+ *
+ *  Copyright (C) 2004-2006 Ingo Molnar
+ *  Copyright (C) 2004 William Lee Irwin III
+ */
+
+#include <linux/stop_machine.h>
+#include <linux/clocksource.h>
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/hardirq.h>
+#include <linux/kthread.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/sysctl.h>
+#include <linux/ctype.h>
+#include <linux/hash.h>
+#include <linux/list.h>
+
+#include "trace.h"
+
+/* ftrace_enabled is a method to turn ftrace on or off */
+int ftrace_enabled __read_mostly;
+static int last_ftrace_enabled;
+
+/*
+ * ftrace_disabled is set when an anomaly is discovered.
+ * ftrace_disabled is much stronger than ftrace_enabled.
+ */
+static int ftrace_disabled __read_mostly;
+
+static DEFINE_SPINLOCK(ftrace_lock);
+static DEFINE_MUTEX(ftrace_sysctl_lock);
+
+static struct ftrace_ops ftrace_list_end __read_mostly =
+{
+       .func = ftrace_stub,
+};
+
+static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
+ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
+
+void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_ops *op = ftrace_list;
+
+       /* in case someone actually ports this to alpha! */
+       read_barrier_depends();
+
+       while (op != &ftrace_list_end) {
+               /* silly alpha */
+               read_barrier_depends();
+               op->func(ip, parent_ip);
+               op = op->next;
+       };
+}
+
+/**
+ * clear_ftrace_function - reset the ftrace function
+ *
+ * This NULLs the ftrace function and in essence stops
+ * tracing.  There may be lag
+ */
+void clear_ftrace_function(void)
+{
+       ftrace_trace_function = ftrace_stub;
+}
+
+static int __register_ftrace_function(struct ftrace_ops *ops)
+{
+       /* Should never be called by interrupts */
+       spin_lock(&ftrace_lock);
+
+       ops->next = ftrace_list;
+       /*
+        * We are entering ops into the ftrace_list but another
+        * CPU might be walking that list. We need to make sure
+        * the ops->next pointer is valid before another CPU sees
+        * the ops pointer included into the ftrace_list.
+        */
+       smp_wmb();
+       ftrace_list = ops;
+
+       if (ftrace_enabled) {
+               /*
+                * For one func, simply call it directly.
+                * For more than one func, call the chain.
+                */
+               if (ops->next == &ftrace_list_end)
+                       ftrace_trace_function = ops->func;
+               else
+                       ftrace_trace_function = ftrace_list_func;
+       }
+
+       spin_unlock(&ftrace_lock);
+
+       return 0;
+}
+
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
+{
+       struct ftrace_ops **p;
+       int ret = 0;
+
+       spin_lock(&ftrace_lock);
+
+       /*
+        * If we are removing the last function, then simply point
+        * to the ftrace_stub.
+        */
+       if (ftrace_list == ops && ops->next == &ftrace_list_end) {
+               ftrace_trace_function = ftrace_stub;
+               ftrace_list = &ftrace_list_end;
+               goto out;
+       }
+
+       for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
+               if (*p == ops)
+                       break;
+
+       if (*p != ops) {
+               ret = -1;
+               goto out;
+       }
+
+       *p = (*p)->next;
+
+       if (ftrace_enabled) {
+               /* If we only have one func left, then call that directly */
+               if (ftrace_list == &ftrace_list_end ||
+                   ftrace_list->next == &ftrace_list_end)
+                       ftrace_trace_function = ftrace_list->func;
+       }
+
+ out:
+       spin_unlock(&ftrace_lock);
+
+       return ret;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+static struct task_struct *ftraced_task;
+static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
+static unsigned long ftraced_iteration_counter;
+
+enum {
+       FTRACE_ENABLE_CALLS             = (1 << 0),
+       FTRACE_DISABLE_CALLS            = (1 << 1),
+       FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
+       FTRACE_ENABLE_MCOUNT            = (1 << 3),
+       FTRACE_DISABLE_MCOUNT           = (1 << 4),
+};
+
+static int ftrace_filtered;
+
+static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
+
+static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
+
+static DEFINE_SPINLOCK(ftrace_shutdown_lock);
+static DEFINE_MUTEX(ftraced_lock);
+static DEFINE_MUTEX(ftrace_filter_lock);
+
+struct ftrace_page {
+       struct ftrace_page      *next;
+       unsigned long           index;
+       struct dyn_ftrace       records[];
+};
+
+#define ENTRIES_PER_PAGE \
+  ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
+
+/* estimate from running different kernels */
+#define NR_TO_INIT             10000
+
+static struct ftrace_page      *ftrace_pages_start;
+static struct ftrace_page      *ftrace_pages;
+
+static int ftraced_trigger;
+static int ftraced_suspend;
+
+static int ftrace_record_suspend;
+
+static struct dyn_ftrace *ftrace_free_records;
+
+static inline int
+ftrace_ip_in_hash(unsigned long ip, unsigned long key)
+{
+       struct dyn_ftrace *p;
+       struct hlist_node *t;
+       int found = 0;
+
+       hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
+               if (p->ip == ip) {
+                       found = 1;
+                       break;
+               }
+       }
+
+       return found;
+}
+
+static inline void
+ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
+{
+       hlist_add_head(&node->node, &ftrace_hash[key]);
+}
+
+static void ftrace_free_rec(struct dyn_ftrace *rec)
+{
+       /* no locking, only called from kstop_machine */
+
+       rec->ip = (unsigned long)ftrace_free_records;
+       ftrace_free_records = rec;
+       rec->flags |= FTRACE_FL_FREE;
+}
+
+static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
+{
+       struct dyn_ftrace *rec;
+
+       /* First check for freed records */
+       if (ftrace_free_records) {
+               rec = ftrace_free_records;
+
+               if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
+                       WARN_ON_ONCE(1);
+                       ftrace_free_records = NULL;
+                       ftrace_disabled = 1;
+                       ftrace_enabled = 0;
+                       return NULL;
+               }
+
+               ftrace_free_records = (void *)rec->ip;
+               memset(rec, 0, sizeof(*rec));
+               return rec;
+       }
+
+       if (ftrace_pages->index == ENTRIES_PER_PAGE) {
+               if (!ftrace_pages->next)
+                       return NULL;
+               ftrace_pages = ftrace_pages->next;
+       }
+
+       return &ftrace_pages->records[ftrace_pages->index++];
+}
+
+static void
+ftrace_record_ip(unsigned long ip)
+{
+       struct dyn_ftrace *node;
+       unsigned long flags;
+       unsigned long key;
+       int resched;
+       int atomic;
+       int cpu;
+
+       if (!ftrace_enabled || ftrace_disabled)
+               return;
+
+       resched = need_resched();
+       preempt_disable_notrace();
+
+       /*
+        * We simply need to protect against recursion.
+        * Use the the raw version of smp_processor_id and not
+        * __get_cpu_var which can call debug hooks that can
+        * cause a recursive crash here.
+        */
+       cpu = raw_smp_processor_id();
+       per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
+       if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
+               goto out;
+
+       if (unlikely(ftrace_record_suspend))
+               goto out;
+
+       key = hash_long(ip, FTRACE_HASHBITS);
+
+       WARN_ON_ONCE(key >= FTRACE_HASHSIZE);
+
+       if (ftrace_ip_in_hash(ip, key))
+               goto out;
+
+       atomic = irqs_disabled();
+
+       spin_lock_irqsave(&ftrace_shutdown_lock, flags);
+
+       /* This ip may have hit the hash before the lock */
+       if (ftrace_ip_in_hash(ip, key))
+               goto out_unlock;
+
+       /*
+        * There's a slight race that the ftraced will update the
+        * hash and reset here. If it is already converted, skip it.
+        */
+       if (ftrace_ip_converted(ip))
+               goto out_unlock;
+
+       node = ftrace_alloc_dyn_node(ip);
+       if (!node)
+               goto out_unlock;
+
+       node->ip = ip;
+
+       ftrace_add_hash(node, key);
+
+       ftraced_trigger = 1;
+
+ out_unlock:
+       spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
+ out:
+       per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
+
+       /* prevent recursion with scheduler */
+       if (resched)
+               preempt_enable_no_resched_notrace();
+       else
+               preempt_enable_notrace();
+}
+
+#define FTRACE_ADDR ((long)(ftrace_caller))
+#define MCOUNT_ADDR ((long)(mcount))
+
+static void
+__ftrace_replace_code(struct dyn_ftrace *rec,
+                     unsigned char *old, unsigned char *new, int enable)
+{
+       unsigned long ip;
+       int failed;
+
+       ip = rec->ip;
+
+       if (ftrace_filtered && enable) {
+               unsigned long fl;
+               /*
+                * If filtering is on:
+                *
+                * If this record is set to be filtered and
+                * is enabled then do nothing.
+                *
+                * If this record is set to be filtered and
+                * it is not enabled, enable it.
+                *
+                * If this record is not set to be filtered
+                * and it is not enabled do nothing.
+                *
+                * If this record is not set to be filtered and
+                * it is enabled, disable it.
+                */
+               fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
+
+               if ((fl ==  (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
+                   (fl == 0))
+                       return;
+
+               /*
+                * If it is enabled disable it,
+                * otherwise enable it!
+                */
+               if (fl == FTRACE_FL_ENABLED) {
+                       /* swap new and old */
+                       new = old;
+                       old = ftrace_call_replace(ip, FTRACE_ADDR);
+                       rec->flags &= ~FTRACE_FL_ENABLED;
+               } else {
+                       new = ftrace_call_replace(ip, FTRACE_ADDR);
+                       rec->flags |= FTRACE_FL_ENABLED;
+               }
+       } else {
+
+               if (enable)
+                       new = ftrace_call_replace(ip, FTRACE_ADDR);
+               else
+                       old = ftrace_call_replace(ip, FTRACE_ADDR);
+
+               if (enable) {
+                       if (rec->flags & FTRACE_FL_ENABLED)
+                               return;
+                       rec->flags |= FTRACE_FL_ENABLED;
+               } else {
+                       if (!(rec->flags & FTRACE_FL_ENABLED))
+                               return;
+                       rec->flags &= ~FTRACE_FL_ENABLED;
+               }
+       }
+
+       failed = ftrace_modify_code(ip, old, new);
+       if (failed) {
+               unsigned long key;
+               /* It is possible that the function hasn't been converted yet */
+               key = hash_long(ip, FTRACE_HASHBITS);
+               if (!ftrace_ip_in_hash(ip, key)) {
+                       rec->flags |= FTRACE_FL_FAILED;
+                       ftrace_free_rec(rec);
+               }
+
+       }
+}
+
+static void ftrace_replace_code(int enable)
+{
+       unsigned char *new = NULL, *old = NULL;
+       struct dyn_ftrace *rec;
+       struct ftrace_page *pg;
+       int i;
+
+       if (enable)
+               old = ftrace_nop_replace();
+       else
+               new = ftrace_nop_replace();
+
+       for (pg = ftrace_pages_start; pg; pg = pg->next) {
+               for (i = 0; i < pg->index; i++) {
+                       rec = &pg->records[i];
+
+                       /* don't modify code that has already faulted */
+                       if (rec->flags & FTRACE_FL_FAILED)
+                               continue;
+
+                       __ftrace_replace_code(rec, old, new, enable);
+               }
+       }
+}
+
+static void ftrace_shutdown_replenish(void)
+{
+       if (ftrace_pages->next)
+               return;
+
+       /* allocate another page */
+       ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
+}
+
+static void
+ftrace_code_disable(struct dyn_ftrace *rec)
+{
+       unsigned long ip;
+       unsigned char *nop, *call;
+       int failed;
+
+       ip = rec->ip;
+
+       nop = ftrace_nop_replace();
+       call = ftrace_call_replace(ip, MCOUNT_ADDR);
+
+       failed = ftrace_modify_code(ip, call, nop);
+       if (failed) {
+               rec->flags |= FTRACE_FL_FAILED;
+               ftrace_free_rec(rec);
+       }
+}
+
+static int __ftrace_modify_code(void *data)
+{
+       unsigned long addr;
+       int *command = data;
+
+       if (*command & FTRACE_ENABLE_CALLS)
+               ftrace_replace_code(1);
+       else if (*command & FTRACE_DISABLE_CALLS)
+               ftrace_replace_code(0);
+
+       if (*command & FTRACE_UPDATE_TRACE_FUNC)
+               ftrace_update_ftrace_func(ftrace_trace_function);
+
+       if (*command & FTRACE_ENABLE_MCOUNT) {
+               addr = (unsigned long)ftrace_record_ip;
+               ftrace_mcount_set(&addr);
+       } else if (*command & FTRACE_DISABLE_MCOUNT) {
+               addr = (unsigned long)ftrace_stub;
+               ftrace_mcount_set(&addr);
+       }
+
+       return 0;
+}
+
+static void ftrace_run_update_code(int command)
+{
+       stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
+}
+
+static ftrace_func_t saved_ftrace_func;
+
+static void ftrace_startup(void)
+{
+       int command = 0;
+
+       if (unlikely(ftrace_disabled))
+               return;
+
+       mutex_lock(&ftraced_lock);
+       ftraced_suspend++;
+       if (ftraced_suspend == 1)
+               command |= FTRACE_ENABLE_CALLS;
+
+       if (saved_ftrace_func != ftrace_trace_function) {
+               saved_ftrace_func = ftrace_trace_function;
+               command |= FTRACE_UPDATE_TRACE_FUNC;
+       }
+
+       if (!command || !ftrace_enabled)
+               goto out;
+
+       ftrace_run_update_code(command);
+ out:
+       mutex_unlock(&ftraced_lock);
+}
+
+static void ftrace_shutdown(void)
+{
+       int command = 0;
+
+       if (unlikely(ftrace_disabled))
+               return;
+
+       mutex_lock(&ftraced_lock);
+       ftraced_suspend--;
+       if (!ftraced_suspend)
+               command |= FTRACE_DISABLE_CALLS;
+
+       if (saved_ftrace_func != ftrace_trace_function) {
+               saved_ftrace_func = ftrace_trace_function;
+               command |= FTRACE_UPDATE_TRACE_FUNC;
+       }
+
+       if (!command || !ftrace_enabled)
+               goto out;
+
+       ftrace_run_update_code(command);
+ out:
+       mutex_unlock(&ftraced_lock);
+}
+
+static void ftrace_startup_sysctl(void)
+{
+       int command = FTRACE_ENABLE_MCOUNT;
+
+       if (unlikely(ftrace_disabled))
+               return;
+
+       mutex_lock(&ftraced_lock);
+       /* Force update next time */
+       saved_ftrace_func = NULL;
+       /* ftraced_suspend is true if we want ftrace running */
+       if (ftraced_suspend)
+               command |= FTRACE_ENABLE_CALLS;
+
+       ftrace_run_update_code(command);
+       mutex_unlock(&ftraced_lock);
+}
+
+static void ftrace_shutdown_sysctl(void)
+{
+       int command = FTRACE_DISABLE_MCOUNT;
+
+       if (unlikely(ftrace_disabled))
+               return;
+
+       mutex_lock(&ftraced_lock);
+       /* ftraced_suspend is true if ftrace is running */
+       if (ftraced_suspend)
+               command |= FTRACE_DISABLE_CALLS;
+
+       ftrace_run_update_code(command);
+       mutex_unlock(&ftraced_lock);
+}
+
+static cycle_t         ftrace_update_time;
+static unsigned long   ftrace_update_cnt;
+unsigned long          ftrace_update_tot_cnt;
+
+static int __ftrace_update_code(void *ignore)
+{
+       struct dyn_ftrace *p;
+       struct hlist_head head;
+       struct hlist_node *t;
+       int save_ftrace_enabled;
+       cycle_t start, stop;
+       int i;
+
+       /* Don't be recording funcs now */
+       save_ftrace_enabled = ftrace_enabled;
+       ftrace_enabled = 0;
+
+       start = ftrace_now(raw_smp_processor_id());
+       ftrace_update_cnt = 0;
+
+       /* No locks needed, the machine is stopped! */
+       for (i = 0; i < FTRACE_HASHSIZE; i++) {
+               if (hlist_empty(&ftrace_hash[i]))
+                       continue;
+
+               head = ftrace_hash[i];
+               INIT_HLIST_HEAD(&ftrace_hash[i]);
+
+               /* all CPUS are stopped, we are safe to modify code */
+               hlist_for_each_entry(p, t, &head, node) {
+                       ftrace_code_disable(p);
+                       ftrace_update_cnt++;
+               }
+
+       }
+
+       stop = ftrace_now(raw_smp_processor_id());
+       ftrace_update_time = stop - start;
+       ftrace_update_tot_cnt += ftrace_update_cnt;
+
+       ftrace_enabled = save_ftrace_enabled;
+
+       return 0;
+}
+
+static void ftrace_update_code(void)
+{
+       if (unlikely(ftrace_disabled))
+               return;
+
+       stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
+}
+
+static int ftraced(void *ignore)
+{
+       unsigned long usecs;
+
+       while (!kthread_should_stop()) {
+
+               set_current_state(TASK_INTERRUPTIBLE);
+
+               /* check once a second */
+               schedule_timeout(HZ);
+
+               if (unlikely(ftrace_disabled))
+                       continue;
+
+               mutex_lock(&ftrace_sysctl_lock);
+               mutex_lock(&ftraced_lock);
+               if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
+                       ftrace_record_suspend++;
+                       ftrace_update_code();
+                       usecs = nsecs_to_usecs(ftrace_update_time);
+                       if (ftrace_update_tot_cnt > 100000) {
+                               ftrace_update_tot_cnt = 0;
+                               pr_info("hm, dftrace overflow: %lu change%s"
+                                        " (%lu total) in %lu usec%s\n",
+                                       ftrace_update_cnt,
+                                       ftrace_update_cnt != 1 ? "s" : "",
+                                       ftrace_update_tot_cnt,
+                                       usecs, usecs != 1 ? "s" : "");
+                               ftrace_disabled = 1;
+                               WARN_ON_ONCE(1);
+                       }
+                       ftraced_trigger = 0;
+                       ftrace_record_suspend--;
+               }
+               ftraced_iteration_counter++;
+               mutex_unlock(&ftraced_lock);
+               mutex_unlock(&ftrace_sysctl_lock);
+
+               wake_up_interruptible(&ftraced_waiters);
+
+               ftrace_shutdown_replenish();
+       }
+       __set_current_state(TASK_RUNNING);
+       return 0;
+}
+
+static int __init ftrace_dyn_table_alloc(void)
+{
+       struct ftrace_page *pg;
+       int cnt;
+       int i;
+
+       /* allocate a few pages */
+       ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
+       if (!ftrace_pages_start)
+               return -1;
+
+       /*
+        * Allocate a few more pages.
+        *
+        * TODO: have some parser search vmlinux before
+        *   final linking to find all calls to ftrace.
+        *   Then we can:
+        *    a) know how many pages to allocate.
+        *     and/or
+        *    b) set up the table then.
+        *
+        *  The dynamic code is still necessary for
+        *  modules.
+        */
+
+       pg = ftrace_pages = ftrace_pages_start;
+
+       cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
+
+       for (i = 0; i < cnt; i++) {
+               pg->next = (void *)get_zeroed_page(GFP_KERNEL);
+
+               /* If we fail, we'll try later anyway */
+               if (!pg->next)
+                       break;
+
+               pg = pg->next;
+       }
+
+       return 0;
+}
+
+enum {
+       FTRACE_ITER_FILTER      = (1 << 0),
+       FTRACE_ITER_CONT        = (1 << 1),
+};
+
+#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
+
+struct ftrace_iterator {
+       loff_t                  pos;
+       struct ftrace_page      *pg;
+       unsigned                idx;
+       unsigned                flags;
+       unsigned char           buffer[FTRACE_BUFF_MAX+1];
+       unsigned                buffer_idx;
+       unsigned                filtered;
+};
+
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       struct dyn_ftrace *rec = NULL;
+
+       (*pos)++;
+
+ retry:
+       if (iter->idx >= iter->pg->index) {
+               if (iter->pg->next) {
+                       iter->pg = iter->pg->next;
+                       iter->idx = 0;
+                       goto retry;
+               }
+       } else {
+               rec = &iter->pg->records[iter->idx++];
+               if ((rec->flags & FTRACE_FL_FAILED) ||
+                   ((iter->flags & FTRACE_ITER_FILTER) &&
+                    !(rec->flags & FTRACE_FL_FILTER))) {
+                       rec = NULL;
+                       goto retry;
+               }
+       }
+
+       iter->pos = *pos;
+
+       return rec;
+}
+
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+       struct ftrace_iterator *iter = m->private;
+       void *p = NULL;
+       loff_t l = -1;
+
+       if (*pos != iter->pos) {
+               for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
+                       ;
+       } else {
+               l = *pos;
+               p = t_next(m, p, &l);
+       }
+
+       return p;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+}
+
+static int t_show(struct seq_file *m, void *v)
+{
+       struct dyn_ftrace *rec = v;
+       char str[KSYM_SYMBOL_LEN];
+
+       if (!rec)
+               return 0;
+
+       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+
+       seq_printf(m, "%s\n", str);
+
+       return 0;
+}
+
+static struct seq_operations show_ftrace_seq_ops = {
+       .start = t_start,
+       .next = t_next,
+       .stop = t_stop,
+       .show = t_show,
+};
+
+static int
+ftrace_avail_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_iterator *iter;
+       int ret;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter)
+               return -ENOMEM;
+
+       iter->pg = ftrace_pages_start;
+       iter->pos = -1;
+
+       ret = seq_open(file, &show_ftrace_seq_ops);
+       if (!ret) {
+               struct seq_file *m = file->private_data;
+
+               m->private = iter;
+       } else {
+               kfree(iter);
+       }
+
+       return ret;
+}
+
+int ftrace_avail_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *m = (struct seq_file *)file->private_data;
+       struct ftrace_iterator *iter = m->private;
+
+       seq_release(inode, file);
+       kfree(iter);
+
+       return 0;
+}
+
+static void ftrace_filter_reset(void)
+{
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       unsigned i;
+
+       /* keep kstop machine from running */
+       preempt_disable();
+       ftrace_filtered = 0;
+       pg = ftrace_pages_start;
+       while (pg) {
+               for (i = 0; i < pg->index; i++) {
+                       rec = &pg->records[i];
+                       if (rec->flags & FTRACE_FL_FAILED)
+                               continue;
+                       rec->flags &= ~FTRACE_FL_FILTER;
+               }
+               pg = pg->next;
+       }
+       preempt_enable();
+}
+
+static int
+ftrace_filter_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_iterator *iter;
+       int ret = 0;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter)
+               return -ENOMEM;
+
+       mutex_lock(&ftrace_filter_lock);
+       if ((file->f_mode & FMODE_WRITE) &&
+           !(file->f_flags & O_APPEND))
+               ftrace_filter_reset();
+
+       if (file->f_mode & FMODE_READ) {
+               iter->pg = ftrace_pages_start;
+               iter->pos = -1;
+               iter->flags = FTRACE_ITER_FILTER;
+
+               ret = seq_open(file, &show_ftrace_seq_ops);
+               if (!ret) {
+                       struct seq_file *m = file->private_data;
+                       m->private = iter;
+               } else
+                       kfree(iter);
+       } else
+               file->private_data = iter;
+       mutex_unlock(&ftrace_filter_lock);
+
+       return ret;
+}
+
+static ssize_t
+ftrace_filter_read(struct file *file, char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+       if (file->f_mode & FMODE_READ)
+               return seq_read(file, ubuf, cnt, ppos);
+       else
+               return -EPERM;
+}
+
+static loff_t
+ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
+{
+       loff_t ret;
+
+       if (file->f_mode & FMODE_READ)
+               ret = seq_lseek(file, offset, origin);
+       else
+               file->f_pos = ret = 1;
+
+       return ret;
+}
+
+enum {
+       MATCH_FULL,
+       MATCH_FRONT_ONLY,
+       MATCH_MIDDLE_ONLY,
+       MATCH_END_ONLY,
+};
+
+static void
+ftrace_match(unsigned char *buff, int len)
+{
+       char str[KSYM_SYMBOL_LEN];
+       char *search = NULL;
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       int type = MATCH_FULL;
+       unsigned i, match = 0, search_len = 0;
+
+       for (i = 0; i < len; i++) {
+               if (buff[i] == '*') {
+                       if (!i) {
+                               search = buff + i + 1;
+                               type = MATCH_END_ONLY;
+                               search_len = len - (i + 1);
+                       } else {
+                               if (type == MATCH_END_ONLY) {
+                                       type = MATCH_MIDDLE_ONLY;
+                               } else {
+                                       match = i;
+                                       type = MATCH_FRONT_ONLY;
+                               }
+                               buff[i] = 0;
+                               break;
+                       }
+               }
+       }
+
+       /* keep kstop machine from running */
+       preempt_disable();
+       ftrace_filtered = 1;
+       pg = ftrace_pages_start;
+       while (pg) {
+               for (i = 0; i < pg->index; i++) {
+                       int matched = 0;
+                       char *ptr;
+
+                       rec = &pg->records[i];
+                       if (rec->flags & FTRACE_FL_FAILED)
+                               continue;
+                       kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
+                       switch (type) {
+                       case MATCH_FULL:
+                               if (strcmp(str, buff) == 0)
+                                       matched = 1;
+                               break;
+                       case MATCH_FRONT_ONLY:
+                               if (memcmp(str, buff, match) == 0)
+                                       matched = 1;
+                               break;
+                       case MATCH_MIDDLE_ONLY:
+                               if (strstr(str, search))
+                                       matched = 1;
+                               break;
+                       case MATCH_END_ONLY:
+                               ptr = strstr(str, search);
+                               if (ptr && (ptr[search_len] == 0))
+                                       matched = 1;
+                               break;
+                       }
+                       if (matched)
+                               rec->flags |= FTRACE_FL_FILTER;
+               }
+               pg = pg->next;
+       }
+       preempt_enable();
+}
+
+static ssize_t
+ftrace_filter_write(struct file *file, const char __user *ubuf,
+                   size_t cnt, loff_t *ppos)
+{
+       struct ftrace_iterator *iter;
+       char ch;
+       size_t read = 0;
+       ssize_t ret;
+
+       if (!cnt || cnt < 0)
+               return 0;
+
+       mutex_lock(&ftrace_filter_lock);
+
+       if (file->f_mode & FMODE_READ) {
+               struct seq_file *m = file->private_data;
+               iter = m->private;
+       } else
+               iter = file->private_data;
+
+       if (!*ppos) {
+               iter->flags &= ~FTRACE_ITER_CONT;
+               iter->buffer_idx = 0;
+       }
+
+       ret = get_user(ch, ubuf++);
+       if (ret)
+               goto out;
+       read++;
+       cnt--;
+
+       if (!(iter->flags & ~FTRACE_ITER_CONT)) {
+               /* skip white space */
+               while (cnt && isspace(ch)) {
+                       ret = get_user(ch, ubuf++);
+                       if (ret)
+                               goto out;
+                       read++;
+                       cnt--;
+               }
+
+
+               if (isspace(ch)) {
+                       file->f_pos += read;
+                       ret = read;
+                       goto out;
+               }
+
+               iter->buffer_idx = 0;
+       }
+
+       while (cnt && !isspace(ch)) {
+               if (iter->buffer_idx < FTRACE_BUFF_MAX)
+                       iter->buffer[iter->buffer_idx++] = ch;
+               else {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               ret = get_user(ch, ubuf++);
+               if (ret)
+                       goto out;
+               read++;
+               cnt--;
+       }
+
+       if (isspace(ch)) {
+               iter->filtered++;
+               iter->buffer[iter->buffer_idx] = 0;
+               ftrace_match(iter->buffer, iter->buffer_idx);
+               iter->buffer_idx = 0;
+       } else
+               iter->flags |= FTRACE_ITER_CONT;
+
+
+       file->f_pos += read;
+
+       ret = read;
+ out:
+       mutex_unlock(&ftrace_filter_lock);
+
+       return ret;
+}
+
+/**
+ * ftrace_set_filter - set a function to filter on in ftrace
+ * @buf - the string that holds the function filter text.
+ * @len - the length of the string.
+ * @reset - non zero to reset all filters before applying this filter.
+ *
+ * Filters denote which functions should be enabled when tracing is enabled.
+ * If @buf is NULL and reset is set, all functions will be enabled for tracing.
+ */
+void ftrace_set_filter(unsigned char *buf, int len, int reset)
+{
+       if (unlikely(ftrace_disabled))
+               return;
+
+       mutex_lock(&ftrace_filter_lock);
+       if (reset)
+               ftrace_filter_reset();
+       if (buf)
+               ftrace_match(buf, len);
+       mutex_unlock(&ftrace_filter_lock);
+}
+
+static int
+ftrace_filter_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *m = (struct seq_file *)file->private_data;
+       struct ftrace_iterator *iter;
+
+       mutex_lock(&ftrace_filter_lock);
+       if (file->f_mode & FMODE_READ) {
+               iter = m->private;
+
+               seq_release(inode, file);
+       } else
+               iter = file->private_data;
+
+       if (iter->buffer_idx) {
+               iter->filtered++;
+               iter->buffer[iter->buffer_idx] = 0;
+               ftrace_match(iter->buffer, iter->buffer_idx);
+       }
+
+       mutex_lock(&ftrace_sysctl_lock);
+       mutex_lock(&ftraced_lock);
+       if (iter->filtered && ftraced_suspend && ftrace_enabled)
+               ftrace_run_update_code(FTRACE_ENABLE_CALLS);
+       mutex_unlock(&ftraced_lock);
+       mutex_unlock(&ftrace_sysctl_lock);
+
+       kfree(iter);
+       mutex_unlock(&ftrace_filter_lock);
+       return 0;
+}
+
+static struct file_operations ftrace_avail_fops = {
+       .open = ftrace_avail_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = ftrace_avail_release,
+};
+
+static struct file_operations ftrace_filter_fops = {
+       .open = ftrace_filter_open,
+       .read = ftrace_filter_read,
+       .write = ftrace_filter_write,
+       .llseek = ftrace_filter_lseek,
+       .release = ftrace_filter_release,
+};
+
+/**
+ * ftrace_force_update - force an update to all recording ftrace functions
+ *
+ * The ftrace dynamic update daemon only wakes up once a second.
+ * There may be cases where an update needs to be done immediately
+ * for tests or internal kernel tracing to begin. This function
+ * wakes the daemon to do an update and will not return until the
+ * update is complete.
+ */
+int ftrace_force_update(void)
+{
+       unsigned long last_counter;
+       DECLARE_WAITQUEUE(wait, current);
+       int ret = 0;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       mutex_lock(&ftraced_lock);
+       last_counter = ftraced_iteration_counter;
+
+       set_current_state(TASK_INTERRUPTIBLE);
+       add_wait_queue(&ftraced_waiters, &wait);
+
+       if (unlikely(!ftraced_task)) {
+               ret = -ENODEV;
+               goto out;
+       }
+
+       do {
+               mutex_unlock(&ftraced_lock);
+               wake_up_process(ftraced_task);
+               schedule();
+               mutex_lock(&ftraced_lock);
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       break;
+               }
+               set_current_state(TASK_INTERRUPTIBLE);
+       } while (last_counter == ftraced_iteration_counter);
+
+ out:
+       mutex_unlock(&ftraced_lock);
+       remove_wait_queue(&ftraced_waiters, &wait);
+       set_current_state(TASK_RUNNING);
+
+       return ret;
+}
+
+static void ftrace_force_shutdown(void)
+{
+       struct task_struct *task;
+       int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
+
+       mutex_lock(&ftraced_lock);
+       task = ftraced_task;
+       ftraced_task = NULL;
+       ftraced_suspend = -1;
+       ftrace_run_update_code(command);
+       mutex_unlock(&ftraced_lock);
+
+       if (task)
+               kthread_stop(task);
+}
+
+static __init int ftrace_init_debugfs(void)
+{
+       struct dentry *d_tracer;
+       struct dentry *entry;
+
+       d_tracer = tracing_init_dentry();
+
+       entry = debugfs_create_file("available_filter_functions", 0444,
+                                   d_tracer, NULL, &ftrace_avail_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'available_filter_functions' entry\n");
+
+       entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
+                                   NULL, &ftrace_filter_fops);
+       if (!entry)
+               pr_warning("Could not create debugfs "
+                          "'set_ftrace_filter' entry\n");
+       return 0;
+}
+
+fs_initcall(ftrace_init_debugfs);
+
+static int __init ftrace_dynamic_init(void)
+{
+       struct task_struct *p;
+       unsigned long addr;
+       int ret;
+
+       addr = (unsigned long)ftrace_record_ip;
+
+       stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
+
+       /* ftrace_dyn_arch_init places the return code in addr */
+       if (addr) {
+               ret = (int)addr;
+               goto failed;
+       }
+
+       ret = ftrace_dyn_table_alloc();
+       if (ret)
+               goto failed;
+
+       p = kthread_run(ftraced, NULL, "ftraced");
+       if (IS_ERR(p)) {
+               ret = -1;
+               goto failed;
+       }
+
+       last_ftrace_enabled = ftrace_enabled = 1;
+       ftraced_task = p;
+
+       return 0;
+
+ failed:
+       ftrace_disabled = 1;
+       return ret;
+}
+
+core_initcall(ftrace_dynamic_init);
+#else
+# define ftrace_startup()              do { } while (0)
+# define ftrace_shutdown()             do { } while (0)
+# define ftrace_startup_sysctl()       do { } while (0)
+# define ftrace_shutdown_sysctl()      do { } while (0)
+# define ftrace_force_shutdown()       do { } while (0)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+/**
+ * ftrace_kill - totally shutdown ftrace
+ *
+ * This is a safety measure. If something was detected that seems
+ * wrong, calling this function will keep ftrace from doing
+ * any more modifications, and updates.
+ * used when something went wrong.
+ */
+void ftrace_kill(void)
+{
+       mutex_lock(&ftrace_sysctl_lock);
+       ftrace_disabled = 1;
+       ftrace_enabled = 0;
+
+       clear_ftrace_function();
+       mutex_unlock(&ftrace_sysctl_lock);
+
+       /* Try to totally disable ftrace */
+       ftrace_force_shutdown();
+}
+
+/**
+ * register_ftrace_function - register a function for profiling
+ * @ops - ops structure that holds the function for profiling.
+ *
+ * Register a function to be called by all functions in the
+ * kernel.
+ *
+ * Note: @ops->func and all the functions it calls must be labeled
+ *       with "notrace", otherwise it will go into a
+ *       recursive loop.
+ */
+int register_ftrace_function(struct ftrace_ops *ops)
+{
+       int ret;
+
+       if (unlikely(ftrace_disabled))
+               return -1;
+
+       mutex_lock(&ftrace_sysctl_lock);
+       ret = __register_ftrace_function(ops);
+       ftrace_startup();
+       mutex_unlock(&ftrace_sysctl_lock);
+
+       return ret;
+}
+
+/**
+ * unregister_ftrace_function - unresgister a function for profiling.
+ * @ops - ops structure that holds the function to unregister
+ *
+ * Unregister a function that was added to be called by ftrace profiling.
+ */
+int unregister_ftrace_function(struct ftrace_ops *ops)
+{
+       int ret;
+
+       mutex_lock(&ftrace_sysctl_lock);
+       ret = __unregister_ftrace_function(ops);
+       ftrace_shutdown();
+       mutex_unlock(&ftrace_sysctl_lock);
+
+       return ret;
+}
+
+int
+ftrace_enable_sysctl(struct ctl_table *table, int write,
+                    struct file *file, void __user *buffer, size_t *lenp,
+                    loff_t *ppos)
+{
+       int ret;
+
+       if (unlikely(ftrace_disabled))
+               return -ENODEV;
+
+       mutex_lock(&ftrace_sysctl_lock);
+
+       ret  = proc_dointvec(table, write, file, buffer, lenp, ppos);
+
+       if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
+               goto out;
+
+       last_ftrace_enabled = ftrace_enabled;
+
+       if (ftrace_enabled) {
+
+               ftrace_startup_sysctl();
+
+               /* we are starting ftrace again */
+               if (ftrace_list != &ftrace_list_end) {
+                       if (ftrace_list->next == &ftrace_list_end)
+                               ftrace_trace_function = ftrace_list->func;
+                       else
+                               ftrace_trace_function = ftrace_list_func;
+               }
+
+       } else {
+               /* stopping ftrace calls (just send to ftrace_stub) */
+               ftrace_trace_function = ftrace_stub;
+
+               ftrace_shutdown_sysctl();
+       }
+
+ out:
+       mutex_unlock(&ftrace_sysctl_lock);
+       return ret;
+}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
new file mode 100644 (file)
index 0000000..4dcc4e8
--- /dev/null
@@ -0,0 +1,3073 @@
+/*
+ * ring buffer based function tracer
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
+ *
+ * Originally taken from the RT patch by:
+ *    Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Based on code from the latency_tracer, that is:
+ *  Copyright (C) 2004-2006 Ingo Molnar
+ *  Copyright (C) 2004 William Lee Irwin III
+ */
+#include <linux/utsrelease.h>
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/pagemap.h>
+#include <linux/hardirq.h>
+#include <linux/linkage.h>
+#include <linux/uaccess.h>
+#include <linux/ftrace.h>
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/writeback.h>
+
+#include <linux/stacktrace.h>
+
+#include "trace.h"
+
+unsigned long __read_mostly    tracing_max_latency = (cycle_t)ULONG_MAX;
+unsigned long __read_mostly    tracing_thresh;
+
+static unsigned long __read_mostly     tracing_nr_buffers;
+static cpumask_t __read_mostly         tracing_buffer_mask;
+
+#define for_each_tracing_cpu(cpu)      \
+       for_each_cpu_mask(cpu, tracing_buffer_mask)
+
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly = {
+       .name           = "none",
+};
+
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
+static int tracing_disabled = 1;
+
+static unsigned long tracing_pages_allocated;
+
+/* Convert nanoseconds to microseconds, rounding to the nearest usec. */
+long
+ns2usecs(cycle_t nsec)
+{
+       nsec += 500;
+       do_div(nsec, 1000);
+       return nsec;
+}
+
+/* Current trace timestamp for @cpu, taken from cpu_clock(). */
+cycle_t ftrace_now(int cpu)
+{
+       return cpu_clock(cpu);
+}
+
+/*
+ * The global_trace is the descriptor that holds the tracing
+ * buffers for the live tracing. For each CPU, it contains
+ * a linked list of pages that will store trace entries. The
+ * page descriptor of the pages in the memory is used to hold
+ * the linked list by linking the lru item in the page descriptor
+ * to each of the pages in the buffer per CPU.
+ *
+ * For each active CPU there is a data field that holds the
+ * pages for the buffer for that CPU. Each CPU has the same number
+ * of pages allocated for its buffer.
+ */
+static struct trace_array      global_trace;
+
+static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
+
+/*
+ * The max_tr is used to snapshot the global_trace when a maximum
+ * latency is reached. Some tracers will use this to store a maximum
+ * trace while it continues examining live traces.
+ *
+ * The buffers for the max_tr are set up the same as the global_trace.
+ * When a snapshot is taken, the linked list of the max_tr is swapped
+ * with the linked list of the global_trace and the buffers are reset for
+ * the global_trace so the tracing can continue.
+ */
+static struct trace_array      max_tr;
+
+static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+
+/* tracer_enabled is used to toggle activation of a tracer */
+static int                     tracer_enabled = 1;
+
+/*
+ * trace_nr_entries is the number of entries that is allocated
+ * for a buffer. Note, the number of entries is always rounded
+ * to ENTRIES_PER_PAGE.
+ */
+static unsigned long           trace_nr_entries = 65536UL;
+
+/* trace_types holds a link list of available tracers. */
+static struct tracer           *trace_types __read_mostly;
+
+/* current_trace points to the tracer that is currently active */
+static struct tracer           *current_trace __read_mostly;
+
+/*
+ * max_tracer_type_len is used to simplify the allocating of
+ * buffers to read userspace tracer names. We keep track of
+ * the longest tracer name registered.
+ */
+static int                     max_tracer_type_len;
+
+/*
+ * trace_types_lock is used to protect the trace_types list.
+ * This lock is also used to keep user access serialized.
+ * Accesses from userspace will grab this lock while userspace
+ * activities happen inside the kernel.
+ */
+static DEFINE_MUTEX(trace_types_lock);
+
+/* trace_wait is a waitqueue for tasks blocked on trace_poll */
+static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
+
+/* trace_flags holds iter_ctrl options */
+unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
+
+/**
+ * trace_wake_up - wake up tasks waiting for trace input
+ *
+ * Simply wakes up any task that is blocked on the trace_wait
+ * queue. This is used with trace_poll for tasks polling the trace.
+ */
+void trace_wake_up(void)
+{
+       /*
+        * The runqueue_is_locked() can fail, but this is the best we
+        * have for now:
+        */
+       if (!(trace_flags & TRACE_ITER_BLOCK) && !runqueue_is_locked())
+               wake_up(&trace_wait);
+}
+
+/* number of trace entries that fit in one buffer page */
+#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry))
+
+/* Parse the "trace_entries=" boot parameter (per-cpu entry count). */
+static int __init set_nr_entries(char *str)
+{
+       unsigned long nr_entries;
+       int ret;
+
+       if (!str)
+               return 0;
+       ret = strict_strtoul(str, 0, &nr_entries);
+       /* nr_entries can not be zero */
+       if (ret < 0 || nr_entries == 0)
+               return 0;
+       trace_nr_entries = nr_entries;
+       return 1;
+}
+__setup("trace_entries=", set_nr_entries);
+
+/* Convert nanoseconds to microseconds, truncating (no rounding). */
+unsigned long nsecs_to_usecs(unsigned long nsecs)
+{
+       return nsecs / 1000;
+}
+
+/*
+ * trace_flag_type is an enumeration that holds different
+ * states when a trace occurs. These are:
+ *  IRQS_OFF   - interrupts were disabled
+ *  NEED_RESCHED - reschedule is requested
+ *  HARDIRQ    - inside an interrupt handler
+ *  SOFTIRQ    - inside a softirq handler
+ */
+enum trace_flag_type {
+       TRACE_FLAG_IRQS_OFF             = 0x01,
+       TRACE_FLAG_NEED_RESCHED         = 0x02,
+       TRACE_FLAG_HARDIRQ              = 0x04,
+       TRACE_FLAG_SOFTIRQ              = 0x08,
+};
+
+/*
+ * TRACE_ITER_SYM_MASK masks the options in trace_flags that
+ * control the output of kernel symbols.
+ */
+#define TRACE_ITER_SYM_MASK \
+       (TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
+
+/* These must match the bit positions in trace_iterator_flags */
+static const char *trace_options[] = {
+       "print-parent",
+       "sym-offset",
+       "sym-addr",
+       "verbose",
+       "raw",
+       "hex",
+       "bin",
+       "block",
+       "stacktrace",
+       "sched-tree",
+       NULL
+};
+
+/*
+ * ftrace_max_lock is used to protect the swapping of buffers
+ * when taking a max snapshot. The buffers themselves are
+ * protected by per_cpu spinlocks. But the action of the swap
+ * needs its own lock.
+ *
+ * This is defined as a raw_spinlock_t in order to help
+ * with performance when lockdep debugging is enabled.
+ */
+static raw_spinlock_t ftrace_max_lock =
+       (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+/*
+ * Copy the new maximum trace into the separate maximum-trace
+ * structure. (this way the maximum trace is permanently saved,
+ * for later retrieval via /debugfs/tracing/latency_trace)
+ *
+ * Caller must hold ftrace_max_lock (see update_max_tr()).
+ */
+static void
+__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+       struct trace_array_cpu *data = tr->data[cpu];
+
+       max_tr.cpu = cpu;
+       max_tr.time_start = data->preempt_timestamp;
+
+       /* save the latency and the culprit task's identity in max_tr */
+       data = max_tr.data[cpu];
+       data->saved_latency = tracing_max_latency;
+
+       memcpy(data->comm, tsk->comm, TASK_COMM_LEN);
+       data->pid = tsk->pid;
+       data->uid = tsk->uid;
+       data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
+       data->policy = tsk->policy;
+       data->rt_priority = tsk->rt_priority;
+
+       /* record this tasks comm */
+       tracing_record_cmdline(current);
+}
+
+/**
+ * check_pages - integrity check of trace buffers
+ *
+ * As a safety measure we check to make sure the data pages have not
+ * been corrupted. TODO: configure to disable this because it adds
+ * a bit of overhead.
+ */
+void check_pages(struct trace_array_cpu *data)
+{
+       struct page *page, *tmp;
+
+       /* the page list must be a consistent doubly-linked ring */
+       BUG_ON(data->trace_pages.next->prev != &data->trace_pages);
+       BUG_ON(data->trace_pages.prev->next != &data->trace_pages);
+
+       list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
+               BUG_ON(page->lru.next->prev != &page->lru);
+               BUG_ON(page->lru.prev->next != &page->lru);
+       }
+}
+
+/**
+ * head_page - page address of the first page in per_cpu buffer.
+ *
+ * head_page returns the page address of the first page in
+ * a per_cpu buffer. This also performs various consistency
+ * checks to make sure the buffer has not been corrupted.
+ */
+void *head_page(struct trace_array_cpu *data)
+{
+       struct page *page;
+
+       check_pages(data);
+       if (list_empty(&data->trace_pages))
+               return NULL;
+
+       page = list_entry(data->trace_pages.next, struct page, lru);
+       BUG_ON(&page->lru == &data->trace_pages);
+
+       return page_address(page);
+}
+
+/**
+ * trace_seq_printf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace,
+ * trace_seq_printf is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ *
+ * Returns 0 if the formatted string did not fit, otherwise the
+ * amount of space that was available before the write.
+ */
+int
+trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+       int len = (PAGE_SIZE - 1) - s->len;
+       va_list ap;
+       int ret;
+
+       if (!len)
+               return 0;
+
+       va_start(ap, fmt);
+       ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
+       va_end(ap);
+
+       /* If we can't write it all, don't bother writing anything */
+       if (ret >= len)
+               return 0;
+
+       s->len += ret;
+
+       return len;
+}
+
+/**
+ * trace_seq_puts - trace sequence printing of simple string
+ * @s: trace sequence descriptor
+ * @str: simple string to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple string
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ */
+static int
+trace_seq_puts(struct trace_seq *s, const char *str)
+{
+       int len = strlen(str);
+
+       if (len > ((PAGE_SIZE - 1) - s->len))
+               return 0;
+
+       memcpy(s->buffer + s->len, str, len);
+       s->len += len;
+
+       return len;
+}
+
+/* Append a single character to @s; returns 1 on success, 0 if full. */
+static int
+trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+       if (s->len >= (PAGE_SIZE - 1))
+               return 0;
+
+       s->buffer[s->len++] = c;
+
+       return 1;
+}
+
+/* Append @len raw bytes to @s; returns @len on success, 0 if no room. */
+static int
+trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
+{
+       if (len > ((PAGE_SIZE - 1) - s->len))
+               return 0;
+
+       memcpy(s->buffer + s->len, mem, len);
+       s->len += len;
+
+       return len;
+}
+
+/* enough for 8 bytes of hex (16 chars) plus a trailing space */
+#define HEX_CHARS 17
+static const char hex2asc[] = "0123456789abcdef";
+
+/*
+ * Append @len bytes of @mem to @s as hex digits followed by a space.
+ * Bytes are walked in memory order on big-endian and in reverse on
+ * little-endian; within each byte the low nibble is emitted first.
+ */
+static int
+trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
+{
+       unsigned char hex[HEX_CHARS];
+       unsigned char *data = mem;
+       unsigned char byte;
+       int i, j;
+
+       BUG_ON(len >= HEX_CHARS);
+
+#ifdef __BIG_ENDIAN
+       for (i = 0, j = 0; i < len; i++) {
+#else
+       for (i = len-1, j = 0; i >= 0; i--) {
+#endif
+               byte = data[i];
+
+               hex[j++] = hex2asc[byte & 0x0f];
+               hex[j++] = hex2asc[byte >> 4];
+       }
+       hex[j++] = ' ';
+
+       return trace_seq_putmem(s, hex, j);
+}
+
+/* Discard any buffered output and rewind the read position. */
+static void
+trace_seq_reset(struct trace_seq *s)
+{
+       s->len = 0;
+       s->readpos = 0;
+}
+
+/**
+ * trace_seq_to_user - copy the sequence buffer to user space
+ * @s: trace sequence descriptor
+ * @ubuf: user-space destination buffer
+ * @cnt: maximum number of bytes to copy
+ *
+ * Copies up to @cnt unread bytes from @s to @ubuf and advances the
+ * read position by the number of bytes actually copied.
+ *
+ * Returns the number of bytes copied, -EBUSY if nothing is left to
+ * read, or -EFAULT if the user copy fails.
+ */
+ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
+{
+       int len;
+       int ret;
+
+       if (s->len <= s->readpos)
+               return -EBUSY;
+
+       len = s->len - s->readpos;
+       if (cnt > len)
+               cnt = len;
+       ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+       if (ret)
+               return -EFAULT;
+
+       /*
+        * Advance by the number of bytes actually copied (@cnt), not by
+        * the full unread length, so a short read does not drop the
+        * bytes that were never handed to user space.
+        */
+       s->readpos += cnt;
+       return cnt;
+}
+
+/* Flush the sequence buffer into seq_file @m, then reset it. */
+static void
+trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+       int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
+
+       /* NUL-terminate in place so seq_puts() sees a proper string */
+       s->buffer[len] = 0;
+       seq_puts(m, s->buffer);
+
+       trace_seq_reset(s);
+}
+
+/*
+ * flip the trace buffers between two trace descriptors.
+ * This usually is the buffers between the global_trace and
+ * the max_tr to record a snapshot of a current trace.
+ *
+ * The ftrace_max_lock must be held.
+ */
+static void
+flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
+{
+       struct list_head flip_pages;
+
+       INIT_LIST_HEAD(&flip_pages);
+
+       /* copy tr2's trailing state (from trace_head_idx onward) into tr1 */
+       memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
+               sizeof(struct trace_array_cpu) -
+               offsetof(struct trace_array_cpu, trace_head_idx));
+
+       check_pages(tr1);
+       check_pages(tr2);
+       /* exchange the two page lists via a temporary list head */
+       list_splice_init(&tr1->trace_pages, &flip_pages);
+       list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
+       list_splice_init(&flip_pages, &tr2->trace_pages);
+       BUG_ON(!list_empty(&flip_pages));
+       check_pages(tr1);
+       check_pages(tr2);
+}
+
+/**
+ * update_max_tr - snapshot all trace buffers from global_trace to max_tr
+ * @tr: tracer
+ * @tsk: the task with the latency
+ * @cpu: The cpu that initiated the trace.
+ *
+ * Flip the buffers between the @tr and the max_tr and record information
+ * about which task was the cause of this latency.
+ *
+ * Must be called with interrupts disabled.
+ */
+void
+update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+       struct trace_array_cpu *data;
+       int i;
+
+       WARN_ON_ONCE(!irqs_disabled());
+       __raw_spin_lock(&ftrace_max_lock);
+       /* clear out all the previous traces */
+       for_each_tracing_cpu(i) {
+               data = tr->data[i];
+               flip_trace(max_tr.data[i], data);
+               tracing_reset(data);
+       }
+
+       __update_max_tr(tr, tsk, cpu);
+       __raw_spin_unlock(&ftrace_max_lock);
+}
+
+/**
+ * update_max_tr_single - only copy one trace over, and reset the rest
+ * @tr: tracer
+ * @tsk: task with the latency
+ * @cpu: the cpu of the buffer to copy.
+ *
+ * Flip the trace of a single CPU buffer between the @tr and the max_tr.
+ *
+ * Must be called with interrupts disabled.
+ */
+void
+update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
+{
+       struct trace_array_cpu *data = tr->data[cpu];
+       int i;
+
+       WARN_ON_ONCE(!irqs_disabled());
+       __raw_spin_lock(&ftrace_max_lock);
+       for_each_tracing_cpu(i)
+               tracing_reset(max_tr.data[i]);
+
+       flip_trace(max_tr.data[cpu], data);
+       tracing_reset(data);
+
+       __update_max_tr(tr, tsk, cpu);
+       __raw_spin_unlock(&ftrace_max_lock);
+}
+
+/**
+ * register_tracer - register a tracer with the ftrace system.
+ * @type: the plugin for the tracer
+ *
+ * Register a new plugin tracer. Returns 0 on success, or -1 if @type
+ * has no name, is already registered, or fails its selftest.
+ */
+int register_tracer(struct tracer *type)
+{
+       struct tracer *t;
+       int len;
+       int ret = 0;
+
+       if (!type->name) {
+               pr_info("Tracer must have a name\n");
+               return -1;
+       }
+
+       mutex_lock(&trace_types_lock);
+       for (t = trace_types; t; t = t->next) {
+               if (strcmp(type->name, t->name) == 0) {
+                       /* already found */
+                       pr_info("Trace %s already registered\n",
+                               type->name);
+                       ret = -1;
+                       goto out;
+               }
+       }
+
+#ifdef CONFIG_FTRACE_STARTUP_TEST
+       if (type->selftest) {
+               struct tracer *saved_tracer = current_trace;
+               struct trace_array_cpu *data;
+               struct trace_array *tr = &global_trace;
+               int saved_ctrl = tr->ctrl;
+               int i;
+               /*
+                * Run a selftest on this tracer.
+                * Here we reset the trace buffer, and set the current
+                * tracer to be this tracer. The tracer can then run some
+                * internal tracing to verify that everything is in order.
+                * If we fail, we do not register this tracer.
+                */
+               for_each_tracing_cpu(i) {
+                       data = tr->data[i];
+                       if (!head_page(data))
+                               continue;
+                       tracing_reset(data);
+               }
+               current_trace = type;
+               tr->ctrl = 0;
+               /* the test is responsible for initializing and enabling */
+               pr_info("Testing tracer %s: ", type->name);
+               ret = type->selftest(type, tr);
+               /* the test is responsible for resetting too */
+               current_trace = saved_tracer;
+               tr->ctrl = saved_ctrl;
+               if (ret) {
+                       printk(KERN_CONT "FAILED!\n");
+                       goto out;
+               }
+               /* Only reset on passing, to avoid touching corrupted buffers */
+               for_each_tracing_cpu(i) {
+                       data = tr->data[i];
+                       if (!head_page(data))
+                               continue;
+                       tracing_reset(data);
+               }
+               printk(KERN_CONT "PASSED\n");
+       }
+#endif
+
+       /* link into the tracer list and track the longest tracer name */
+       type->next = trace_types;
+       trace_types = type;
+       len = strlen(type->name);
+       if (len > max_tracer_type_len)
+               max_tracer_type_len = len;
+
+ out:
+       mutex_unlock(&trace_types_lock);
+
+       return ret;
+}
+
+/*
+ * Remove @type from the tracer list. If it owned the longest tracer
+ * name, recompute max_tracer_type_len over the remaining tracers.
+ */
+void unregister_tracer(struct tracer *type)
+{
+       struct tracer **t;
+       int len;
+
+       mutex_lock(&trace_types_lock);
+       for (t = &trace_types; *t; t = &(*t)->next) {
+               if (*t == type)
+                       goto found;
+       }
+       pr_info("Trace %s not registered\n", type->name);
+       goto out;
+
+ found:
+       *t = (*t)->next;
+       if (strlen(type->name) != max_tracer_type_len)
+               goto out;
+
+       max_tracer_type_len = 0;
+       for (t = &trace_types; *t; t = &(*t)->next) {
+               len = strlen((*t)->name);
+               if (len > max_tracer_type_len)
+                       max_tracer_type_len = len;
+       }
+ out:
+       mutex_unlock(&trace_types_lock);
+}
+
+/* Empty a per-cpu buffer: head and tail both point at the first page. */
+void tracing_reset(struct trace_array_cpu *data)
+{
+       data->trace_idx = 0;
+       data->overrun = 0;
+       data->trace_head = data->trace_tail = head_page(data);
+       data->trace_head_idx = 0;
+       data->trace_tail_idx = 0;
+}
+
+#define SAVED_CMDLINES 128
+/* pid -> slot in saved_cmdlines; (unsigned)-1 means no mapping */
+static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
+/* slot -> pid that currently owns it (consulted when evicting) */
+static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
+static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
+static int cmdline_idx;
+static DEFINE_SPINLOCK(trace_cmdline_lock);
+
+/* trace in all context switches */
+atomic_t trace_record_cmdline_enabled __read_mostly;
+
+/* temporary disable recording */
+atomic_t trace_record_cmdline_disabled __read_mostly;
+
+/* Set both maps to all -1 (no mappings) and reset the slot cursor. */
+static void trace_init_cmdlines(void)
+{
+       memset(&map_pid_to_cmdline, -1, sizeof(map_pid_to_cmdline));
+       memset(&map_cmdline_to_pid, -1, sizeof(map_cmdline_to_pid));
+       cmdline_idx = 0;
+}
+
+void trace_stop_cmdline_recording(void);
+
+/* Cache @tsk's comm so trace_find_cmdline() can resolve its pid later. */
+static void trace_save_cmdline(struct task_struct *tsk)
+{
+       unsigned map;
+       unsigned idx;
+
+       if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
+               return;
+
+       /*
+        * It's not the end of the world if we don't get
+        * the lock, but we also don't want to spin
+        * nor do we want to disable interrupts,
+        * so if we miss here, then better luck next time.
+        */
+       if (!spin_trylock(&trace_cmdline_lock))
+               return;
+
+       idx = map_pid_to_cmdline[tsk->pid];
+       if (idx >= SAVED_CMDLINES) {
+               /* no slot yet ((unsigned)-1): claim the next one ... */
+               idx = (cmdline_idx + 1) % SAVED_CMDLINES;
+
+               /*
+                * ... evicting whichever pid owned it before.
+                * NOTE(review): map_cmdline_to_pid is never written here,
+                * so this lookup appears permanently stale — verify.
+                */
+               map = map_cmdline_to_pid[idx];
+               if (map <= PID_MAX_DEFAULT)
+                       map_pid_to_cmdline[map] = (unsigned)-1;
+
+               map_pid_to_cmdline[tsk->pid] = idx;
+
+               cmdline_idx = idx;
+       }
+
+       memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);
+
+       spin_unlock(&trace_cmdline_lock);
+}
+
+/* Resolve @pid to its saved comm: "<idle>" for pid 0, "<...>" if unknown. */
+static char *trace_find_cmdline(int pid)
+{
+       char *cmdline = "<...>";
+       unsigned map;
+
+       if (!pid)
+               return "<idle>";
+
+       if (pid > PID_MAX_DEFAULT)
+               goto out;
+
+       map = map_pid_to_cmdline[pid];
+       if (map >= SAVED_CMDLINES)
+               goto out;
+
+       cmdline = saved_cmdlines[map];
+
+ out:
+       return cmdline;
+}
+
+/* Save @tsk's comm in the cmdline cache unless recording is disabled. */
+void tracing_record_cmdline(struct task_struct *tsk)
+{
+       if (atomic_read(&trace_record_cmdline_disabled))
+               return;
+
+       trace_save_cmdline(tsk);
+}
+
+static inline struct list_head *
+trace_next_list(struct trace_array_cpu *data, struct list_head *next)
+{
+       /*
+        * Roundrobin - but skip the head (which is not a real page):
+        */
+       next = next->next;
+       if (unlikely(next == &data->trace_pages))
+               next = next->next;
+       BUG_ON(next == &data->trace_pages);
+
+       return next;
+}
+
+/* Return the start address of the buffer page after the one holding @addr. */
+static inline void *
+trace_next_page(struct trace_array_cpu *data, void *addr)
+{
+       struct list_head *next;
+       struct page *page;
+
+       page = virt_to_page(addr);
+
+       next = trace_next_list(data, &page->lru);
+       page = list_entry(next, struct page, lru);
+
+       return page_address(page);
+}
+
+/*
+ * Reserve the next entry slot in the ring buffer, advancing the head
+ * and, on overrun, pushing the tail forward (dropping the oldest
+ * entry and bumping the overrun count).
+ */
+static inline struct trace_entry *
+tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
+{
+       unsigned long idx, idx_next;
+       struct trace_entry *entry;
+
+       data->trace_idx++;
+       idx = data->trace_head_idx;
+       idx_next = idx + 1;
+
+       BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
+
+       entry = data->trace_head + idx * TRACE_ENTRY_SIZE;
+
+       /* wrap the head to the next page when this one is full */
+       if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
+               data->trace_head = trace_next_page(data, data->trace_head);
+               idx_next = 0;
+       }
+
+       if (data->trace_head == data->trace_tail &&
+           idx_next == data->trace_tail_idx) {
+               /* overrun */
+               data->overrun++;
+               data->trace_tail_idx++;
+               if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
+                       data->trace_tail =
+                               trace_next_page(data, data->trace_tail);
+                       data->trace_tail_idx = 0;
+               }
+       }
+
+       data->trace_head_idx = idx_next;
+
+       return entry;
+}
+
+/*
+ * Fill in the fields common to every trace entry: preempt count,
+ * pid, timestamp, and the irq/softirq/resched status flags.
+ */
+static inline void
+tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
+{
+       struct task_struct *tsk = current;
+       unsigned long pc;
+
+       pc = preempt_count();
+
+       entry->preempt_count    = pc & 0xff;
+       entry->pid              = (tsk) ? tsk->pid : 0;
+       entry->t                = ftrace_now(raw_smp_processor_id());
+       entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
+               ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
+               ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+               (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
+}
+
+/* Record a function-call entry (ip + parent ip) under data->lock. */
+void
+trace_function(struct trace_array *tr, struct trace_array_cpu *data,
+              unsigned long ip, unsigned long parent_ip, unsigned long flags)
+{
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, flags);
+       entry->type             = TRACE_FN;
+       entry->fn.ip            = ip;
+       entry->fn.parent_ip     = parent_ip;
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+}
+
+/* As trace_function(), but dropped while this cpu's buffer is disabled. */
+void
+ftrace(struct trace_array *tr, struct trace_array_cpu *data,
+       unsigned long ip, unsigned long parent_ip, unsigned long flags)
+{
+       if (likely(!atomic_read(&data->disabled)))
+               trace_function(tr, data, ip, parent_ip, flags);
+}
+
+#ifdef CONFIG_MMIOTRACE
+/* Record an MMIO read/write event and wake any waiting readers. */
+void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data,
+                                               struct mmiotrace_rw *rw)
+{
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, 0);
+       entry->type             = TRACE_MMIO_RW;
+       entry->mmiorw           = *rw;
+
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+
+       trace_wake_up();
+}
+
+/* Record an MMIO map/unmap event and wake any waiting readers. */
+void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
+                                               struct mmiotrace_map *map)
+{
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, 0);
+       entry->type             = TRACE_MMIO_MAP;
+       entry->mmiomap          = *map;
+
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+
+       trace_wake_up();
+}
+#endif
+
+/*
+ * Record a stack-trace entry, skipping the first @skip frames.
+ * A no-op unless the "stacktrace" iterator option is set. Takes no
+ * lock itself — callers in this file hold data->lock.
+ */
+void __trace_stack(struct trace_array *tr,
+                  struct trace_array_cpu *data,
+                  unsigned long flags,
+                  int skip)
+{
+       struct trace_entry *entry;
+       struct stack_trace trace;
+
+       if (!(trace_flags & TRACE_ITER_STACKTRACE))
+               return;
+
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, flags);
+       entry->type             = TRACE_STACK;
+
+       memset(&entry->stack, 0, sizeof(entry->stack));
+
+       trace.nr_entries        = 0;
+       trace.max_entries       = FTRACE_STACK_ENTRIES;
+       trace.skip              = skip;
+       trace.entries           = entry->stack.caller;
+
+       save_stack_trace(&trace);
+}
+
+/* Record a "special" entry carrying three arbitrary values, plus a stack. */
+void
+__trace_special(void *__tr, void *__data,
+               unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+       struct trace_array_cpu *data = __data;
+       struct trace_array *tr = __tr;
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, 0);
+       entry->type             = TRACE_SPECIAL;
+       entry->special.arg1     = arg1;
+       entry->special.arg2     = arg2;
+       entry->special.arg3     = arg3;
+       __trace_stack(tr, data, irq_flags, 4);
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+
+       trace_wake_up();
+}
+
+/* Record a context-switch (prev -> next) entry plus a stack trace. */
+void
+tracing_sched_switch_trace(struct trace_array *tr,
+                          struct trace_array_cpu *data,
+                          struct task_struct *prev,
+                          struct task_struct *next,
+                          unsigned long flags)
+{
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, flags);
+       entry->type             = TRACE_CTX;
+       entry->ctx.prev_pid     = prev->pid;
+       entry->ctx.prev_prio    = prev->prio;
+       entry->ctx.prev_state   = prev->state;
+       entry->ctx.next_pid     = next->pid;
+       entry->ctx.next_prio    = next->prio;
+       entry->ctx.next_state   = next->state;
+       __trace_stack(tr, data, flags, 5);
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+}
+
+/* Record a wakeup entry (@curr waking @wakee) plus a stack trace. */
+void
+tracing_sched_wakeup_trace(struct trace_array *tr,
+                          struct trace_array_cpu *data,
+                          struct task_struct *wakee,
+                          struct task_struct *curr,
+                          unsigned long flags)
+{
+       struct trace_entry *entry;
+       unsigned long irq_flags;
+
+       raw_local_irq_save(irq_flags);
+       __raw_spin_lock(&data->lock);
+       entry                   = tracing_get_trace_entry(tr, data);
+       tracing_generic_entry_update(entry, flags);
+       entry->type             = TRACE_WAKE;
+       entry->ctx.prev_pid     = curr->pid;
+       entry->ctx.prev_prio    = curr->prio;
+       entry->ctx.prev_state   = curr->state;
+       entry->ctx.next_pid     = wakee->pid;
+       entry->ctx.next_prio    = wakee->prio;
+       entry->ctx.next_state   = wakee->state;
+       __trace_stack(tr, data, flags, 6);
+       __raw_spin_unlock(&data->lock);
+       raw_local_irq_restore(irq_flags);
+
+       trace_wake_up();
+}
+
+#ifdef CONFIG_FTRACE
+/*
+ * Callback invoked by ftrace for every traced function. The per-cpu
+ * "disabled" counter drops nested/reentrant calls on the same cpu
+ * instead of recursing into the tracer.
+ */
+static void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct trace_array *tr = &global_trace;
+       struct trace_array_cpu *data;
+       unsigned long flags;
+       long disabled;
+       int cpu;
+
+       if (unlikely(!tracer_enabled))
+               return;
+
+       local_irq_save(flags);
+       cpu = raw_smp_processor_id();
+       data = tr->data[cpu];
+       disabled = atomic_inc_return(&data->disabled);
+
+       /* only trace if this is the first (non-nested) entry on this cpu */
+       if (likely(disabled == 1))
+               trace_function(tr, data, ip, parent_ip, flags);
+
+       atomic_dec(&data->disabled);
+       local_irq_restore(flags);
+}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+       .func = function_trace_call,
+};
+
+/* Hook function_trace_call() into the ftrace function-trace list. */
+void tracing_start_function_trace(void)
+{
+       register_ftrace_function(&trace_ops);
+}
+
+/* Remove function_trace_call() from the ftrace function-trace list. */
+void tracing_stop_function_trace(void)
+{
+       unregister_ftrace_function(&trace_ops);
+}
+#endif
+
+enum trace_file_type {
+       TRACE_FILE_LAT_FMT      = 1,
+};
+
+/*
+ * Return a pointer to the next unread entry in @cpu's buffer, or NULL
+ * when that cpu has no more entries. Lazily positions the iterator's
+ * page cursor at the buffer tail on first use.
+ */
+static struct trace_entry *
+trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
+               struct trace_iterator *iter, int cpu)
+{
+       struct page *page;
+       struct trace_entry *array;
+
+       if (iter->next_idx[cpu] >= tr->entries ||
+           iter->next_idx[cpu] >= data->trace_idx ||
+           (data->trace_head == data->trace_tail &&
+            data->trace_head_idx == data->trace_tail_idx))
+               return NULL;
+
+       if (!iter->next_page[cpu]) {
+               /* Initialize the iterator for this cpu trace buffer */
+               WARN_ON(!data->trace_tail);
+               page = virt_to_page(data->trace_tail);
+               iter->next_page[cpu] = &page->lru;
+               iter->next_page_idx[cpu] = data->trace_tail_idx;
+       }
+
+       page = list_entry(iter->next_page[cpu], struct page, lru);
+       BUG_ON(&data->trace_pages == &page->lru);
+
+       array = page_address(page);
+
+       WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
+       return &array[iter->next_page_idx[cpu]];
+}
+
+/*
+ * Among all cpus, pick the unread entry with the earliest timestamp.
+ * Optionally reports the owning cpu through @ent_cpu.
+ */
+static struct trace_entry *
+find_next_entry(struct trace_iterator *iter, int *ent_cpu)
+{
+       struct trace_array *tr = iter->tr;
+       struct trace_entry *ent, *next = NULL;
+       int next_cpu = -1;
+       int cpu;
+
+       for_each_tracing_cpu(cpu) {
+               if (!head_page(tr->data[cpu]))
+                       continue;
+               ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu);
+               /*
+                * Pick the entry with the smallest timestamp:
+                */
+               if (ent && (!next || ent->t < next->t)) {
+                       next = ent;
+                       next_cpu = cpu;
+               }
+       }
+
+       if (ent_cpu)
+               *ent_cpu = next_cpu;
+
+       return next;
+}
+
+/*
+ * Advance the iterator cursor for the CPU the current entry came from
+ * (iter->cpu), stepping to the next trace page when the per-page index
+ * wraps at ENTRIES_PER_PAGE.
+ */
+static void trace_iterator_increment(struct trace_iterator *iter)
+{
+       iter->idx++;
+       iter->next_idx[iter->cpu]++;
+       iter->next_page_idx[iter->cpu]++;
+
+       if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
+               struct trace_array_cpu *data = iter->tr->data[iter->cpu];
+
+               iter->next_page_idx[iter->cpu] = 0;
+               iter->next_page[iter->cpu] =
+                       trace_next_list(data, iter->next_page[iter->cpu]);
+       }
+}
+
+/*
+ * Destructively consume one entry from the tail of iter->cpu's buffer,
+ * advancing the tail page when it wraps.  Resets trace_idx once the
+ * buffer becomes empty so fresh writes start from a clean index.
+ */
+static void trace_consume(struct trace_iterator *iter)
+{
+       struct trace_array_cpu *data = iter->tr->data[iter->cpu];
+
+       data->trace_tail_idx++;
+       if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
+               data->trace_tail = trace_next_page(data, data->trace_tail);
+               data->trace_tail_idx = 0;
+       }
+
+       /* Check if we empty it, then reset the index */
+       if (data->trace_head == data->trace_tail &&
+           data->trace_head_idx == data->trace_tail_idx)
+               data->trace_idx = 0;
+}
+
+/*
+ * Move the iterator to the next entry (by timestamp, across CPUs),
+ * remembering the previous entry/cpu for the formatters.  Returns the
+ * iterator itself on success, NULL when the buffers are exhausted --
+ * matching the seq_file "next" convention used by s_next().
+ */
+static void *find_next_entry_inc(struct trace_iterator *iter)
+{
+       struct trace_entry *next;
+       int next_cpu = -1;
+
+       next = find_next_entry(iter, &next_cpu);
+
+       iter->prev_ent = iter->ent;
+       iter->prev_cpu = iter->cpu;
+
+       iter->ent = next;
+       iter->cpu = next_cpu;
+
+       if (next)
+               trace_iterator_increment(iter);
+
+       return next ? iter : NULL;
+}
+
+/*
+ * seq_file ->next callback: advance the iterator to position *pos.
+ * Returns the iterator (used as the seq_file cursor) or NULL at EOF.
+ * NOTE(review): on the transition to EOF this emits a "vim:ft=help"
+ * modeline into the output -- deliberate cosmetic footer, not data.
+ */
+static void *s_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct trace_iterator *iter = m->private;
+       void *last_ent = iter->ent;
+       int i = (int)*pos;
+       void *ent;
+
+       (*pos)++;
+
+       /* can't go backwards */
+       if (iter->idx > i)
+               return NULL;
+
+       if (iter->idx < 0)
+               ent = find_next_entry_inc(iter);
+       else
+               ent = iter;
+
+       /* catch up to the requested position */
+       while (ent && iter->idx < i)
+               ent = find_next_entry_inc(iter);
+
+       iter->pos = *pos;
+
+       if (last_ent && !ent)
+               seq_puts(m, "\n\nvim:ft=help\n");
+
+       return ent;
+}
+
+/*
+ * seq_file ->start callback: takes trace_types_lock (released by
+ * s_stop()), disables cmdline recording, and positions the iterator at
+ * *pos.  If *pos does not match the cached iter->pos the iterator state
+ * is rebuilt from scratch by replaying s_next().
+ */
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+       struct trace_iterator *iter = m->private;
+       void *p = NULL;
+       loff_t l = 0;
+       int i;
+
+       mutex_lock(&trace_types_lock);
+
+       /* the tracer was switched since the file was opened: bail out */
+       if (!current_trace || current_trace != iter->trace) {
+               mutex_unlock(&trace_types_lock);
+               return NULL;
+       }
+
+       atomic_inc(&trace_record_cmdline_disabled);
+
+       /* let the tracer grab locks here if needed */
+       if (current_trace->start)
+               current_trace->start(iter);
+
+       if (*pos != iter->pos) {
+               iter->ent = NULL;
+               iter->cpu = 0;
+               iter->idx = -1;
+               iter->prev_ent = NULL;
+               iter->prev_cpu = -1;
+
+               for_each_tracing_cpu(i) {
+                       iter->next_idx[i] = 0;
+                       iter->next_page[i] = NULL;
+               }
+
+               for (p = iter; p && l < *pos; p = s_next(m, p, &l))
+                       ;
+
+       } else {
+               /* already positioned: just step once to *pos */
+               l = *pos - 1;
+               p = s_next(m, p, &l);
+       }
+
+       return p;
+}
+
+/*
+ * seq_file ->stop callback: undoes s_start() -- re-enables cmdline
+ * recording, lets the tracer drop its locks, releases trace_types_lock.
+ */
+static void s_stop(struct seq_file *m, void *p)
+{
+       struct trace_iterator *iter = m->private;
+
+       atomic_dec(&trace_record_cmdline_disabled);
+
+       /* let the tracer release locks here if needed */
+       if (current_trace && current_trace == iter->trace && iter->trace->stop)
+               iter->trace->stop(iter);
+
+       mutex_unlock(&trace_types_lock);
+}
+
+/*
+ * Print the symbol name for @address into @s using @fmt ("%s").
+ * Returns the trace_seq_printf() result (0 on seq overflow); without
+ * CONFIG_KALLSYMS it prints nothing and reports success (the trailing
+ * "return 1" is only reachable in that configuration).
+ */
+static int
+seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+       char str[KSYM_SYMBOL_LEN];
+
+       kallsyms_lookup(address, NULL, NULL, NULL, str);
+
+       return trace_seq_printf(s, fmt, str);
+#endif
+       return 1;
+}
+
+/*
+ * Like seq_print_sym_short() but with the symbol+offset/size form
+ * produced by sprint_symbol().
+ */
+static int
+seq_print_sym_offset(struct trace_seq *s, const char *fmt,
+                    unsigned long address)
+{
+#ifdef CONFIG_KALLSYMS
+       char str[KSYM_SYMBOL_LEN];
+
+       sprint_symbol(str, address);
+       return trace_seq_printf(s, fmt, str);
+#endif
+       return 1;
+}
+
+/* Native-width zero-padded hex format for instruction pointers. */
+#ifndef CONFIG_64BIT
+# define IP_FMT "%08lx"
+#else
+# define IP_FMT "%016lx"
+#endif
+
+/*
+ * Print @ip as a symbol (short or +offset form per sym_flags), followed
+ * by the raw address in <...> when TRACE_ITER_SYM_ADDR is set.  A NULL
+ * ip prints "0".  Returns 0 when the trace_seq overflowed, non-zero
+ * otherwise.
+ */
+static int
+seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
+{
+       int ret;
+
+       if (!ip)
+               return trace_seq_printf(s, "0");
+
+       if (sym_flags & TRACE_ITER_SYM_OFFSET)
+               ret = seq_print_sym_offset(s, "%s", ip);
+       else
+               ret = seq_print_sym_short(s, "%s", ip);
+
+       if (!ret)
+               return 0;
+
+       if (sym_flags & TRACE_ITER_SYM_ADDR)
+               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
+       return ret;
+}
+
+/* Column legend for the latency-format output (see lat_print_generic()). */
+static void print_lat_help_header(struct seq_file *m)
+{
+       seq_puts(m, "#                _------=> CPU#            \n");
+       seq_puts(m, "#               / _-----=> irqs-off        \n");
+       seq_puts(m, "#              | / _----=> need-resched    \n");
+       seq_puts(m, "#              || / _---=> hardirq/softirq \n");
+       seq_puts(m, "#              ||| / _--=> preempt-depth   \n");
+       seq_puts(m, "#              |||| /                      \n");
+       seq_puts(m, "#              |||||     delay             \n");
+       seq_puts(m, "#  cmd     pid ||||| time  |   caller      \n");
+       seq_puts(m, "#     \\   /    |||||   \\   |   /           \n");
+}
+
+/* Column legend for the default (function) output format. */
+static void print_func_help_header(struct seq_file *m)
+{
+       seq_puts(m, "#           TASK-PID   CPU#    TIMESTAMP  FUNCTION\n");
+       seq_puts(m, "#              | |      |          |         |\n");
+}
+
+
+/*
+ * Emit the latency-trace banner: tracer name/version, entry counts,
+ * preemption model, the task that hit the max latency, and (when
+ * recorded) the critical-section start/end symbols.
+ */
+static void
+print_trace_header(struct seq_file *m, struct trace_iterator *iter)
+{
+       unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+       struct trace_array *tr = iter->tr;
+       struct trace_array_cpu *data = tr->data[tr->cpu];
+       struct tracer *type = current_trace;
+       unsigned long total   = 0;
+       unsigned long entries = 0;
+       int cpu;
+       const char *name = "preemption";
+
+       if (type)
+               name = type->name;
+
+       /* total = all recorded entries; entries = those still in buffers */
+       for_each_tracing_cpu(cpu) {
+               if (head_page(tr->data[cpu])) {
+                       total += tr->data[cpu]->trace_idx;
+                       if (tr->data[cpu]->trace_idx > tr->entries)
+                               entries += tr->entries;
+                       else
+                               entries += tr->data[cpu]->trace_idx;
+               }
+       }
+
+       seq_printf(m, "%s latency trace v1.1.5 on %s\n",
+                  name, UTS_RELEASE);
+       seq_puts(m, "-----------------------------------"
+                "---------------------------------\n");
+       seq_printf(m, " latency: %lu us, #%lu/%lu, CPU#%d |"
+                  " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
+                  nsecs_to_usecs(data->saved_latency),
+                  entries,
+                  total,
+                  tr->cpu,
+#if defined(CONFIG_PREEMPT_NONE)
+                  "server",
+#elif defined(CONFIG_PREEMPT_VOLUNTARY)
+                  "desktop",
+#elif defined(CONFIG_PREEMPT_DESKTOP)
+                  "preempt",
+#else
+                  "unknown",
+#endif
+                  /* These are reserved for later use */
+                  0, 0, 0, 0);
+#ifdef CONFIG_SMP
+       seq_printf(m, " #P:%d)\n", num_online_cpus());
+#else
+       seq_puts(m, ")\n");
+#endif
+       seq_puts(m, "    -----------------\n");
+       seq_printf(m, "    | task: %.16s-%d "
+                  "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
+                  data->comm, data->pid, data->uid, data->nice,
+                  data->policy, data->rt_priority);
+       seq_puts(m, "    -----------------\n");
+
+       if (data->critical_start) {
+               seq_puts(m, " => started at: ");
+               seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
+               trace_print_seq(m, &iter->seq);
+               seq_puts(m, "\n => ended at:   ");
+               seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
+               trace_print_seq(m, &iter->seq);
+               seq_puts(m, "\n");
+       }
+
+       seq_puts(m, "\n");
+}
+
+/*
+ * Print the fixed-width latency-format prefix for one entry:
+ * comm-pid, cpu, then the flag characters (irqs-off, need-resched,
+ * hardirq/softirq, preempt depth) described by print_lat_help_header().
+ */
+static void
+lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
+{
+       int hardirq, softirq;
+       char *comm;
+
+       comm = trace_find_cmdline(entry->pid);
+
+       trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
+       trace_seq_printf(s, "%d", cpu);
+       trace_seq_printf(s, "%c%c",
+                       (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
+                       ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
+
+       /* 'H' = in hardirq that interrupted a softirq, 'h'/'s' = one only */
+       hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
+       softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
+       if (hardirq && softirq) {
+               trace_seq_putc(s, 'H');
+       } else {
+               if (hardirq) {
+                       trace_seq_putc(s, 'h');
+               } else {
+                       if (softirq)
+                               trace_seq_putc(s, 's');
+                       else
+                               trace_seq_putc(s, '.');
+               }
+       }
+
+       if (entry->preempt_count)
+               trace_seq_printf(s, "%x", entry->preempt_count);
+       else
+               trace_seq_puts(s, ".");
+}
+
+/* Relative delay (us) above which an entry is flagged with '!'. */
+unsigned long preempt_mark_thresh = 100;
+
+/*
+ * Print the absolute timestamp plus a delay marker: '!' for delays over
+ * preempt_mark_thresh us, '+' for anything over 1 us, blank otherwise.
+ */
+static void
+lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
+                   unsigned long rel_usecs)
+{
+       trace_seq_printf(s, " %4lldus", abs_usecs);
+       if (rel_usecs > preempt_mark_thresh)
+               trace_seq_puts(s, "!: ");
+       else if (rel_usecs > 1)
+               trace_seq_puts(s, "+: ");
+       else
+               trace_seq_puts(s, " : ");
+}
+
+/* Maps task state bit numbers to their single-letter display codes. */
+static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
+
+/*
+ * Format one entry in latency style into iter->seq.  rel_usecs is the
+ * gap to the next entry (peeked without consuming); abs_usecs is the
+ * offset from the trace start.  Verbose mode prints raw numeric fields
+ * instead of the compact flag columns.  Always returns 1.
+ */
+static int
+print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
+{
+       struct trace_seq *s = &iter->seq;
+       unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+       struct trace_entry *next_entry = find_next_entry(iter, NULL);
+       unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
+       struct trace_entry *entry = iter->ent;
+       unsigned long abs_usecs;
+       unsigned long rel_usecs;
+       char *comm;
+       int S, T;
+       int i;
+       unsigned state;
+
+       /* last entry: relative delay degenerates to zero */
+       if (!next_entry)
+               next_entry = entry;
+       rel_usecs = ns2usecs(next_entry->t - entry->t);
+       abs_usecs = ns2usecs(entry->t - iter->tr->time_start);
+
+       if (verbose) {
+               comm = trace_find_cmdline(entry->pid);
+               trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]"
+                                " %ld.%03ldms (+%ld.%03ldms): ",
+                                comm,
+                                entry->pid, cpu, entry->flags,
+                                entry->preempt_count, trace_idx,
+                                ns2usecs(entry->t),
+                                abs_usecs/1000,
+                                abs_usecs % 1000, rel_usecs/1000,
+                                rel_usecs % 1000);
+       } else {
+               lat_print_generic(s, entry, cpu);
+               lat_print_timestamp(s, abs_usecs, rel_usecs);
+       }
+       switch (entry->type) {
+       case TRACE_FN:
+               seq_print_ip_sym(s, entry->fn.ip, sym_flags);
+               trace_seq_puts(s, " (");
+               seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags);
+               trace_seq_puts(s, ")\n");
+               break;
+       case TRACE_CTX:
+       case TRACE_WAKE:
+               T = entry->ctx.next_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.next_state] : 'X';
+
+               /* prev_state is a bitmask; translate to a bit index first */
+               state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
+               S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
+               comm = trace_find_cmdline(entry->ctx.next_pid);
+               trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
+                                entry->ctx.prev_pid,
+                                entry->ctx.prev_prio,
+                                S, entry->type == TRACE_CTX ? "==>" : "  +",
+                                entry->ctx.next_pid,
+                                entry->ctx.next_prio,
+                                T, comm);
+               break;
+       case TRACE_SPECIAL:
+               trace_seq_printf(s, "# %ld %ld %ld\n",
+                                entry->special.arg1,
+                                entry->special.arg2,
+                                entry->special.arg3);
+               break;
+       case TRACE_STACK:
+               for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+                       if (i)
+                               trace_seq_puts(s, " <= ");
+                       seq_print_ip_sym(s, entry->stack.caller[i], sym_flags);
+               }
+               trace_seq_puts(s, "\n");
+               break;
+       default:
+               trace_seq_printf(s, "Unknown type %d\n", entry->type);
+       }
+       return 1;
+}
+
+/*
+ * Format one entry in the default "function" style (comm-pid [cpu]
+ * secs.usecs: payload).  Returns 0 as soon as the trace_seq overflows
+ * so the caller can retry with a flushed buffer, 1 on success.
+ */
+static int print_trace_fmt(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
+       struct trace_entry *entry;
+       unsigned long usec_rem;
+       unsigned long long t;
+       unsigned long secs;
+       char *comm;
+       int ret;
+       int S, T;
+       int i;
+
+       entry = iter->ent;
+
+       comm = trace_find_cmdline(iter->ent->pid);
+
+       /* split the ns timestamp into seconds + microsecond remainder */
+       t = ns2usecs(entry->t);
+       usec_rem = do_div(t, 1000000ULL);
+       secs = (unsigned long)t;
+
+       ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+       if (!ret)
+               return 0;
+       ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+       if (!ret)
+               return 0;
+       ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+       if (!ret)
+               return 0;
+
+       switch (entry->type) {
+       case TRACE_FN:
+               ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags);
+               if (!ret)
+                       return 0;
+               if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
+                                               entry->fn.parent_ip) {
+                       ret = trace_seq_printf(s, " <-");
+                       if (!ret)
+                               return 0;
+                       ret = seq_print_ip_sym(s, entry->fn.parent_ip,
+                                              sym_flags);
+                       if (!ret)
+                               return 0;
+               }
+               ret = trace_seq_printf(s, "\n");
+               if (!ret)
+                       return 0;
+               break;
+       case TRACE_CTX:
+       case TRACE_WAKE:
+               S = entry->ctx.prev_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.prev_state] : 'X';
+               T = entry->ctx.next_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.next_state] : 'X';
+               ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
+                                      entry->ctx.prev_pid,
+                                      entry->ctx.prev_prio,
+                                      S,
+                                      entry->type == TRACE_CTX ? "==>" : "  +",
+                                      entry->ctx.next_pid,
+                                      entry->ctx.next_prio,
+                                      T);
+               if (!ret)
+                       return 0;
+               break;
+       case TRACE_SPECIAL:
+               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
+                                entry->special.arg1,
+                                entry->special.arg2,
+                                entry->special.arg3);
+               if (!ret)
+                       return 0;
+               break;
+       case TRACE_STACK:
+               for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
+                       if (i) {
+                               ret = trace_seq_puts(s, " <= ");
+                               if (!ret)
+                                       return 0;
+                       }
+                       ret = seq_print_ip_sym(s, entry->stack.caller[i],
+                                              sym_flags);
+                       if (!ret)
+                               return 0;
+               }
+               ret = trace_seq_puts(s, "\n");
+               if (!ret)
+                       return 0;
+               break;
+       }
+       return 1;
+}
+
+/*
+ * Format one entry in "raw" style: space-separated numeric fields only,
+ * meant for machine parsing.  Returns 0 when the trace_seq overflows,
+ * 1 on success.
+ */
+static int print_raw_fmt(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry;
+       int ret;
+       int S, T;
+
+       entry = iter->ent;
+
+       ret = trace_seq_printf(s, "%d %d %llu ",
+               entry->pid, iter->cpu, entry->t);
+       if (!ret)
+               return 0;
+
+       switch (entry->type) {
+       case TRACE_FN:
+               /*
+                * fn.ip/fn.parent_ip are unsigned long (see
+                * seq_print_ip_sym()); "%x" would be a format/argument
+                * mismatch and truncate on 64-bit -- use "%lx".
+                */
+               ret = trace_seq_printf(s, "%lx %lx\n",
+                                       entry->fn.ip, entry->fn.parent_ip);
+               if (!ret)
+                       return 0;
+               break;
+       case TRACE_CTX:
+       case TRACE_WAKE:
+               S = entry->ctx.prev_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.prev_state] : 'X';
+               T = entry->ctx.next_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.next_state] : 'X';
+               /* wakeups are distinguished from switches by a '+' state */
+               if (entry->type == TRACE_WAKE)
+                       S = '+';
+               ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
+                                      entry->ctx.prev_pid,
+                                      entry->ctx.prev_prio,
+                                      S,
+                                      entry->ctx.next_pid,
+                                      entry->ctx.next_prio,
+                                      T);
+               if (!ret)
+                       return 0;
+               break;
+       case TRACE_SPECIAL:
+       case TRACE_STACK:
+               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
+                                entry->special.arg1,
+                                entry->special.arg2,
+                                entry->special.arg3);
+               if (!ret)
+                       return 0;
+               break;
+       }
+       return 1;
+}
+
+/* Copy a field verbatim into the trace_seq; return 0 from the enclosing
+ * function on overflow. */
+#define SEQ_PUT_FIELD_RET(s, x)                                \
+do {                                                   \
+       if (!trace_seq_putmem(s, &(x), sizeof(x)))      \
+               return 0;                               \
+} while (0)
+
+/* As above but hex-encoded, for the "hex" output format. */
+#define SEQ_PUT_HEX_FIELD_RET(s, x)                    \
+do {                                                   \
+       if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
+               return 0;                               \
+} while (0)
+
+/*
+ * Format one entry as hex-encoded fields.  Returns 0 when the trace_seq
+ * overflows (via the SEQ_PUT_*_RET macros), 1 on success.
+ */
+static int print_hex_fmt(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       unsigned char newline = '\n';
+       struct trace_entry *entry;
+       int S, T;
+
+       entry = iter->ent;
+
+       SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
+       SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
+       SEQ_PUT_HEX_FIELD_RET(s, entry->t);
+
+       switch (entry->type) {
+       case TRACE_FN:
+               SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip);
+               SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
+               break;
+       case TRACE_CTX:
+       case TRACE_WAKE:
+               S = entry->ctx.prev_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.prev_state] : 'X';
+               T = entry->ctx.next_state < sizeof(state_to_char) ?
+                       state_to_char[entry->ctx.next_state] : 'X';
+               if (entry->type == TRACE_WAKE)
+                       S = '+';
+               /*
+                * Emit only ctx.* fields here: entry->fn is the wrong
+                * union member for a ctx/wake record (the other
+                * formatters never touch fn.* in this case), so the
+                * stray fn.parent_ip dump was dropped.
+                */
+               SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
+               SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio);
+               SEQ_PUT_HEX_FIELD_RET(s, S);
+               SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
+               SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
+               SEQ_PUT_HEX_FIELD_RET(s, T);
+               break;
+       case TRACE_SPECIAL:
+       case TRACE_STACK:
+               SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1);
+               SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2);
+               SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3);
+               break;
+       }
+       SEQ_PUT_FIELD_RET(s, newline);
+
+       return 1;
+}
+
+/*
+ * Format one entry as raw binary fields.  Returns 0 on trace_seq
+ * overflow, 1 on success.
+ * NOTE(review): unlike the other formatters, TRACE_WAKE is not listed
+ * with TRACE_CTX here, so wakeup entries emit only the common header --
+ * confirm whether that is intentional.
+ */
+static int print_bin_fmt(struct trace_iterator *iter)
+{
+       struct trace_seq *s = &iter->seq;
+       struct trace_entry *entry;
+
+       entry = iter->ent;
+
+       SEQ_PUT_FIELD_RET(s, entry->pid);
+       SEQ_PUT_FIELD_RET(s, entry->cpu);
+       SEQ_PUT_FIELD_RET(s, entry->t);
+
+       switch (entry->type) {
+       case TRACE_FN:
+               SEQ_PUT_FIELD_RET(s, entry->fn.ip);
+               SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip);
+               break;
+       case TRACE_CTX:
+               SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid);
+               SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio);
+               SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
+               SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
+               SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
+               SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);
+               break;
+       case TRACE_SPECIAL:
+       case TRACE_STACK:
+               SEQ_PUT_FIELD_RET(s, entry->special.arg1);
+               SEQ_PUT_FIELD_RET(s, entry->special.arg2);
+               SEQ_PUT_FIELD_RET(s, entry->special.arg3);
+               break;
+       }
+       return 1;
+}
+
+/*
+ * Return 1 when every per-cpu buffer is empty (no page, no entries, or
+ * head == tail), 0 when any CPU still holds unconsumed entries.
+ */
+static int trace_empty(struct trace_iterator *iter)
+{
+       struct trace_array_cpu *data;
+       int cpu;
+
+       for_each_tracing_cpu(cpu) {
+               data = iter->tr->data[cpu];
+
+               if (head_page(data) && data->trace_idx &&
+                   (data->trace_tail != data->trace_head ||
+                    data->trace_tail_idx != data->trace_head_idx))
+                       return 0;
+       }
+       return 1;
+}
+
+/*
+ * Dispatch one entry to the right formatter: a tracer-supplied
+ * print_line hook first, then bin/hex/raw by trace_flags, the latency
+ * format for lat-mode files, and the default function format last.
+ */
+static int print_trace_line(struct trace_iterator *iter)
+{
+       if (iter->trace && iter->trace->print_line)
+               return iter->trace->print_line(iter);
+
+       if (trace_flags & TRACE_ITER_BIN)
+               return print_bin_fmt(iter);
+
+       if (trace_flags & TRACE_ITER_HEX)
+               return print_hex_fmt(iter);
+
+       if (trace_flags & TRACE_ITER_RAW)
+               return print_raw_fmt(iter);
+
+       if (iter->iter_flags & TRACE_FILE_LAT_FMT)
+               return print_lat_fmt(iter, iter->idx, iter->cpu);
+
+       return print_trace_fmt(iter);
+}
+
+/*
+ * seq_file ->show callback: a NULL iter->ent marks the header position
+ * (tracer name plus the format-specific column legend); otherwise
+ * format the current entry and flush iter->seq into the seq_file.
+ */
+static int s_show(struct seq_file *m, void *v)
+{
+       struct trace_iterator *iter = v;
+
+       if (iter->ent == NULL) {
+               if (iter->tr) {
+                       seq_printf(m, "# tracer: %s\n", iter->trace->name);
+                       seq_puts(m, "#\n");
+               }
+               if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
+                       /* print nothing if the buffers are empty */
+                       if (trace_empty(iter))
+                               return 0;
+                       print_trace_header(m, iter);
+                       if (!(trace_flags & TRACE_ITER_VERBOSE))
+                               print_lat_help_header(m);
+               } else {
+                       if (!(trace_flags & TRACE_ITER_VERBOSE))
+                               print_func_help_header(m);
+               }
+       } else {
+               print_trace_line(iter);
+               trace_print_seq(m, &iter->seq);
+       }
+
+       return 0;
+}
+
+/* seq_file operations for the "trace" and "latency_trace" files. */
+static struct seq_operations tracer_seq_ops = {
+       .start          = s_start,
+       .next           = s_next,
+       .stop           = s_stop,
+       .show           = s_show,
+};
+
+/*
+ * Common open path for the trace files: allocate an iterator, bind it
+ * to the (max-latency or global) trace array and the current tracer,
+ * hook it up as the seq_file private data, and pause tracing for the
+ * duration of the dump.  *ret carries the errno; returns the iterator
+ * or NULL on failure.  The iterator is freed by tracing_release().
+ */
+static struct trace_iterator *
+__tracing_open(struct inode *inode, struct file *file, int *ret)
+{
+       struct trace_iterator *iter;
+
+       if (tracing_disabled) {
+               *ret = -ENODEV;
+               return NULL;
+       }
+
+       iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+       if (!iter) {
+               *ret = -ENOMEM;
+               goto out;
+       }
+
+       mutex_lock(&trace_types_lock);
+       /* latency tracers snapshot into max_tr; read that copy instead */
+       if (current_trace && current_trace->print_max)
+               iter->tr = &max_tr;
+       else
+               iter->tr = inode->i_private;
+       iter->trace = current_trace;
+       iter->pos = -1;
+
+       /* TODO stop tracer */
+       *ret = seq_open(file, &tracer_seq_ops);
+       if (!*ret) {
+               struct seq_file *m = file->private_data;
+               m->private = iter;
+
+               /* stop the trace while dumping */
+               if (iter->tr->ctrl)
+                       tracer_enabled = 0;
+
+               if (iter->trace && iter->trace->open)
+                       iter->trace->open(iter);
+       } else {
+               kfree(iter);
+               iter = NULL;
+       }
+       mutex_unlock(&trace_types_lock);
+
+ out:
+       return iter;
+}
+
+/*
+ * Generic ->open for the simple control files: stash the inode private
+ * pointer for the read/write handlers.
+ */
+int tracing_open_generic(struct inode *inode, struct file *filp)
+{
+       if (tracing_disabled)
+               return -ENODEV;
+
+       filp->private_data = inode->i_private;
+       return 0;
+}
+
+/*
+ * ->release for the seq_file-based trace files: undo __tracing_open()
+ * (tracer close hook, re-enable tracing, free the iterator).
+ */
+int tracing_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *m = (struct seq_file *)file->private_data;
+       struct trace_iterator *iter = m->private;
+
+       mutex_lock(&trace_types_lock);
+       if (iter->trace && iter->trace->close)
+               iter->trace->close(iter);
+
+       /* reenable tracing if it was previously enabled */
+       if (iter->tr->ctrl)
+               tracer_enabled = 1;
+       mutex_unlock(&trace_types_lock);
+
+       seq_release(inode, file);
+       kfree(iter);
+       return 0;
+}
+
+/* ->open for the "trace" file: default (function) output format. */
+static int tracing_open(struct inode *inode, struct file *file)
+{
+       int ret;
+
+       __tracing_open(inode, file, &ret);
+
+       return ret;
+}
+
+/*
+ * ->open for the "latency_trace" file: same as tracing_open() but marks
+ * the iterator for latency-format output.
+ */
+static int tracing_lt_open(struct inode *inode, struct file *file)
+{
+       struct trace_iterator *iter;
+       int ret;
+
+       iter = __tracing_open(inode, file, &ret);
+
+       if (!ret)
+               iter->iter_flags |= TRACE_FILE_LAT_FMT;
+
+       return ret;
+}
+
+
+/*
+ * seq_file callbacks for "available_tracers": iterate the singly-linked
+ * trace_types list under trace_types_lock, using m->private as cursor.
+ */
+static void *
+t_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       struct tracer *t = m->private;
+
+       (*pos)++;
+
+       if (t)
+               t = t->next;
+
+       m->private = t;
+
+       return t;
+}
+
+/* Take trace_types_lock (dropped in t_stop()) and seek to *pos. */
+static void *t_start(struct seq_file *m, loff_t *pos)
+{
+       struct tracer *t = m->private;
+       loff_t l = 0;
+
+       mutex_lock(&trace_types_lock);
+       for (; t && l < *pos; t = t_next(m, t, &l))
+               ;
+
+       return t;
+}
+
+static void t_stop(struct seq_file *m, void *p)
+{
+       mutex_unlock(&trace_types_lock);
+}
+
+/* Print one tracer name, space-separated, newline after the last. */
+static int t_show(struct seq_file *m, void *v)
+{
+       struct tracer *t = v;
+
+       if (!t)
+               return 0;
+
+       seq_printf(m, "%s", t->name);
+       if (t->next)
+               seq_putc(m, ' ');
+       else
+               seq_putc(m, '\n');
+
+       return 0;
+}
+
+static struct seq_operations show_traces_seq_ops = {
+       .start          = t_start,
+       .next           = t_next,
+       .stop           = t_stop,
+       .show           = t_show,
+};
+
+/* ->open for "available_tracers": seed the cursor with the list head. */
+static int show_traces_open(struct inode *inode, struct file *file)
+{
+       int ret;
+
+       if (tracing_disabled)
+               return -ENODEV;
+
+       ret = seq_open(file, &show_traces_seq_ops);
+       if (!ret) {
+               struct seq_file *m = file->private_data;
+               m->private = trace_types;
+       }
+
+       return ret;
+}
+
+/* File operations for the "trace" file (default format). */
+static struct file_operations tracing_fops = {
+       .open           = tracing_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = tracing_release,
+};
+
+/* File operations for the "latency_trace" file (latency format). */
+static struct file_operations tracing_lt_fops = {
+       .open           = tracing_lt_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = tracing_release,
+};
+
+/* File operations for the read-only "available_tracers" file. */
+static struct file_operations show_traces_fops = {
+       .open           = show_traces_open,
+       .read           = seq_read,
+       .release        = seq_release,
+};
+
+/*
+ * Only trace on a CPU if the bitmask is set:
+ */
+static cpumask_t tracing_cpumask = CPU_MASK_ALL;
+
+/*
+ * When tracing/tracing_cpu_mask is modified then this holds
+ * the new bitmask we are about to install:
+ */
+static cpumask_t tracing_cpumask_new;
+
+/*
+ * The tracer itself will not take this lock, but still we want
+ * to provide a consistent cpumask to user-space:
+ */
+static DEFINE_MUTEX(tracing_cpumask_update_lock);
+
+/*
+ * Temporary storage for the character representation of the
+ * CPU bitmask (and one more byte for the newline); shared by all
+ * readers, serialized by tracing_cpumask_update_lock.
+ */
+static char mask_str[NR_CPUS + 1];
+
+/*
+ * ->read for "tracing_cpumask": render the current mask plus a newline
+ * into mask_str and copy it out.  Fails with -EINVAL when the user
+ * buffer cannot hold the mask plus "\n".
+ * NOTE(review): @count (the user read size) is passed to
+ * cpumask_scnprintf() as the buffer bound even though mask_str is only
+ * NR_CPUS+1 bytes -- confirm the rendered mask can never exceed that.
+ */
+static ssize_t
+tracing_cpumask_read(struct file *filp, char __user *ubuf,
+                    size_t count, loff_t *ppos)
+{
+       int len;
+
+       mutex_lock(&tracing_cpumask_update_lock);
+
+       len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+       if (count - len < 2) {
+               count = -EINVAL;
+               goto out_err;
+       }
+       len += sprintf(mask_str + len, "\n");
+       count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+
+out_err:
+       mutex_unlock(&tracing_cpumask_update_lock);
+
+       return count;
+}
+
+/*
+ * ->write for "tracing_cpumask": parse the new mask from user space,
+ * then, with ftrace_max_lock held and irqs off, bump or drop each
+ * per-cpu disabled counter for every bit that flips, so tracing
+ * stops/starts atomically with the mask change.
+ */
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+                     size_t count, loff_t *ppos)
+{
+       int err, cpu;
+
+       mutex_lock(&tracing_cpumask_update_lock);
+       err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+       if (err)
+               goto err_unlock;
+
+       raw_local_irq_disable();
+       __raw_spin_lock(&ftrace_max_lock);
+       for_each_tracing_cpu(cpu) {
+               /*
+                * Increase/decrease the disabled counter if we are
+                * about to flip a bit in the cpumask:
+                */
+               if (cpu_isset(cpu, tracing_cpumask) &&
+                               !cpu_isset(cpu, tracing_cpumask_new)) {
+                       atomic_inc(&global_trace.data[cpu]->disabled);
+               }
+               if (!cpu_isset(cpu, tracing_cpumask) &&
+                               cpu_isset(cpu, tracing_cpumask_new)) {
+                       atomic_dec(&global_trace.data[cpu]->disabled);
+               }
+       }
+       __raw_spin_unlock(&ftrace_max_lock);
+       raw_local_irq_enable();
+
+       tracing_cpumask = tracing_cpumask_new;
+
+       mutex_unlock(&tracing_cpumask_update_lock);
+
+       return count;
+
+err_unlock:
+       mutex_unlock(&tracing_cpumask_update_lock);
+
+       return err;
+}
+
+static struct file_operations tracing_cpumask_fops = {
+       .open           = tracing_open_generic,
+       .read           = tracing_cpumask_read,
+       .write          = tracing_cpumask_write,
+};
+
+/*
+ * ->read for "iter_ctrl": list every option from trace_options[],
+ * prefixed with "no" when the corresponding trace_flags bit is clear.
+ * The buffer is sized from the option strings before formatting.
+ */
+static ssize_t
+tracing_iter_ctrl_read(struct file *filp, char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+       char *buf;
+       int r = 0;
+       int len = 0;
+       int i;
+
+       /* calulate max size */
+       for (i = 0; trace_options[i]; i++) {
+               len += strlen(trace_options[i]);
+               len += 3; /* "no" and space */
+       }
+
+       /* +2 for \n and \0 */
+       buf = kmalloc(len + 2, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       for (i = 0; trace_options[i]; i++) {
+               if (trace_flags & (1 << i))
+                       r += sprintf(buf + r, "%s ", trace_options[i]);
+               else
+                       r += sprintf(buf + r, "no%s ", trace_options[i]);
+       }
+
+       r += sprintf(buf + r, "\n");
+       WARN_ON(r >= len + 2);
+
+       r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+
+       kfree(buf);
+
+       return r;
+}
+
+/*
+ * Set or clear one trace option.  The written string is an option
+ * name, optionally prefixed with "no" to clear it (e.g. "nosym-addr").
+ *
+ * Returns @cnt on success, -EINVAL if the write is too long or names
+ * no known option, -EFAULT if the user buffer cannot be read.
+ */
+static ssize_t
+tracing_iter_ctrl_write(struct file *filp, const char __user *ubuf,
+                       size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       char *cmp = buf;
+       int neg = 0;
+       int i;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       if (strncmp(buf, "no", 2) == 0) {
+               neg = 1;
+               cmp += 2;
+       }
+
+       for (i = 0; trace_options[i]; i++) {
+               int len = strlen(trace_options[i]);
+
+               /* prefix match, so a trailing newline is tolerated */
+               if (strncmp(cmp, trace_options[i], len) == 0) {
+                       if (neg)
+                               trace_flags &= ~(1 << i);
+                       else
+                               trace_flags |= (1 << i);
+                       break;
+               }
+       }
+       /*
+        * If no option could be set, return an error:
+        */
+       if (!trace_options[i])
+               return -EINVAL;
+
+       /*
+        * Advance *ppos rather than filp->f_pos: the VFS copies *ppos
+        * back into f_pos after ->write() returns, so a direct f_pos
+        * update would be overwritten (and pwrite() would misbehave).
+        */
+       *ppos += cnt;
+
+       return cnt;
+}
+
+/* debugfs "iter_ctrl" file: query and toggle trace output options */
+static struct file_operations tracing_iter_fops = {
+       .read   = tracing_iter_ctrl_read,
+       .write  = tracing_iter_ctrl_write,
+       .open   = tracing_open_generic,
+};
+
+/* Contents of the debugfs "README" file: a short usage walkthrough. */
+static const char readme_msg[] =
+       "tracing mini-HOWTO:\n\n"
+       "# mkdir /debug\n"
+       "# mount -t debugfs nodev /debug\n\n"
+       "# cat /debug/tracing/available_tracers\n"
+       "wakeup preemptirqsoff preemptoff irqsoff ftrace sched_switch none\n\n"
+       "# cat /debug/tracing/current_tracer\n"
+       "none\n"
+       "# echo sched_switch > /debug/tracing/current_tracer\n"
+       "# cat /debug/tracing/current_tracer\n"
+       "sched_switch\n"
+       "# cat /debug/tracing/iter_ctrl\n"
+       "noprint-parent nosym-offset nosym-addr noverbose\n"
+       "# echo print-parent > /debug/tracing/iter_ctrl\n"
+       "# echo 1 > /debug/tracing/tracing_enabled\n"
+       "# cat /debug/tracing/trace > /tmp/trace.txt\n"
+       "# echo 0 > /debug/tracing/tracing_enabled\n"
+;
+
+/* Dump the static readme_msg usage help to user space. */
+static ssize_t
+tracing_readme_read(struct file *filp, char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+       const char *msg = readme_msg;
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, msg, strlen(msg));
+}
+
+/* debugfs "README" file: read-only usage help */
+static struct file_operations tracing_readme_fops = {
+       .read   = tracing_readme_read,
+       .open   = tracing_open_generic,
+};
+
+/* Report the enable state of the trace array behind @filp ("1\n"/"0\n"). */
+static ssize_t
+tracing_ctrl_read(struct file *filp, char __user *ubuf,
+                 size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = filp->private_data;
+       char tmp[64];
+       int len;
+
+       len = sprintf(tmp, "%ld\n", tr->ctrl);
+       return simple_read_from_buffer(ubuf, cnt, ppos, tmp, len);
+}
+
+/*
+ * Enable or disable the current tracer.  Accepts a decimal number:
+ * any non-zero value enables tracing, zero disables it.  The tracer's
+ * ctrl_update() hook is invoked only when the state actually changes.
+ *
+ * Returns @cnt on success, -EINVAL/-EFAULT for a bad write, or the
+ * error from strict_strtoul() for non-numeric input.
+ */
+static ssize_t
+tracing_ctrl_write(struct file *filp, const char __user *ubuf,
+                  size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = filp->private_data;
+       char buf[64];
+       long val;
+       int ret;
+
+       if (cnt >= sizeof(buf))
+               return -EINVAL;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       ret = strict_strtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+
+       /* normalize to 0/1 so e.g. "2" behaves like "1" */
+       val = !!val;
+
+       mutex_lock(&trace_types_lock);
+       if (tr->ctrl ^ val) {
+               if (val)
+                       tracer_enabled = 1;
+               else
+                       tracer_enabled = 0;
+
+               tr->ctrl = val;
+
+               if (current_trace && current_trace->ctrl_update)
+                       current_trace->ctrl_update(tr);
+       }
+       mutex_unlock(&trace_types_lock);
+
+       /*
+        * Advance *ppos rather than filp->f_pos: the VFS copies *ppos
+        * back into f_pos after ->write() returns, so a direct f_pos
+        * update would be overwritten (and pwrite() would misbehave).
+        */
+       *ppos += cnt;
+
+       return cnt;
+}
+
+/* Report the name of the current tracer (a bare newline if none is set). */
+static ssize_t
+tracing_set_trace_read(struct file *filp, char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+       char name[max_tracer_type_len+2];
+       int len;
+
+       mutex_lock(&trace_types_lock);
+       len = sprintf(name, "%s\n",
+                     current_trace ? current_trace->name : "");
+       mutex_unlock(&trace_types_lock);
+
+       return simple_read_from_buffer(ubuf, cnt, ppos, name, len);
+}
+
+static ssize_t
+tracing_set_trace_write(struct file *filp, const char __user *ubuf,
+                       size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = &global_trace;
+       struct tracer *t;
+       char buf[max_tracer_type_len+1];
+       int i;
+
+       if (cnt > max_tracer_type_len)
+               cnt = max_tracer_type_len;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+
+       /* strip ending whitespace. */
+       for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
+               buf[i] = 0;
+
+       mutex_lock(&trace_types_lock);
+       for (t = trace_types; t; t = t->next) {
+               if (strcmp(t->name, buf) == 0)
+                       break;
+       }
+&nb