Merge branch 'perf/urgent' into perf/core
Ingo Molnar [Mon, 12 Mar 2012 19:44:07 +0000 (20:44 +0100)]
Merge reason: We are going to queue up a dependent patch.

Signed-off-by: Ingo Molnar <mingo@elte.hu>

149 files changed:
Documentation/lockup-watchdogs.txt [new file with mode: 0644]
Documentation/nmi_watchdog.txt [deleted file]
Documentation/static-keys.txt [new file with mode: 0644]
Documentation/trace/ftrace.txt
arch/Kconfig
arch/arm/include/asm/perf_event.h
arch/frv/include/asm/perf_event.h
arch/hexagon/include/asm/perf_event.h
arch/ia64/include/asm/paravirt.h
arch/ia64/kernel/paravirt.c
arch/mips/include/asm/jump_label.h
arch/powerpc/include/asm/jump_label.h
arch/powerpc/include/asm/perf_event_server.h
arch/powerpc/kernel/perf_event.c
arch/s390/include/asm/jump_label.h
arch/s390/include/asm/perf_event.h
arch/sparc/include/asm/jump_label.h
arch/x86/include/asm/inat.h
arch/x86/include/asm/insn.h
arch/x86/include/asm/jump_label.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/perf_event.h
arch/x86/kernel/Makefile
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/kprobes-common.h [new file with mode: 0644]
arch/x86/kernel/kprobes-opt.c [new file with mode: 0644]
arch/x86/kernel/kprobes.c
arch/x86/kernel/kvm.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/process.c
arch/x86/kvm/mmu_audit.c
arch/x86/lib/inat.c
arch/x86/lib/insn.c
drivers/cpuidle/cpuidle.c
fs/exec.c
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/interrupt.h
include/linux/jump_label.h
include/linux/netdevice.h
include/linux/netfilter.h
include/linux/perf_event.h
include/linux/static_key.h [new file with mode: 0644]
include/linux/tracepoint.h
include/net/sock.h
include/trace/events/power.h
include/trace/events/printk.h [new file with mode: 0644]
include/trace/events/sched.h
include/trace/events/signal.h
kernel/events/core.c
kernel/events/hw_breakpoint.c
kernel/irq/chip.c
kernel/jump_label.c
kernel/printk.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/signal.c
kernel/softirq.c
kernel/trace/ftrace.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_entries.h
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_export.c
kernel/trace/trace_kprobe.c
kernel/trace/trace_output.c
kernel/trace/trace_syscalls.c
kernel/tracepoint.c
kernel/watchdog.c
lib/Kconfig.debug
net/core/dev.c
net/core/net-sysfs.c
net/core/sock.c
net/core/sysctl_net_core.c
net/ipv4/tcp_memcontrol.c
net/netfilter/core.c
tools/perf/Documentation/Makefile
tools/perf/Documentation/perf-lock.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-stat.txt
tools/perf/Documentation/perf-top.txt
tools/perf/MANIFEST
tools/perf/Makefile
tools/perf/bench/bench.h
tools/perf/bench/mem-memcpy-x86-64-asm-def.h
tools/perf/bench/mem-memcpy-x86-64-asm.S
tools/perf/bench/mem-memcpy.c
tools/perf/bench/mem-memset-arch.h [new file with mode: 0644]
tools/perf/bench/mem-memset-x86-64-asm-def.h [new file with mode: 0644]
tools/perf/bench/mem-memset-x86-64-asm.S [new file with mode: 0644]
tools/perf/bench/mem-memset.c [new file with mode: 0644]
tools/perf/builtin-bench.c
tools/perf/builtin-lock.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-test.c
tools/perf/builtin-top.c
tools/perf/perf.h
tools/perf/python/twatch.py
tools/perf/util/annotate.c
tools/perf/util/bitmap.c
tools/perf/util/cpumap.c
tools/perf/util/cpumap.h
tools/perf/util/ctype.c
tools/perf/util/debugfs.c
tools/perf/util/debugfs.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.h
tools/perf/util/include/asm/dwarf2.h
tools/perf/util/include/linux/bitmap.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/python-ext-sources [new file with mode: 0644]
tools/perf/util/python.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/setup.py
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/sysfs.c [new file with mode: 0644]
tools/perf/util/sysfs.h [new file with mode: 0644]
tools/perf/util/thread_map.c
tools/perf/util/thread_map.h
tools/perf/util/top.c
tools/perf/util/top.h
tools/perf/util/trace-event-parse.c
tools/perf/util/trace-event-read.c
tools/perf/util/trace-event-scripting.c
tools/perf/util/ui/browsers/annotate.c
tools/perf/util/ui/browsers/hists.c
tools/perf/util/ui/browsers/map.c
tools/perf/util/usage.c
tools/perf/util/util.c
tools/perf/util/util.h

diff --git a/Documentation/lockup-watchdogs.txt b/Documentation/lockup-watchdogs.txt
new file mode 100644 (file)
index 0000000..d2a3660
--- /dev/null
@@ -0,0 +1,63 @@
+===============================================================
+Softlockup detector and hardlockup detector (aka nmi_watchdog)
+===============================================================
+
+The Linux kernel can act as a watchdog to detect both soft and hard
+lockups.
+
+A 'softlockup' is defined as a bug that causes the kernel to loop in
+kernel mode for more than 20 seconds (see "Implementation" below for
+details), without giving other tasks a chance to run. The current
+stack trace is displayed upon detection and, by default, the system
+will stay locked up. Alternatively, the kernel can be configured to
+panic; a sysctl, "kernel.softlockup_panic", a kernel parameter,
+"softlockup_panic" (see "Documentation/kernel-parameters.txt" for
+details), and a compile option, "BOOTPARAM_SOFTLOCKUP_PANIC", are
+provided for this.
+
+A 'hardlockup' is defined as a bug that causes the CPU to loop in
+kernel mode for more than 10 seconds (see "Implementation" below for
+details), without letting other interrupts have a chance to run.
+Similarly to the softlockup case, the current stack trace is displayed
+upon detection and the system will stay locked up unless the default
+behavior is changed, which can be done through a compile time knob,
+"BOOTPARAM_HARDLOCKUP_PANIC", and a kernel parameter, "nmi_watchdog"
+(see "Documentation/kernel-parameters.txt" for details).
+
+The panic option can be used in combination with panic_timeout (this
+timeout is set through the confusingly named "kernel.panic" sysctl),
+to cause the system to reboot automatically after a specified amount
+of time.
+
+=== Implementation ===
+
+The soft and hard lockup detectors are built on top of the hrtimer and
+perf subsystems, respectively. A direct consequence of this is that,
+in principle, they should work on any architecture where these
+subsystems are present.
+
+A periodic hrtimer runs to generate interrupts and kick the watchdog
+task. An NMI perf event is generated every "watchdog_thresh"
+(compile-time initialized to 10 and configurable through the sysctl of
+the same name) seconds to check for hardlockups. If any CPU in the
+system does not receive any hrtimer interrupt during that time, the
+'hardlockup detector' (the handler for the NMI perf event) will
+generate a kernel warning or call panic, depending on the
+configuration.
+
+The watchdog task is a high priority kernel thread that updates a
+timestamp every time it is scheduled. If that timestamp is not updated
+for 2*watchdog_thresh seconds (the softlockup threshold) the
+'softlockup detector' (coded inside the hrtimer callback function)
+will dump useful debug information to the system log, after which it
+will call panic if it was instructed to do so or resume execution of
+other kernel code.
+
+The period of the hrtimer is 2*watchdog_thresh/5 (i.e. 4 seconds with
+the default threshold of 10), which means the hrtimer has two or three
+chances to generate an interrupt before the hardlockup detector kicks in.
+
+As explained above, a kernel knob is provided that allows
+administrators to configure the period of the hrtimer and the perf
+event. The right value for a particular environment is a trade-off
+between fast response to lockups and detection overhead.
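+
+=== Configuration example ===
+
+As a quick sketch (the values below are arbitrary, chosen only for
+illustration), the knobs described above can be set at run-time via
+sysctl:
+
+       # Panic when a softlockup is detected,
+       sysctl -w kernel.softlockup_panic=1
+       # check for hardlockups every 10 seconds (softlockups: 20s),
+       sysctl -w kernel.watchdog_thresh=10
+       # and reboot 30 seconds after a panic.
+       sysctl -w kernel.panic=30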
diff --git a/Documentation/nmi_watchdog.txt b/Documentation/nmi_watchdog.txt
deleted file mode 100644 (file)
index bf9f80a..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-
-[NMI watchdog is available for x86 and x86-64 architectures]
-
-Is your system locking up unpredictably? No keyboard activity, just
-a frustrating complete hard lockup? Do you want to help us debugging
-such lockups? If all yes then this document is definitely for you.
-
-On many x86/x86-64 type hardware there is a feature that enables
-us to generate 'watchdog NMI interrupts'.  (NMI: Non Maskable Interrupt
-which get executed even if the system is otherwise locked up hard).
-This can be used to debug hard kernel lockups.  By executing periodic
-NMI interrupts, the kernel can monitor whether any CPU has locked up,
-and print out debugging messages if so.
-
-In order to use the NMI watchdog, you need to have APIC support in your
-kernel. For SMP kernels, APIC support gets compiled in automatically. For
-UP, enable either CONFIG_X86_UP_APIC (Processor type and features -> Local
-APIC support on uniprocessors) or CONFIG_X86_UP_IOAPIC (Processor type and
-features -> IO-APIC support on uniprocessors) in your kernel config.
-CONFIG_X86_UP_APIC is for uniprocessor machines without an IO-APIC.
-CONFIG_X86_UP_IOAPIC is for uniprocessor with an IO-APIC. [Note: certain
-kernel debugging options, such as Kernel Stack Meter or Kernel Tracer,
-may implicitly disable the NMI watchdog.]
-
-For x86-64, the needed APIC is always compiled in.
-
-Using local APIC (nmi_watchdog=2) needs the first performance register, so
-you can't use it for other purposes (such as high precision performance
-profiling.) However, at least oprofile and the perfctr driver disable the
-local APIC NMI watchdog automatically.
-
-To actually enable the NMI watchdog, use the 'nmi_watchdog=N' boot
-parameter.  Eg. the relevant lilo.conf entry:
-
-        append="nmi_watchdog=1"
-
-For SMP machines and UP machines with an IO-APIC use nmi_watchdog=1.
-For UP machines without an IO-APIC use nmi_watchdog=2, this only works
-for some processor types.  If in doubt, boot with nmi_watchdog=1 and
-check the NMI count in /proc/interrupts; if the count is zero then
-reboot with nmi_watchdog=2 and check the NMI count.  If it is still
-zero then log a problem, you probably have a processor that needs to be
-added to the nmi code.
-
-A 'lockup' is the following scenario: if any CPU in the system does not
-execute the period local timer interrupt for more than 5 seconds, then
-the NMI handler generates an oops and kills the process. This
-'controlled crash' (and the resulting kernel messages) can be used to
-debug the lockup. Thus whenever the lockup happens, wait 5 seconds and
-the oops will show up automatically. If the kernel produces no messages
-then the system has crashed so hard (eg. hardware-wise) that either it
-cannot even accept NMI interrupts, or the crash has made the kernel
-unable to print messages.
-
-Be aware that when using local APIC, the frequency of NMI interrupts
-it generates, depends on the system load. The local APIC NMI watchdog,
-lacking a better source, uses the "cycles unhalted" event. As you may
-guess it doesn't tick when the CPU is in the halted state (which happens
-when the system is idle), but if your system locks up on anything but the
-"hlt" processor instruction, the watchdog will trigger very soon as the
-"cycles unhalted" event will happen every clock tick. If it locks up on
-"hlt", then you are out of luck -- the event will not happen at all and the
-watchdog won't trigger. This is a shortcoming of the local APIC watchdog
--- unfortunately there is no "clock ticks" event that would work all the
-time. The I/O APIC watchdog is driven externally and has no such shortcoming.
-But its NMI frequency is much higher, resulting in a more significant hit
-to the overall system performance.
-
-On x86 nmi_watchdog is disabled by default so you have to enable it with
-a boot time parameter.
-
-It's possible to disable the NMI watchdog in run-time by writing "0" to
-/proc/sys/kernel/nmi_watchdog. Writing "1" to the same file will re-enable
-the NMI watchdog. Notice that you still need to use "nmi_watchdog=" parameter
-at boot time.
-
-NOTE: In kernels prior to 2.4.2-ac18 the NMI-oopser is enabled unconditionally
-on x86 SMP boxes.
-
-[ feel free to send bug reports, suggestions and patches to
-  Ingo Molnar <mingo@redhat.com> or the Linux SMP mailing
-  list at <linux-smp@vger.kernel.org> ]
-
diff --git a/Documentation/static-keys.txt b/Documentation/static-keys.txt
new file mode 100644 (file)
index 0000000..d93f3c0
--- /dev/null
@@ -0,0 +1,286 @@
+                       Static Keys
+                       -----------
+
+By: Jason Baron <jbaron@redhat.com>
+
+0) Abstract
+
+Static keys allow the inclusion of seldom-used features in
+performance-sensitive fast-path kernel code, via a GCC feature and a code
+patching technique. A quick example:
+
+       struct static_key key = STATIC_KEY_INIT_FALSE;
+
+       ...
+
+        if (static_key_false(&key))
+                do unlikely code
+        else
+                do likely code
+
+       ...
+       static_key_slow_inc(&key);
+       ...
+       static_key_slow_dec(&key);
+       ...
+
+The static_key_false() branch will be generated into the code with as little
+impact on the likely code path as possible.
+
+
+1) Motivation
+
+
+Currently, tracepoints are implemented using a conditional branch. The
+conditional check requires checking a global variable for each tracepoint.
+Although the overhead of this check is small, it increases when the memory
+cache comes under pressure (memory cache lines for these global variables may
+be shared with other memory accesses). As we increase the number of tracepoints
+in the kernel this overhead may become more of an issue. In addition,
+tracepoints are often dormant (disabled) and provide no direct kernel
+functionality. Thus, it is highly desirable to reduce their impact as much as
+possible. Although tracepoints are the original motivation for this work, other
+kernel code paths should be able to make use of the static keys facility.
+
+
+2) Solution
+
+
+gcc (v4.5) adds a new 'asm goto' statement that allows branching to a label:
+
+http://gcc.gnu.org/ml/gcc-patches/2009-07/msg01556.html
+
+Using the 'asm goto', we can create branches that are either taken or not taken
+by default, without the need to check memory. Then, at run-time, we can patch
+the branch site to change the branch direction.
+
+For example, if we have a simple branch that is disabled by default:
+
+       if (static_key_false(&key))
+               printk("I am the true branch\n");
+
+Thus, by default the 'printk' will not be executed, and the generated code
+will consist of a single atomic 'no-op' instruction (5 bytes on x86) in the
+straight-line code path. When the branch is 'flipped', we patch the
+'no-op' in the straight-line codepath with a 'jump' instruction to the
+out-of-line true branch. Thus, changing branch direction is expensive but
+branch selection is basically 'free'. That is the basic tradeoff of this
+optimization.
+
+This low-level patching mechanism is called 'jump label patching', and it
+forms the basis of the static keys facility.
+
+3) Static key label API, usage and examples:
+
+
+In order to make use of this optimization you must first define a key:
+
+       struct static_key key;
+
+which is then initialized as either:
+
+       struct static_key key = STATIC_KEY_INIT_TRUE;
+
+or:
+
+       struct static_key key = STATIC_KEY_INIT_FALSE;
+
+If the key is not initialized, it defaults to false. The 'struct static_key'
+must be a global; that is, it cannot be allocated on the stack or dynamically
+allocated at run-time.
+
+The key is then used in code as:
+
+        if (static_key_false(&key))
+                do unlikely code
+        else
+                do likely code
+
+Or:
+
+        if (static_key_true(&key))
+                do likely code
+        else
+                do unlikely code
+
+A key that is initialized via 'STATIC_KEY_INIT_FALSE' must be used in a
+'static_key_false()' construct. Likewise, a key initialized via
+'STATIC_KEY_INIT_TRUE' must be used in a 'static_key_true()' construct. A
+single key can be used in many branches, but all the branches must match the
+way that the key has been initialized.
+
+The branch(es) can then be switched via:
+
+       static_key_slow_inc(&key);
+       ...
+       static_key_slow_dec(&key);
+
+Thus, 'static_key_slow_inc()' means 'make the branch true', and
+'static_key_slow_dec()' means 'make the branch false', with appropriate
+reference counting. For example, if the key is initialized true, a
+static_key_slow_dec() will switch the branch to false, and a subsequent
+static_key_slow_inc() will change the branch back to true. Likewise, if the
+key is initialized false, a static_key_slow_inc() will change the branch to
+true, and a static_key_slow_dec() will again make the branch false.
+
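+Putting the pieces together, here is a minimal sketch of typical usage
+(the 'use_debug_path' key and the helpers around it are illustrative
+names, not code from the tree):
+
+       static struct static_key use_debug_path = STATIC_KEY_INIT_FALSE;
+
+       void hot_path(void)
+       {
+               if (static_key_false(&use_debug_path))
+                       do_debug_accounting();  /* rare, out-of-line */
+               /* the likely path falls straight through a no-op */
+       }
+
+       /* Slow path, e.g. a sysctl or debugfs handler: */
+       void set_debug_path(bool enable)
+       {
+               if (enable)
+                       static_key_slow_inc(&use_debug_path);
+               else
+                       static_key_slow_dec(&use_debug_path);
+       }
+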
+An example usage in the kernel is the implementation of tracepoints:
+
+        static inline void trace_##name(proto)                          \
+        {                                                               \
+                if (static_key_false(&__tracepoint_##name.key))                \
+                        __DO_TRACE(&__tracepoint_##name,                \
+                                TP_PROTO(data_proto),                   \
+                                TP_ARGS(data_args),                     \
+                                TP_CONDITION(cond));                    \
+        }
+
+Tracepoints are disabled by default, and can be placed in performance critical
+pieces of the kernel. Thus, by using a static key, the tracepoints can have
+absolutely minimal impact when not in use.
+
+
+4) Architecture level code patching interface, 'jump labels'
+
+
+There are a few functions and macros that architectures must implement in order
+to take advantage of this optimization. If there is no architecture support, we
+simply fall back to a traditional load, test, and jump sequence.
+
+* select HAVE_ARCH_JUMP_LABEL, see: arch/x86/Kconfig
+
+* #define JUMP_LABEL_NOP_SIZE, see: arch/x86/include/asm/jump_label.h
+
+* __always_inline bool arch_static_branch(struct static_key *key), see:
+                                       arch/x86/include/asm/jump_label.h
+
+* void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type),
+                                       see: arch/x86/kernel/jump_label.c
+
+* __init_or_module void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type),
+                                       see: arch/x86/kernel/jump_label.c
+
+
+* struct jump_entry, see: arch/x86/include/asm/jump_label.h
+
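+If none of these are implemented, the generic code falls back to testing
+the key directly. Conceptually (a sketch of the non-asm-goto fallback in
+include/linux/jump_label.h), static_key_false() then reduces to:
+
+       static __always_inline bool static_key_false(struct static_key *key)
+       {
+               if (unlikely(atomic_read(&key->enabled) > 0))
+                       return true;
+               return false;
+       }
+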
+
+5) Static keys / jump label analysis, results (x86_64):
+
+
+As an example, let's add the following branch to 'getppid()', such that the
+system call now looks like:
+
+SYSCALL_DEFINE0(getppid)
+{
+        int pid;
+
++       if (static_key_false(&key))
++               printk("I am the true branch\n");
+
+        rcu_read_lock();
+        pid = task_tgid_vnr(rcu_dereference(current->real_parent));
+        rcu_read_unlock();
+
+        return pid;
+}
+
+The resulting instructions, with jump labels generated by GCC, are:
+
+ffffffff81044290 <sys_getppid>:
+ffffffff81044290:       55                      push   %rbp
+ffffffff81044291:       48 89 e5                mov    %rsp,%rbp
+ffffffff81044294:       e9 00 00 00 00          jmpq   ffffffff81044299 <sys_getppid+0x9>
+ffffffff81044299:       65 48 8b 04 25 c0 b6    mov    %gs:0xb6c0,%rax
+ffffffff810442a0:       00 00
+ffffffff810442a2:       48 8b 80 80 02 00 00    mov    0x280(%rax),%rax
+ffffffff810442a9:       48 8b 80 b0 02 00 00    mov    0x2b0(%rax),%rax
+ffffffff810442b0:       48 8b b8 e8 02 00 00    mov    0x2e8(%rax),%rdi
+ffffffff810442b7:       e8 f4 d9 00 00          callq  ffffffff81051cb0 <pid_vnr>
+ffffffff810442bc:       5d                      pop    %rbp
+ffffffff810442bd:       48 98                   cltq
+ffffffff810442bf:       c3                      retq
+ffffffff810442c0:       48 c7 c7 e3 54 98 81    mov    $0xffffffff819854e3,%rdi
+ffffffff810442c7:       31 c0                   xor    %eax,%eax
+ffffffff810442c9:       e8 71 13 6d 00          callq  ffffffff8171563f <printk>
+ffffffff810442ce:       eb c9                   jmp    ffffffff81044299 <sys_getppid+0x9>
+
+Without the jump label optimization it looks like:
+
+ffffffff810441f0 <sys_getppid>:
+ffffffff810441f0:       8b 05 8a 52 d8 00       mov    0xd8528a(%rip),%eax        # ffffffff81dc9480 <key>
+ffffffff810441f6:       55                      push   %rbp
+ffffffff810441f7:       48 89 e5                mov    %rsp,%rbp
+ffffffff810441fa:       85 c0                   test   %eax,%eax
+ffffffff810441fc:       75 27                   jne    ffffffff81044225 <sys_getppid+0x35>
+ffffffff810441fe:       65 48 8b 04 25 c0 b6    mov    %gs:0xb6c0,%rax
+ffffffff81044205:       00 00
+ffffffff81044207:       48 8b 80 80 02 00 00    mov    0x280(%rax),%rax
+ffffffff8104420e:       48 8b 80 b0 02 00 00    mov    0x2b0(%rax),%rax
+ffffffff81044215:       48 8b b8 e8 02 00 00    mov    0x2e8(%rax),%rdi
+ffffffff8104421c:       e8 2f da 00 00          callq  ffffffff81051c50 <pid_vnr>
+ffffffff81044221:       5d                      pop    %rbp
+ffffffff81044222:       48 98                   cltq
+ffffffff81044224:       c3                      retq
+ffffffff81044225:       48 c7 c7 13 53 98 81    mov    $0xffffffff81985313,%rdi
+ffffffff8104422c:       31 c0                   xor    %eax,%eax
+ffffffff8104422e:       e8 60 0f 6d 00          callq  ffffffff81715193 <printk>
+ffffffff81044233:       eb c9                   jmp    ffffffff810441fe <sys_getppid+0xe>
+ffffffff81044235:       66 66 2e 0f 1f 84 00    data32 nopw %cs:0x0(%rax,%rax,1)
+ffffffff8104423c:       00 00 00 00
+
+Thus, the disabled jump label case adds a 'mov', 'test' and 'jne' instruction,
+whereas the jump label case just has a 'no-op' or 'jmp 0'. (The 'jmp 0' is
+patched to a 5-byte atomic no-op instruction at boot time.) Thus, the disabled
+jump label case adds:
+
+6 (mov) + 2 (test) + 2 (jne) = 10 bytes, minus the 5-byte 'jmp 0' = 5
+additional bytes.
+
+If we then include the padding bytes, the jump label code saves 16 total bytes
+of instruction memory for this small function. In this case the non-jump label
+function is 80 bytes long, so we have saved 20% of the instruction
+footprint. We can in fact improve this even further, since the 5-byte no-op
+could really be a 2-byte no-op, as we can reach the branch with a 2-byte jmp.
+However, we have not yet implemented optimal no-op sizes (they are currently
+hard-coded).
+
+Since there are a number of static key API uses in the scheduler paths,
+'pipe-test' (also known as 'perf bench sched pipe') can be used to show the
+performance improvement. Testing done on 3.3.0-rc2:
+
+jump label disabled:
+
+ Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs):
+
+        855.700314 task-clock                #    0.534 CPUs utilized            ( +-  0.11% )
+           200,003 context-switches          #    0.234 M/sec                    ( +-  0.00% )
+                 0 CPU-migrations            #    0.000 M/sec                    ( +- 39.58% )
+               487 page-faults               #    0.001 M/sec                    ( +-  0.02% )
+     1,474,374,262 cycles                    #    1.723 GHz                      ( +-  0.17% )
+   <not supported> stalled-cycles-frontend
+   <not supported> stalled-cycles-backend
+     1,178,049,567 instructions              #    0.80  insns per cycle          ( +-  0.06% )
+       208,368,926 branches                  #  243.507 M/sec                    ( +-  0.06% )
+         5,569,188 branch-misses             #    2.67% of all branches          ( +-  0.54% )
+
+       1.601607384 seconds time elapsed                                          ( +-  0.07% )
+
+jump label enabled:
+
+ Performance counter stats for 'bash -c /tmp/pipe-test' (50 runs):
+
+        841.043185 task-clock                #    0.533 CPUs utilized            ( +-  0.12% )
+           200,004 context-switches          #    0.238 M/sec                    ( +-  0.00% )
+                 0 CPU-migrations            #    0.000 M/sec                    ( +- 40.87% )
+               487 page-faults               #    0.001 M/sec                    ( +-  0.05% )
+     1,432,559,428 cycles                    #    1.703 GHz                      ( +-  0.18% )
+   <not supported> stalled-cycles-frontend
+   <not supported> stalled-cycles-backend
+     1,175,363,994 instructions              #    0.82  insns per cycle          ( +-  0.04% )
+       206,859,359 branches                  #  245.956 M/sec                    ( +-  0.04% )
+         4,884,119 branch-misses             #    2.36% of all branches          ( +-  0.85% )
+
+       1.579384366 seconds time elapsed
+
+The percentage of saved branches is 0.7%, and we've saved 12% on
+'branch-misses'. This is where we would expect to get the most savings, since
+this optimization is about reducing the number of branches. In addition, we've
+saved 0.2% on instructions, 2.8% on cycles, and 1.4% on elapsed time.
index 1ebc24c..6f51fed 100644 (file)
@@ -226,6 +226,13 @@ Here is the list of current tracers that may be configured.
        Traces and records the max latency that it takes for
        the highest priority task to get scheduled after
        it has been woken up.
+        Traces all tasks as an average developer would expect.
+
+  "wakeup_rt"
+
+        Traces and records the max latency that it takes for just
+        RT tasks (as the current "wakeup" does). This is useful
+        for those interested in wake up timings of RT tasks.
 
   "hw-branch-tracer"
 
index 4f55c73..5b448a7 100644 (file)
@@ -47,18 +47,29 @@ config KPROBES
          If in doubt, say "N".
 
 config JUMP_LABEL
-       bool "Optimize trace point call sites"
+       bool "Optimize very unlikely/likely branches"
        depends on HAVE_ARCH_JUMP_LABEL
        help
+         This option enables a transparent branch optimization that
+        makes certain almost-always-true or almost-always-false branch
+        conditions even cheaper to execute within the kernel.
+
+        Certain performance-sensitive kernel code, such as trace points,
+        scheduler functionality, networking code and KVM have such
+        branches and include support for this optimization technique.
+
          If it is detected that the compiler has support for "asm goto",
-        the kernel will compile trace point locations with just a
-        nop instruction. When trace points are enabled, the nop will
-        be converted to a jump to the trace function. This technique
-        lowers overhead and stress on the branch prediction of the
-        processor.
-
-        On i386, options added to the compiler flags may increase
-        the size of the kernel slightly.
+        the kernel will compile such branches with just a nop
+        instruction. When the condition flag is toggled to true, the
+        nop will be converted to a jump instruction to execute the
+        conditional block of instructions.
+
+        This technique lowers overhead and stress on the branch prediction
        of the processor and generally makes the kernel faster. Updating
        the condition is slower, but such updates are rare.
+
+        ( On 32-bit x86, the necessary options added to the compiler
+          flags may increase the size of the kernel slightly. )
 
 config OPTPROBES
        def_bool y
index 99cfe36..7523340 100644 (file)
 #ifndef __ARM_PERF_EVENT_H__
 #define __ARM_PERF_EVENT_H__
 
-/* ARM performance counters start from 1 (in the cp15 accesses) so use the
- * same indexes here for consistency. */
-#define PERF_EVENT_INDEX_OFFSET 1
-
 /* ARM perf PMU IDs for use by internal perf clients. */
 enum arm_perf_pmu_ids {
        ARM_PERF_PMU_ID_XSCALE1 = 0,
index a69e015..c52ea55 100644 (file)
@@ -12,6 +12,4 @@
 #ifndef _ASM_PERF_EVENT_H
 #define _ASM_PERF_EVENT_H
 
-#define PERF_EVENT_INDEX_OFFSET        0
-
 #endif /* _ASM_PERF_EVENT_H */
index 6c2910f..8b8526b 100644 (file)
@@ -19,6 +19,4 @@
 #ifndef _ASM_PERF_EVENT_H
 #define _ASM_PERF_EVENT_H
 
-#define PERF_EVENT_INDEX_OFFSET        0
-
 #endif /* _ASM_PERF_EVENT_H */
index 32551d3..b149b88 100644 (file)
@@ -281,9 +281,9 @@ paravirt_init_missing_ticks_accounting(int cpu)
                pv_time_ops.init_missing_ticks_accounting(cpu);
 }
 
-struct jump_label_key;
-extern struct jump_label_key paravirt_steal_enabled;
-extern struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
 
 static inline int
 paravirt_do_steal_accounting(unsigned long *new_itm)
index 1008682..1b22f6d 100644 (file)
@@ -634,8 +634,8 @@ struct pv_irq_ops pv_irq_ops = {
  * pv_time_ops
  * time operations
  */
-struct jump_label_key paravirt_steal_enabled;
-struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
 
 static int
 ia64_native_do_steal_accounting(unsigned long *new_itm)
index 1881b31..4d6d77e 100644 (file)
@@ -20,7 +20,7 @@
 #define WORD_INSN ".word"
 #endif
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
        asm goto("1:\tnop\n\t"
                "nop\n\t"
index 938986e..ae098c4 100644 (file)
@@ -17,7 +17,7 @@
 #define JUMP_ENTRY_TYPE                stringify_in_c(FTR_ENTRY_LONG)
 #define JUMP_LABEL_NOP_SIZE    4
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
        asm goto("1:\n\t"
                 "nop\n\t"
index 8f1df12..1a8093f 100644 (file)
@@ -61,8 +61,6 @@ struct pt_regs;
 extern unsigned long perf_misc_flags(struct pt_regs *regs);
 extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
 
-#define PERF_EVENT_INDEX_OFFSET        1
-
 /*
  * Only override the default definitions in include/linux/perf_event.h
  * if we have hardware PMU support.
index 64483fd..f04c230 100644 (file)
@@ -1193,6 +1193,11 @@ static int power_pmu_event_init(struct perf_event *event)
        return err;
 }
 
+static int power_pmu_event_idx(struct perf_event *event)
+{
+       return event->hw.idx;
+}
+
 struct pmu power_pmu = {
        .pmu_enable     = power_pmu_enable,
        .pmu_disable    = power_pmu_disable,
@@ -1205,6 +1210,7 @@ struct pmu power_pmu = {
        .start_txn      = power_pmu_start_txn,
        .cancel_txn     = power_pmu_cancel_txn,
        .commit_txn     = power_pmu_commit_txn,
+       .event_idx      = power_pmu_event_idx,
 };
 
 /*
index 95a6cf2..6c32190 100644 (file)
@@ -13,7 +13,7 @@
 #define ASM_ALIGN ".balign 4"
 #endif
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
        asm goto("0:    brcl 0,0\n"
                ".pushsection __jump_table, \"aw\"\n"
index a75f168..4eb444e 100644 (file)
@@ -6,4 +6,3 @@
 
 /* Empty, just to avoid compiling error */
 
-#define PERF_EVENT_INDEX_OFFSET 0
index fc73a82..5080d16 100644 (file)
@@ -7,7 +7,7 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
                asm goto("1:\n\t"
                         "nop\n\t"
index 205b063..74a2e31 100644 (file)
 
 /* Attribute search APIs */
 extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
 extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
-                                            insn_byte_t last_pfx,
+                                            int lpfx_id,
                                             insn_attr_t esc_attr);
 extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
-                                           insn_byte_t last_pfx,
+                                           int lpfx_id,
                                            insn_attr_t esc_attr);
 extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
                                          insn_byte_t vex_m,
index 74df3f1..48eb30a 100644 (file)
@@ -96,12 +96,6 @@ struct insn {
 #define X86_VEX_P(vex) ((vex) & 0x03)          /* VEX3 Byte2, VEX2 Byte1 */
 #define X86_VEX_M_MAX  0x1f                    /* VEX3.M Maximum value */
 
-/* The last prefix is needed for two-byte and three-byte opcodes */
-static inline insn_byte_t insn_last_prefix(struct insn *insn)
-{
-       return insn->prefixes.bytes[3];
-}
-
 extern void insn_init(struct insn *insn, const void *kaddr, int x86_64);
 extern void insn_get_prefixes(struct insn *insn);
 extern void insn_get_opcode(struct insn *insn);
@@ -160,6 +154,18 @@ static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
                return X86_VEX_P(insn->vex_prefix.bytes[2]);
 }
 
+/* Get the last prefix id from last prefix or VEX prefix */
+static inline int insn_last_prefix_id(struct insn *insn)
+{
+       if (insn_is_avx(insn))
+               return insn_vex_p_bits(insn);   /* VEX_p is a SIMD prefix id */
+
+       if (insn->prefixes.bytes[3])
+               return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
+
+       return 0;
+}
+
 /* Offset of each field from kaddr */
 static inline int insn_offset_rex_prefix(struct insn *insn)
 {
index a32b18c..3a16c14 100644 (file)
@@ -9,12 +9,12 @@
 
 #define JUMP_LABEL_NOP_SIZE 5
 
-#define JUMP_LABEL_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
+#define STATIC_KEY_INITIAL_NOP ".byte 0xe9 \n\t .long 0\n\t"
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
        asm goto("1:"
-               JUMP_LABEL_INITIAL_NOP
+               STATIC_KEY_INITIAL_NOP
                ".pushsection __jump_table,  \"aw\" \n\t"
                _ASM_ALIGN "\n\t"
                _ASM_PTR "1b, %l[l_yes], %c0 \n\t"
index a7d2db9..c0180fd 100644 (file)
@@ -230,9 +230,9 @@ static inline unsigned long long paravirt_sched_clock(void)
        return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
 }
 
-struct jump_label_key;
-extern struct jump_label_key paravirt_steal_enabled;
-extern struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key;
+extern struct static_key paravirt_steal_enabled;
+extern struct static_key paravirt_steal_rq_enabled;
 
 static inline u64 paravirt_steal_clock(int cpu)
 {
index 461ce43..e8fb2c7 100644 (file)
@@ -188,8 +188,6 @@ extern u32 get_ibs_caps(void);
 #ifdef CONFIG_PERF_EVENTS
 extern void perf_events_lapic_init(void);
 
-#define PERF_EVENT_INDEX_OFFSET                        0
-
 /*
  * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
  * This flag is otherwise unused and ABI specified to be 0, so nobody should
index 5369059..532d2e0 100644 (file)
@@ -69,6 +69,7 @@ obj-$(CONFIG_KEXEC)           += machine_kexec_$(BITS).o
 obj-$(CONFIG_KEXEC)            += relocate_kernel_$(BITS).o crash.o
 obj-$(CONFIG_CRASH_DUMP)       += crash_dump_$(BITS).o
 obj-$(CONFIG_KPROBES)          += kprobes.o
+obj-$(CONFIG_OPTPROBES)                += kprobes-opt.o
 obj-$(CONFIG_MODULES)          += module.o
 obj-$(CONFIG_DOUBLEFAULT)      += doublefault_32.o
 obj-$(CONFIG_KGDB)             += kgdb.o
index f4773f4..0a44b90 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/mm.h>
 
 #include <linux/io.h>
+#include <linux/sched.h>
 #include <asm/processor.h>
 #include <asm/apic.h>
 #include <asm/cpu.h>
@@ -456,6 +457,8 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
        if (c->x86_power & (1 << 8)) {
                set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
                set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
+               if (!check_tsc_unstable())
+                       sched_clock_stable = 1;
        }
 
 #ifdef CONFIG_X86_64
index 5adce10..f8bddb5 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/bitops.h>
+#include <linux/device.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
@@ -31,6 +32,7 @@
 #include <asm/compat.h>
 #include <asm/smp.h>
 #include <asm/alternative.h>
+#include <asm/timer.h>
 
 #include "perf_event.h"
 
@@ -1210,6 +1212,8 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
                break;
 
        case CPU_STARTING:
+               if (x86_pmu.attr_rdpmc)
+                       set_in_cr4(X86_CR4_PCE);
                if (x86_pmu.cpu_starting)
                        x86_pmu.cpu_starting(cpu);
                break;
@@ -1319,6 +1323,8 @@ static int __init init_hw_perf_events(void)
                }
        }
 
+       x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+
        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.cntval_bits);
        pr_info("... generic registers:      %d\n",     x86_pmu.num_counters);
@@ -1542,10 +1548,71 @@ static int x86_pmu_event_init(struct perf_event *event)
        return err;
 }
 
+static int x86_pmu_event_idx(struct perf_event *event)
+{
+       int idx = event->hw.idx;
+
+       if (x86_pmu.num_counters_fixed && idx >= X86_PMC_IDX_FIXED) {
+               idx -= X86_PMC_IDX_FIXED;
+               idx |= 1 << 30;
+       }
+
+       return idx + 1;
+}
+
+static ssize_t get_attr_rdpmc(struct device *cdev,
+                             struct device_attribute *attr,
+                             char *buf)
+{
+       return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
+}
+
+static void change_rdpmc(void *info)
+{
+       bool enable = !!(unsigned long)info;
+
+       if (enable)
+               set_in_cr4(X86_CR4_PCE);
+       else
+               clear_in_cr4(X86_CR4_PCE);
+}
+
+static ssize_t set_attr_rdpmc(struct device *cdev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
+{
+       unsigned long val = simple_strtoul(buf, NULL, 0);
+
+       if (!!val != !!x86_pmu.attr_rdpmc) {
+               x86_pmu.attr_rdpmc = !!val;
+               smp_call_function(change_rdpmc, (void *)val, 1);
+       }
+
+       return count;
+}
+
+static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
+
+static struct attribute *x86_pmu_attrs[] = {
+       &dev_attr_rdpmc.attr,
+       NULL,
+};
+
+static struct attribute_group x86_pmu_attr_group = {
+       .attrs = x86_pmu_attrs,
+};
+
+static const struct attribute_group *x86_pmu_attr_groups[] = {
+       &x86_pmu_attr_group,
+       NULL,
+};
+
 static struct pmu pmu = {
        .pmu_enable     = x86_pmu_enable,
        .pmu_disable    = x86_pmu_disable,
 
+       .attr_groups    = x86_pmu_attr_groups,
+
        .event_init     = x86_pmu_event_init,
 
        .add            = x86_pmu_add,
@@ -1557,8 +1624,23 @@ static struct pmu pmu = {
        .start_txn      = x86_pmu_start_txn,
        .cancel_txn     = x86_pmu_cancel_txn,
        .commit_txn     = x86_pmu_commit_txn,
+
+       .event_idx      = x86_pmu_event_idx,
 };
 
+void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+{
+       if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
+               return;
+
+       if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
+               return;
+
+       userpg->time_mult = this_cpu_read(cyc2ns);
+       userpg->time_shift = CYC2NS_SCALE_FACTOR;
+       userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
+}
+
 /*
  * callchain support
  */
index c30c807..82db83b 100644 (file)
@@ -309,6 +309,14 @@ struct x86_pmu {
        struct x86_pmu_quirk *quirks;
        int             perfctr_second_write;
 
+       /*
+        * sysfs attrs
+        */
+       int             attr_rdpmc;
+
+       /*
+        * CPU Hotplug hooks
+        */
        int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
diff --git a/arch/x86/kernel/kprobes-common.h b/arch/x86/kernel/kprobes-common.h
new file mode 100644 (file)
index 0000000..3230b68
--- /dev/null
@@ -0,0 +1,102 @@
+#ifndef __X86_KERNEL_KPROBES_COMMON_H
+#define __X86_KERNEL_KPROBES_COMMON_H
+
+/* Kprobes and Optprobes common header */
+
+#ifdef CONFIG_X86_64
+#define SAVE_REGS_STRING                       \
+       /* Skip cs, ip, orig_ax. */             \
+       "       subq $24, %rsp\n"               \
+       "       pushq %rdi\n"                   \
+       "       pushq %rsi\n"                   \
+       "       pushq %rdx\n"                   \
+       "       pushq %rcx\n"                   \
+       "       pushq %rax\n"                   \
+       "       pushq %r8\n"                    \
+       "       pushq %r9\n"                    \
+       "       pushq %r10\n"                   \
+       "       pushq %r11\n"                   \
+       "       pushq %rbx\n"                   \
+       "       pushq %rbp\n"                   \
+       "       pushq %r12\n"                   \
+       "       pushq %r13\n"                   \
+       "       pushq %r14\n"                   \
+       "       pushq %r15\n"
+#define RESTORE_REGS_STRING                    \
+       "       popq %r15\n"                    \
+       "       popq %r14\n"                    \
+       "       popq %r13\n"                    \
+       "       popq %r12\n"                    \
+       "       popq %rbp\n"                    \
+       "       popq %rbx\n"                    \
+       "       popq %r11\n"                    \
+       "       popq %r10\n"                    \
+       "       popq %r9\n"                     \
+       "       popq %r8\n"                     \
+       "       popq %rax\n"                    \
+       "       popq %rcx\n"                    \
+       "       popq %rdx\n"                    \
+       "       popq %rsi\n"                    \
+       "       popq %rdi\n"                    \
+       /* Skip orig_ax, ip, cs */              \
+       "       addq $24, %rsp\n"
+#else
+#define SAVE_REGS_STRING                       \
+       /* Skip cs, ip, orig_ax and gs. */      \
+       "       subl $16, %esp\n"               \
+       "       pushl %fs\n"                    \
+       "       pushl %es\n"                    \
+       "       pushl %ds\n"                    \
+       "       pushl %eax\n"                   \
+       "       pushl %ebp\n"                   \
+       "       pushl %edi\n"                   \
+       "       pushl %esi\n"                   \
+       "       pushl %edx\n"                   \
+       "       pushl %ecx\n"                   \
+       "       pushl %ebx\n"
+#define RESTORE_REGS_STRING                    \
+       "       popl %ebx\n"                    \
+       "       popl %ecx\n"                    \
+       "       popl %edx\n"                    \
+       "       popl %esi\n"                    \
+       "       popl %edi\n"                    \
+       "       popl %ebp\n"                    \
+       "       popl %eax\n"                    \
+       /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
+       "       addl $24, %esp\n"
+#endif
+
+/* Check whether the instruction can be boosted */
+extern int can_boost(kprobe_opcode_t *instruction);
+/* Recover instruction if given address is probed */
+extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
+                                        unsigned long addr);
+/*
+ * Copy an instruction and adjust the displacement if the instruction
+ * uses the %rip-relative addressing mode.
+ */
+extern int __copy_instruction(u8 *dest, u8 *src);
+
+/* Generate a relative-jump/call instruction */
+extern void synthesize_reljump(void *from, void *to);
+extern void synthesize_relcall(void *from, void *to);
+
+#ifdef CONFIG_OPTPROBES
+extern int arch_init_optprobes(void);
+extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter);
+extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr);
+#else  /* !CONFIG_OPTPROBES */
+static inline int arch_init_optprobes(void)
+{
+       return 0;
+}
+static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
+{
+       return 0;
+}
+static inline unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
+{
+       return addr;
+}
+#endif
+#endif
diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
new file mode 100644 (file)
index 0000000..c5e410e
--- /dev/null
@@ -0,0 +1,512 @@
+/*
+ *  Kernel Probes Jump Optimization (Optprobes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Hitachi Ltd., 2012
+ */
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/hardirq.h>
+#include <linux/preempt.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+#include <linux/kallsyms.h>
+#include <linux/ftrace.h>
+
+#include <asm/cacheflush.h>
+#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+#include <asm/alternative.h>
+#include <asm/insn.h>
+#include <asm/debugreg.h>
+
+#include "kprobes-common.h"
+
+unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr)
+{
+       struct optimized_kprobe *op;
+       struct kprobe *kp;
+       long offs;
+       int i;
+
+       for (i = 0; i < RELATIVEJUMP_SIZE; i++) {
+               kp = get_kprobe((void *)addr - i);
+               /* This function only handles jump-optimized kprobe */
+               if (kp && kprobe_optimized(kp)) {
+                       op = container_of(kp, struct optimized_kprobe, kp);
+                       /* If op->list is not empty, op is under optimizing */
+                       if (list_empty(&op->list))
+                               goto found;
+               }
+       }
+
+       return addr;
+found:
+       /*
+        * If the kprobe is optimized, the original bytes may have been
+        * overwritten by the jump destination address. In this case, the
+        * original bytes must be recovered from the op->optinsn.copied_insn
+        * buffer.
+        */
+       memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+       if (addr == (unsigned long)kp->addr) {
+               buf[0] = kp->opcode;
+               memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+       } else {
+               offs = addr - (unsigned long)kp->addr - 1;
+               memcpy(buf, op->optinsn.copied_insn + offs, RELATIVE_ADDR_SIZE - offs);
+       }
+
+       return (unsigned long)buf;
+}
+
+/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
+static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
+{
+#ifdef CONFIG_X86_64
+       *addr++ = 0x48;
+       *addr++ = 0xbf;
+#else
+       *addr++ = 0xb8;
+#endif
+       *(unsigned long *)addr = val;
+}
+
+static void __used __kprobes kprobes_optinsn_template_holder(void)
+{
+       asm volatile (
+                       ".global optprobe_template_entry\n"
+                       "optprobe_template_entry:\n"
+#ifdef CONFIG_X86_64
+                       /* We don't bother saving the ss register */
+                       "       pushq %rsp\n"
+                       "       pushfq\n"
+                       SAVE_REGS_STRING
+                       "       movq %rsp, %rsi\n"
+                       ".global optprobe_template_val\n"
+                       "optprobe_template_val:\n"
+                       ASM_NOP5
+                       ASM_NOP5
+                       ".global optprobe_template_call\n"
+                       "optprobe_template_call:\n"
+                       ASM_NOP5
+                       /* Move flags to rsp */
+                       "       movq 144(%rsp), %rdx\n"
+                       "       movq %rdx, 152(%rsp)\n"
+                       RESTORE_REGS_STRING
+                       /* Skip flags entry */
+                       "       addq $8, %rsp\n"
+                       "       popfq\n"
+#else /* CONFIG_X86_32 */
+                       "       pushf\n"
+                       SAVE_REGS_STRING
+                       "       movl %esp, %edx\n"
+                       ".global optprobe_template_val\n"
+                       "optprobe_template_val:\n"
+                       ASM_NOP5
+                       ".global optprobe_template_call\n"
+                       "optprobe_template_call:\n"
+                       ASM_NOP5
+                       RESTORE_REGS_STRING
+                       "       addl $4, %esp\n"        /* skip cs */
+                       "       popf\n"
+#endif
+                       ".global optprobe_template_end\n"
+                       "optprobe_template_end:\n");
+}
+
+#define TMPL_MOVE_IDX \
+       ((long)&optprobe_template_val - (long)&optprobe_template_entry)
+#define TMPL_CALL_IDX \
+       ((long)&optprobe_template_call - (long)&optprobe_template_entry)
+#define TMPL_END_IDX \
+       ((long)&optprobe_template_end - (long)&optprobe_template_entry)
+
+#define INT3_SIZE sizeof(kprobe_opcode_t)
+
+/* Optimized kprobe call back function: called from optinsn */
+static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+{
+       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       unsigned long flags;
+
+       /* This is possible if op is under delayed unoptimization */
+       if (kprobe_disabled(&op->kp))
+               return;
+
+       local_irq_save(flags);
+       if (kprobe_running()) {
+               kprobes_inc_nmissed_count(&op->kp);
+       } else {
+               /* Save skipped registers */
+#ifdef CONFIG_X86_64
+               regs->cs = __KERNEL_CS;
+#else
+               regs->cs = __KERNEL_CS | get_kernel_rpl();
+               regs->gs = 0;
+#endif
+               regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
+               regs->orig_ax = ~0UL;
+
+               __this_cpu_write(current_kprobe, &op->kp);
+               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+               opt_pre_handler(&op->kp, regs);
+               __this_cpu_write(current_kprobe, NULL);
+       }
+       local_irq_restore(flags);
+}
+
+static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
+{
+       int len = 0, ret;
+
+       while (len < RELATIVEJUMP_SIZE) {
+               ret = __copy_instruction(dest + len, src + len);
+               if (!ret || !can_boost(dest + len))
+                       return -EINVAL;
+               len += ret;
+       }
+       /* Check whether the address range is reserved */
+       if (ftrace_text_reserved(src, src + len - 1) ||
+           alternatives_text_reserved(src, src + len - 1) ||
+           jump_label_text_reserved(src, src + len - 1))
+               return -EBUSY;
+
+       return len;
+}
+
+/* Check whether insn is indirect jump */
+static int __kprobes insn_is_indirect_jump(struct insn *insn)
+{
+       return ((insn->opcode.bytes[0] == 0xff &&
+               (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
+               insn->opcode.bytes[0] == 0xea); /* Segment based jump */
+}
+
+/* Check whether insn jumps into specified address range */
+static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
+{
+       unsigned long target = 0;
+
+       switch (insn->opcode.bytes[0]) {
+       case 0xe0:      /* loopne */
+       case 0xe1:      /* loope */
+       case 0xe2:      /* loop */
+       case 0xe3:      /* jcxz */
+       case 0xe9:      /* near relative jump */
+       case 0xeb:      /* short relative jump */
+               break;
+       case 0x0f:
+               if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
+                       break;
+               return 0;
+       default:
+               if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
+                       break;
+               return 0;
+       }
+       target = (unsigned long)insn->next_byte + insn->immediate.value;
+
+       return (start <= target && target <= start + len);
+}
+
+/* Decode the whole function to ensure no instruction jumps into the target */
+static int __kprobes can_optimize(unsigned long paddr)
+{
+       unsigned long addr, size = 0, offset = 0;
+       struct insn insn;
+       kprobe_opcode_t buf[MAX_INSN_SIZE];
+
+       /* Lookup symbol including addr */
+       if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
+               return 0;
+
+       /*
+        * Do not optimize in the entry code due to the unstable
+        * stack handling.
+        */
+       if ((paddr >= (unsigned long)__entry_text_start) &&
+           (paddr <  (unsigned long)__entry_text_end))
+               return 0;
+
+       /* Check there is enough space for a relative jump. */
+       if (size - offset < RELATIVEJUMP_SIZE)
+               return 0;
+
+       /* Decode instructions */
+       addr = paddr - offset;
+       while (addr < paddr - offset + size) { /* Decode until function end */
+               if (search_exception_tables(addr))
+                       /*
                        * Since some fixup code jumps into this function,
                        * we can't optimize kprobes in this function.
+                        */
+                       return 0;
+               kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
+               insn_get_length(&insn);
+               /* Another subsystem puts a breakpoint */
+               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+                       return 0;
+               /* Recover address */
+               insn.kaddr = (void *)addr;
+               insn.next_byte = (void *)(addr + insn.length);
+               /* Check that no instruction jumps into the target */
+               if (insn_is_indirect_jump(&insn) ||
+                   insn_jump_into_range(&insn, paddr + INT3_SIZE,
+                                        RELATIVE_ADDR_SIZE))
+                       return 0;
+               addr += insn.length;
+       }
+
+       return 1;
+}
+
+/* Check optimized_kprobe can actually be optimized. */
+int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
+{
+       int i;
+       struct kprobe *p;
+
+       for (i = 1; i < op->optinsn.size; i++) {
+               p = get_kprobe(op->kp.addr + i);
+               if (p && !kprobe_disabled(p))
+                       return -EEXIST;
+       }
+
+       return 0;
+}
+
+/* Check the addr is within the optimized instructions. */
+int __kprobes
+arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr)
+{
+       return ((unsigned long)op->kp.addr <= addr &&
+               (unsigned long)op->kp.addr + op->optinsn.size > addr);
+}
+
+/* Free optimized instruction slot */
+static __kprobes
+void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+{
+       if (op->optinsn.insn) {
+               free_optinsn_slot(op->optinsn.insn, dirty);
+               op->optinsn.insn = NULL;
+               op->optinsn.size = 0;
+       }
+}
+
+void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+{
+       __arch_remove_optimized_kprobe(op, 1);
+}
+
+/*
+ * Copy the target instructions that will be replaced by a jump.
+ * The target instructions MUST be relocatable (checked inside).
+ * This is called when a new aggr(opt)probe is allocated or reused.
+ */
+int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
+{
+       u8 *buf;
+       int ret;
+       long rel;
+
+       if (!can_optimize((unsigned long)op->kp.addr))
+               return -EILSEQ;
+
+       op->optinsn.insn = get_optinsn_slot();
+       if (!op->optinsn.insn)
+               return -ENOMEM;
+
+       /*
+        * Verify that the address gap is within the +/-2GB range
+        * reachable by a relative jump.
+        */
+       rel = (long)op->optinsn.insn - ((long)op->kp.addr + RELATIVEJUMP_SIZE);
+       if (abs(rel) > 0x7fffffff)
+               return -ERANGE;
+
+       buf = (u8 *)op->optinsn.insn;
+
+       /* Copy instructions into the out-of-line buffer */
+       ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
+       if (ret < 0) {
+               __arch_remove_optimized_kprobe(op, 0);
+               return ret;
+       }
+       op->optinsn.size = ret;
+
+       /* Copy the arch-dependent instance code from the template */
+       memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
+
+       /* Set probe information */
+       synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
+
+       /* Set probe function call */
+       synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
+
+       /* Set the returning jmp at the tail of the out-of-line buffer */
+       synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
+                          (u8 *)op->kp.addr + op->optinsn.size);
+
+       flush_icache_range((unsigned long) buf,
+                          (unsigned long) buf + TMPL_END_IDX +
+                          op->optinsn.size + RELATIVEJUMP_SIZE);
+       return 0;
+}
+
+#define MAX_OPTIMIZE_PROBES 256
+static struct text_poke_param *jump_poke_params;
+static struct jump_poke_buffer {
+       u8 buf[RELATIVEJUMP_SIZE];
+} *jump_poke_bufs;
+
+static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
+                                           u8 *insn_buf,
+                                           struct optimized_kprobe *op)
+{
+       s32 rel = (s32)((long)op->optinsn.insn -
+                       ((long)op->kp.addr + RELATIVEJUMP_SIZE));
+
+       /* Back up the instructions that will be replaced by the jump address */
+       memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
+              RELATIVE_ADDR_SIZE);
+
+       insn_buf[0] = RELATIVEJUMP_OPCODE;
+       *(s32 *)(&insn_buf[1]) = rel;
+
+       tprm->addr = op->kp.addr;
+       tprm->opcode = insn_buf;
+       tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Replace breakpoints (int3) with relative jumps.
+ * Caller must hold kprobe_mutex and text_mutex.
+ */
+void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+{
+       struct optimized_kprobe *op, *tmp;
+       int c = 0;
+
+       list_for_each_entry_safe(op, tmp, oplist, list) {
+               WARN_ON(kprobe_disabled(&op->kp));
+               /* Setup param */
+               setup_optimize_kprobe(&jump_poke_params[c],
+                                     jump_poke_bufs[c].buf, op);
+               list_del_init(&op->list);
+               if (++c >= MAX_OPTIMIZE_PROBES)
+                       break;
+       }
+
+       /*
+        * text_poke_smp doesn't support modifying code in NMI/MCE
+        * handlers. However, since kprobes itself also doesn't support
+        * probing NMI/MCE code, this is not a problem.
+        */
+       text_poke_smp_batch(jump_poke_params, c);
+}
+
+static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
+                                             u8 *insn_buf,
+                                             struct optimized_kprobe *op)
+{
+       /* Set int3 as the first byte for kprobes */
+       insn_buf[0] = BREAKPOINT_INSTRUCTION;
+       memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+
+       tprm->addr = op->kp.addr;
+       tprm->opcode = insn_buf;
+       tprm->len = RELATIVEJUMP_SIZE;
+}
+
+/*
+ * Recover original instructions and breakpoints from relative jumps.
+ * Caller must hold kprobe_mutex.
+ */
+void arch_unoptimize_kprobes(struct list_head *oplist,
+                            struct list_head *done_list)
+{
+       struct optimized_kprobe *op, *tmp;
+       int c = 0;
+
+       list_for_each_entry_safe(op, tmp, oplist, list) {
+               /* Setup param */
+               setup_unoptimize_kprobe(&jump_poke_params[c],
+                                       jump_poke_bufs[c].buf, op);
+               list_move(&op->list, done_list);
+               if (++c >= MAX_OPTIMIZE_PROBES)
+                       break;
+       }
+
+       /*
+        * text_poke_smp doesn't support modifying code in NMI/MCE
+        * handlers. However, since kprobes itself also doesn't support
+        * probing NMI/MCE code, this is not a problem.
+        */
+       text_poke_smp_batch(jump_poke_params, c);
+}
+
+/* Replace a relative jump with a breakpoint (int3).  */
+void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+       u8 buf[RELATIVEJUMP_SIZE];
+
+       /* Set int3 as the first byte for kprobes */
+       buf[0] = BREAKPOINT_INSTRUCTION;
+       memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
+       text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
+}
+
+int __kprobes
+setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
+{
+       struct optimized_kprobe *op;
+
+       if (p->flags & KPROBE_FLAG_OPTIMIZED) {
+               /* This kprobe can really run the optimized path. */
+               op = container_of(p, struct optimized_kprobe, kp);
+               /* Detour through copied instructions */
+               regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
+               if (!reenter)
+                       reset_current_kprobe();
+               preempt_enable_no_resched();
+               return 1;
+       }
+       return 0;
+}
+
+int __kprobes arch_init_optprobes(void)
+{
+       /* Allocate code buffer and parameter array */
+       jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
+                                MAX_OPTIMIZE_PROBES, GFP_KERNEL);
+       if (!jump_poke_bufs)
+               return -ENOMEM;
+
+       jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
+                                  MAX_OPTIMIZE_PROBES, GFP_KERNEL);
+       if (!jump_poke_params) {
+               kfree(jump_poke_bufs);
+               jump_poke_bufs = NULL;
+               return -ENOMEM;
+       }
+
+       return 0;
+}
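
For reference, the detour buffer that arch_prepare_optimized_kprobe() fills in
above has the layout [ template (TMPL_END_IDX bytes) | copied instructions |
jmp back ], and the probed address itself is overwritten with a 5-byte
"jmp rel32". A minimal user-space sketch of that encoding (probe_addr and
detour_buf are hypothetical stand-ins for op->kp.addr and op->optinsn.insn):

	#include <stdint.h>
	#include <string.h>

	/*
	 * Sketch only: RELATIVEJUMP_OPCODE is 0xe9 (jmp rel32); the
	 * displacement is taken relative to the end of the 5-byte
	 * instruction, which is why the code above adds RELATIVEJUMP_SIZE.
	 */
	static void synthesize_jmp(uint8_t *probe_addr, const uint8_t *detour_buf)
	{
		int32_t rel = (int32_t)(detour_buf - (probe_addr + 5));
		uint8_t insn[5];

		insn[0] = 0xe9;                      /* RELATIVEJUMP_OPCODE */
		memcpy(&insn[1], &rel, sizeof(rel)); /* rel32 displacement */
		/* the kernel would text_poke_smp() this over probe_addr */
		memcpy(probe_addr, insn, sizeof(insn));
	}
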
index 7da647d..e213fc8 100644 (file)
  *             <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
  *             <prasanna@in.ibm.com> added function-return probes.
  * 2005-May    Rusty Lynch <rusty.lynch@intel.com>
- *             Added function return probes functionality
+ *             Added function return probes functionality
  * 2006-Feb    Masami Hiramatsu <hiramatu@sdl.hitachi.co.jp> added
- *             kprobe-booster and kretprobe-booster for i386.
+ *             kprobe-booster and kretprobe-booster for i386.
  * 2007-Dec    Masami Hiramatsu <mhiramat@redhat.com> added kprobe-booster
- *             and kretprobe-booster for x86-64
+ *             and kretprobe-booster for x86-64
  * 2007-Dec    Masami Hiramatsu <mhiramat@redhat.com>, Arjan van de Ven
- *             <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
- *             unified x86 kprobes code.
+ *             <arjan@infradead.org> and Jim Keniston <jkenisto@us.ibm.com>
+ *             unified x86 kprobes code.
  */
-
 #include <linux/kprobes.h>
 #include <linux/ptrace.h>
 #include <linux/string.h>
@@ -59,6 +58,8 @@
 #include <asm/insn.h>
 #include <asm/debugreg.h>
 
+#include "kprobes-common.h"
+
 void jprobe_return_end(void);
 
 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
@@ -108,6 +109,7 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = {
                              doesn't switch kernel stack.*/
        {NULL, NULL}    /* Terminator */
 };
+
 const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist);
 
 static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
@@ -123,11 +125,17 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
 }
 
 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
-static void __kprobes synthesize_reljump(void *from, void *to)
+void __kprobes synthesize_reljump(void *from, void *to)
 {
        __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE);
 }
 
+/* Insert a call instruction at address 'from', which calls address 'to'. */
+void __kprobes synthesize_relcall(void *from, void *to)
+{
+       __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
+}
+
 /*
  * Skip the prefixes of the instruction.
  */
@@ -151,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn)
  * Returns non-zero if opcode is boostable.
  * RIP relative instructions are adjusted at copying time in 64 bits mode
  */
-static int __kprobes can_boost(kprobe_opcode_t *opcodes)
+int __kprobes can_boost(kprobe_opcode_t *opcodes)
 {
        kprobe_opcode_t opcode;
        kprobe_opcode_t *orig_opcodes = opcodes;
@@ -207,13 +215,15 @@ retry:
        }
 }
 
-/* Recover the probed instruction at addr for further analysis. */
-static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
+static unsigned long
+__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
 {
        struct kprobe *kp;
+
        kp = get_kprobe((void *)addr);
+       /* There is no probe; return the original address */
        if (!kp)
-               return -EINVAL;
+               return addr;
 
        /*
         *  Basically, kp->ainsn.insn has an original instruction.
@@ -230,14 +240,29 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
         */
        memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
        buf[0] = kp->opcode;
-       return 0;
+       return (unsigned long)buf;
+}
+
+/*
+ * Recover the probed instruction at addr for further analysis.
+ * Caller must hold kprobe_mutex, or disable preemption to prevent
+ * the referenced kprobes from being released.
+ */
+unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
+{
+       unsigned long __addr;
+
+       __addr = __recover_optprobed_insn(buf, addr);
+       if (__addr != addr)
+               return __addr;
+
+       return __recover_probed_insn(buf, addr);
 }
 
 /* Check if paddr is at an instruction boundary */
 static int __kprobes can_probe(unsigned long paddr)
 {
-       int ret;
-       unsigned long addr, offset = 0;
+       unsigned long addr, __addr, offset = 0;
        struct insn insn;
        kprobe_opcode_t buf[MAX_INSN_SIZE];
 
@@ -247,26 +272,24 @@ static int __kprobes can_probe(unsigned long paddr)
        /* Decode instructions */
        addr = paddr - offset;
        while (addr < paddr) {
-               kernel_insn_init(&insn, (void *)addr);
-               insn_get_opcode(&insn);
-
                /*
                 * Check if the instruction has been modified by another
                 * kprobe, in which case we replace the breakpoint by the
                 * original instruction in our buffer.
+                * Also, jump optimization will change the breakpoint to a
+                * relative jump. Since a relative jump is itself a normal
+                * instruction, we simply pass through if there is no kprobe.
                 */
-               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
-                       ret = recover_probed_instruction(buf, addr);
-                       if (ret)
-                               /*
-                                * Another debugging subsystem might insert
-                                * this breakpoint. In that case, we can't
-                                * recover it.
-                                */
-                               return 0;
-                       kernel_insn_init(&insn, buf);
-               }
+               __addr = recover_probed_instruction(buf, addr);
+               kernel_insn_init(&insn, (void *)__addr);
                insn_get_length(&insn);
+
+               /*
+                * Another debugging subsystem might insert this breakpoint.
+                * In that case, we can't recover it.
+                */
+               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+                       return 0;
                addr += insn.length;
        }
 
@@ -299,24 +322,16 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn)
  * If not, return null.
  * Only applicable to 64-bit x86.
  */
-static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
+int __kprobes __copy_instruction(u8 *dest, u8 *src)
 {
        struct insn insn;
-       int ret;
        kprobe_opcode_t buf[MAX_INSN_SIZE];
 
-       kernel_insn_init(&insn, src);
-       if (recover) {
-               insn_get_opcode(&insn);
-               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
-                       ret = recover_probed_instruction(buf,
-                                                        (unsigned long)src);
-                       if (ret)
-                               return 0;
-                       kernel_insn_init(&insn, buf);
-               }
-       }
+       kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, (unsigned long)src));
        insn_get_length(&insn);
+       /* Another subsystem has put a breakpoint we failed to recover */
+       if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
+               return 0;
        memcpy(dest, insn.kaddr, insn.length);
 
 #ifdef CONFIG_X86_64
@@ -337,8 +352,7 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
                 * extension of the original signed 32-bit displacement would
                 * have given.
                 */
-               newdisp = (u8 *) src + (s64) insn.displacement.value -
-                         (u8 *) dest;
+               newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
                BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check.  */
                disp = (u8 *) dest + insn_offset_displacement(&insn);
                *(s32 *) disp = (s32) newdisp;
@@ -349,18 +363,20 @@ static int __kprobes __copy_instruction(u8 *dest, u8 *src, int recover)
 
 static void __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+       /* Copy the instruction, recovering it if another optprobe modified it. */
+       __copy_instruction(p->ainsn.insn, p->addr);
+
        /*
-        * Copy an instruction without recovering int3, because it will be
-        * put by another subsystem.
+        * __copy_instruction can modify the displacement of the instruction,
+        * but that doesn't affect the boostability check.
         */
-       __copy_instruction(p->ainsn.insn, p->addr, 0);
-
-       if (can_boost(p->addr))
+       if (can_boost(p->ainsn.insn))
                p->ainsn.boostable = 0;
        else
                p->ainsn.boostable = -1;
 
-       p->opcode = *p->addr;
+       /* Likewise, a displacement change doesn't affect the first byte */
+       p->opcode = p->ainsn.insn[0];
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -442,8 +458,8 @@ static void __kprobes restore_btf(void)
        }
 }
 
-void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
-                                     struct pt_regs *regs)
+void __kprobes
+arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
 {
        unsigned long *sara = stack_addr(regs);
 
@@ -453,16 +469,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
        *sara = (unsigned long) &kretprobe_trampoline;
 }
 
-#ifdef CONFIG_OPTPROBES
-static int  __kprobes setup_detour_execution(struct kprobe *p,
-                                            struct pt_regs *regs,
-                                            int reenter);
-#else
-#define setup_detour_execution(p, regs, reenter) (0)
-#endif
-
-static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
-                                      struct kprobe_ctlblk *kcb, int reenter)
+static void __kprobes
+setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter)
 {
        if (setup_detour_execution(p, regs, reenter))
                return;
@@ -504,8 +512,8 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
  * within the handler. We save the original kprobes variables and just single
  * step on the instruction of the new probe without calling any user handlers.
  */
-static int __kprobes reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
-                                   struct kprobe_ctlblk *kcb)
+static int __kprobes
+reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
 {
        switch (kcb->kprobe_status) {
        case KPROBE_HIT_SSDONE:
@@ -600,69 +608,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
        return 0;
 }
 
-#ifdef CONFIG_X86_64
-#define SAVE_REGS_STRING               \
-       /* Skip cs, ip, orig_ax. */     \
-       "       subq $24, %rsp\n"       \
-       "       pushq %rdi\n"           \
-       "       pushq %rsi\n"           \
-       "       pushq %rdx\n"           \
-       "       pushq %rcx\n"           \
-       "       pushq %rax\n"           \
-       "       pushq %r8\n"            \
-       "       pushq %r9\n"            \
-       "       pushq %r10\n"           \
-       "       pushq %r11\n"           \
-       "       pushq %rbx\n"           \
-       "       pushq %rbp\n"           \
-       "       pushq %r12\n"           \
-       "       pushq %r13\n"           \
-       "       pushq %r14\n"           \
-       "       pushq %r15\n"
-#define RESTORE_REGS_STRING            \
-       "       popq %r15\n"            \
-       "       popq %r14\n"            \
-       "       popq %r13\n"            \
-       "       popq %r12\n"            \
-       "       popq %rbp\n"            \
-       "       popq %rbx\n"            \
-       "       popq %r11\n"            \
-       "       popq %r10\n"            \
-       "       popq %r9\n"             \
-       "       popq %r8\n"             \
-       "       popq %rax\n"            \
-       "       popq %rcx\n"            \
-       "       popq %rdx\n"            \
-       "       popq %rsi\n"            \
-       "       popq %rdi\n"            \
-       /* Skip orig_ax, ip, cs */      \
-       "       addq $24, %rsp\n"
-#else
-#define SAVE_REGS_STRING               \
-       /* Skip cs, ip, orig_ax and gs. */      \
-       "       subl $16, %esp\n"       \
-       "       pushl %fs\n"            \
-       "       pushl %es\n"            \
-       "       pushl %ds\n"            \
-       "       pushl %eax\n"           \
-       "       pushl %ebp\n"           \
-       "       pushl %edi\n"           \
-       "       pushl %esi\n"           \
-       "       pushl %edx\n"           \
-       "       pushl %ecx\n"           \
-       "       pushl %ebx\n"
-#define RESTORE_REGS_STRING            \
-       "       popl %ebx\n"            \
-       "       popl %ecx\n"            \
-       "       popl %edx\n"            \
-       "       popl %esi\n"            \
-       "       popl %edi\n"            \
-       "       popl %ebp\n"            \
-       "       popl %eax\n"            \
-       /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
-       "       addl $24, %esp\n"
-#endif
-
 /*
  * When a retprobed function returns, this code saves the registers and
  * calls trampoline_handler(), which calls the kretprobe's handler.
@@ -816,8 +761,8 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
  * jump instruction after the copied instruction, that jumps to the next
  * instruction after the probepoint.
  */
-static void __kprobes resume_execution(struct kprobe *p,
-               struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+static void __kprobes
+resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
 {
        unsigned long *tos = stack_addr(regs);
        unsigned long copy_ip = (unsigned long)p->ainsn.insn;
@@ -996,8 +941,8 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 /*
  * Wrapper routine for handling exceptions.
  */
-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-                                      unsigned long val, void *data)
+int __kprobes
+kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
 {
        struct die_args *args = data;
        int ret = NOTIFY_DONE;
@@ -1107,466 +1052,9 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        return 0;
 }
 
-
-#ifdef CONFIG_OPTPROBES
-
-/* Insert a call instruction at address 'from', which calls address 'to'.*/
-static void __kprobes synthesize_relcall(void *from, void *to)
-{
-       __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE);
-}
-
-/* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
-static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
-                                         unsigned long val)
-{
-#ifdef CONFIG_X86_64
-       *addr++ = 0x48;
-       *addr++ = 0xbf;
-#else
-       *addr++ = 0xb8;
-#endif
-       *(unsigned long *)addr = val;
-}
-
-static void __used __kprobes kprobes_optinsn_template_holder(void)
-{
-       asm volatile (
-                       ".global optprobe_template_entry\n"
-                       "optprobe_template_entry: \n"
-#ifdef CONFIG_X86_64
-                       /* We don't bother saving the ss register */
-                       "       pushq %rsp\n"
-                       "       pushfq\n"
-                       SAVE_REGS_STRING
-                       "       movq %rsp, %rsi\n"
-                       ".global optprobe_template_val\n"
-                       "optprobe_template_val: \n"
-                       ASM_NOP5
-                       ASM_NOP5
-                       ".global optprobe_template_call\n"
-                       "optprobe_template_call: \n"
-                       ASM_NOP5
-                       /* Move flags to rsp */
-                       "       movq 144(%rsp), %rdx\n"
-                       "       movq %rdx, 152(%rsp)\n"
-                       RESTORE_REGS_STRING
-                       /* Skip flags entry */
-                       "       addq $8, %rsp\n"
-                       "       popfq\n"
-#else /* CONFIG_X86_32 */
-                       "       pushf\n"
-                       SAVE_REGS_STRING
-                       "       movl %esp, %edx\n"
-                       ".global optprobe_template_val\n"
-                       "optprobe_template_val: \n"
-                       ASM_NOP5
-                       ".global optprobe_template_call\n"
-                       "optprobe_template_call: \n"
-                       ASM_NOP5
-                       RESTORE_REGS_STRING
-                       "       addl $4, %esp\n"        /* skip cs */
-                       "       popf\n"
-#endif
-                       ".global optprobe_template_end\n"
-                       "optprobe_template_end: \n");
-}
-
-#define TMPL_MOVE_IDX \
-       ((long)&optprobe_template_val - (long)&optprobe_template_entry)
-#define TMPL_CALL_IDX \
-       ((long)&optprobe_template_call - (long)&optprobe_template_entry)
-#define TMPL_END_IDX \
-       ((long)&optprobe_template_end - (long)&optprobe_template_entry)
-
-#define INT3_SIZE sizeof(kprobe_opcode_t)
-
-/* Optimized kprobe call back function: called from optinsn */
-static void __kprobes optimized_callback(struct optimized_kprobe *op,
-                                        struct pt_regs *regs)
-{
-       struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-       unsigned long flags;
-
-       /* This is possible if op is under delayed unoptimizing */
-       if (kprobe_disabled(&op->kp))
-               return;
-
-       local_irq_save(flags);
-       if (kprobe_running()) {
-               kprobes_inc_nmissed_count(&op->kp);
-       } else {
-               /* Save skipped registers */
-#ifdef CONFIG_X86_64
-               regs->cs = __KERNEL_CS;
-#else
-               regs->cs = __KERNEL_CS | get_kernel_rpl();
-               regs->gs = 0;
-#endif
-               regs->ip = (unsigned long)op->kp.addr + INT3_SIZE;
-               regs->orig_ax = ~0UL;
-
-               __this_cpu_write(current_kprobe, &op->kp);
-               kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-               opt_pre_handler(&op->kp, regs);
-               __this_cpu_write(current_kprobe, NULL);
-       }
-       local_irq_restore(flags);
-}
-
-static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
-{
-       int len = 0, ret;
-
-       while (len < RELATIVEJUMP_SIZE) {
-               ret = __copy_instruction(dest + len, src + len, 1);
-               if (!ret || !can_boost(dest + len))
-                       return -EINVAL;
-               len += ret;
-       }
-       /* Check whether the address range is reserved */
-       if (ftrace_text_reserved(src, src + len - 1) ||
-           alternatives_text_reserved(src, src + len - 1) ||
-           jump_label_text_reserved(src, src + len - 1))
-               return -EBUSY;
-
-       return len;
-}
-
-/* Check whether insn is indirect jump */
-static int __kprobes insn_is_indirect_jump(struct insn *insn)
-{
-       return ((insn->opcode.bytes[0] == 0xff &&
-               (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */
-               insn->opcode.bytes[0] == 0xea); /* Segment based jump */
-}
-
-/* Check whether insn jumps into specified address range */
-static int insn_jump_into_range(struct insn *insn, unsigned long start, int len)
-{
-       unsigned long target = 0;
-
-       switch (insn->opcode.bytes[0]) {
-       case 0xe0:      /* loopne */
-       case 0xe1:      /* loope */
-       case 0xe2:      /* loop */
-       case 0xe3:      /* jcxz */
-       case 0xe9:      /* near relative jump */
-       case 0xeb:      /* short relative jump */
-               break;
-       case 0x0f:
-               if ((insn->opcode.bytes[1] & 0xf0) == 0x80) /* jcc near */
-                       break;
-               return 0;
-       default:
-               if ((insn->opcode.bytes[0] & 0xf0) == 0x70) /* jcc short */
-                       break;
-               return 0;
-       }
-       target = (unsigned long)insn->next_byte + insn->immediate.value;
-
-       return (start <= target && target <= start + len);
-}
-
-/* Decode whole function to ensure any instructions don't jump into target */
-static int __kprobes can_optimize(unsigned long paddr)
-{
-       int ret;
-       unsigned long addr, size = 0, offset = 0;
-       struct insn insn;
-       kprobe_opcode_t buf[MAX_INSN_SIZE];
-
-       /* Lookup symbol including addr */
-       if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
-               return 0;
-
-       /*
-        * Do not optimize in the entry code due to the unstable
-        * stack handling.
-        */
-       if ((paddr >= (unsigned long )__entry_text_start) &&
-           (paddr <  (unsigned long )__entry_text_end))
-               return 0;
-
-       /* Check there is enough space for a relative jump. */
-       if (size - offset < RELATIVEJUMP_SIZE)
-               return 0;
-
-       /* Decode instructions */
-       addr = paddr - offset;
-       while (addr < paddr - offset + size) { /* Decode until function end */
-               if (search_exception_tables(addr))
-                       /*
-                        * Since some fixup code will jumps into this function,
-                        * we can't optimize kprobe in this function.
-                        */
-                       return 0;
-               kernel_insn_init(&insn, (void *)addr);
-               insn_get_opcode(&insn);
-               if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION) {
-                       ret = recover_probed_instruction(buf, addr);
-                       if (ret)
-                               return 0;
-                       kernel_insn_init(&insn, buf);
-               }
-               insn_get_length(&insn);
-               /* Recover address */
-               insn.kaddr = (void *)addr;
-               insn.next_byte = (void *)(addr + insn.length);
-               /* Check any instructions don't jump into target */
-               if (insn_is_indirect_jump(&insn) ||
-                   insn_jump_into_range(&insn, paddr + INT3_SIZE,
-                                        RELATIVE_ADDR_SIZE))
-                       return 0;
-               addr += insn.length;
-       }
-
-       return 1;
-}
-
-/* Check optimized_kprobe can actually be optimized. */
-int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op)
-{
-       int i;
-       struct kprobe *p;
-
-       for (i = 1; i < op->optinsn.size; i++) {
-               p = get_kprobe(op->kp.addr + i);
-               if (p && !kprobe_disabled(p))
-                       return -EEXIST;
-       }
-
-       return 0;
-}
-
-/* Check the addr is within the optimized instructions. */
-int __kprobes arch_within_optimized_kprobe(struct optimized_kprobe *op,
-                                          unsigned long addr)
-{
-       return ((unsigned long)op->kp.addr <= addr &&
-               (unsigned long)op->kp.addr + op->optinsn.size > addr);
-}
-
-/* Free optimized instruction slot */
-static __kprobes
-void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
-{
-       if (op->optinsn.insn) {
-               free_optinsn_slot(op->optinsn.insn, dirty);
-               op->optinsn.insn = NULL;
-               op->optinsn.size = 0;
-       }
-}
-
-void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op)
-{
-       __arch_remove_optimized_kprobe(op, 1);
-}
-
-/*
- * Copy replacing target instructions
- * Target instructions MUST be relocatable (checked inside)
- */
-int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
-{
-       u8 *buf;
-       int ret;
-       long rel;
-
-       if (!can_optimize((unsigned long)op->kp.addr))
-               return -EILSEQ;
-
-       op->optinsn.insn = get_optinsn_slot();
-       if (!op->optinsn.insn)
-               return -ENOMEM;
-
-       /*
-        * Verify if the address gap is in 2GB range, because this uses
-        * a relative jump.
-        */
-       rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
-       if (abs(rel) > 0x7fffffff)
-               return -ERANGE;
-
-       buf = (u8 *)op->optinsn.insn;
-
-       /* Copy instructions into the out-of-line buffer */
-       ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
-       if (ret < 0) {
-               __arch_remove_optimized_kprobe(op, 0);
-               return ret;
-       }
-       op->optinsn.size = ret;
-
-       /* Copy arch-dep-instance from template */
-       memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
-
-       /* Set probe information */
-       synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
-
-       /* Set probe function call */
-       synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
-
-       /* Set returning jmp instruction at the tail of out-of-line buffer */
-       synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
-                          (u8 *)op->kp.addr + op->optinsn.size);
-
-       flush_icache_range((unsigned long) buf,
-                          (unsigned long) buf + TMPL_END_IDX +
-                          op->optinsn.size + RELATIVEJUMP_SIZE);
-       return 0;
-}
-
-#define MAX_OPTIMIZE_PROBES 256
-static struct text_poke_param *jump_poke_params;
-static struct jump_poke_buffer {
-       u8 buf[RELATIVEJUMP_SIZE];
-} *jump_poke_bufs;
-
-static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
-                                           u8 *insn_buf,
-                                           struct optimized_kprobe *op)
-{
-       s32 rel = (s32)((long)op->optinsn.insn -
-                       ((long)op->kp.addr + RELATIVEJUMP_SIZE));
-
-       /* Backup instructions which will be replaced by jump address */
-       memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
-              RELATIVE_ADDR_SIZE);
-
-       insn_buf[0] = RELATIVEJUMP_OPCODE;
-       *(s32 *)(&insn_buf[1]) = rel;
-
-       tprm->addr = op->kp.addr;
-       tprm->opcode = insn_buf;
-       tprm->len = RELATIVEJUMP_SIZE;
-}
-
-/*
- * Replace breakpoints (int3) with relative jumps.
- * Caller must call with locking kprobe_mutex and text_mutex.
- */
-void __kprobes arch_optimize_kprobes(struct list_head *oplist)
-{
-       struct optimized_kprobe *op, *tmp;
-       int c = 0;
-
-       list_for_each_entry_safe(op, tmp, oplist, list) {
-               WARN_ON(kprobe_disabled(&op->kp));
-               /* Setup param */
-               setup_optimize_kprobe(&jump_poke_params[c],
-                                     jump_poke_bufs[c].buf, op);
-               list_del_init(&op->list);
-               if (++c >= MAX_OPTIMIZE_PROBES)
-                       break;
-       }
-
-       /*
-        * text_poke_smp doesn't support NMI/MCE code modifying.
-        * However, since kprobes itself also doesn't support NMI/MCE
-        * code probing, it's not a problem.
-        */
-       text_poke_smp_batch(jump_poke_params, c);
-}
-
-static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm,
-                                             u8 *insn_buf,
-                                             struct optimized_kprobe *op)
-{
-       /* Set int3 to first byte for kprobes */
-       insn_buf[0] = BREAKPOINT_INSTRUCTION;
-       memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-
-       tprm->addr = op->kp.addr;
-       tprm->opcode = insn_buf;
-       tprm->len = RELATIVEJUMP_SIZE;
-}
-
-/*
- * Recover original instructions and breakpoints from relative jumps.
- * Caller must call with locking kprobe_mutex.
- */
-extern void arch_unoptimize_kprobes(struct list_head *oplist,
-                                   struct list_head *done_list)
-{
-       struct optimized_kprobe *op, *tmp;
-       int c = 0;
-
-       list_for_each_entry_safe(op, tmp, oplist, list) {
-               /* Setup param */
-               setup_unoptimize_kprobe(&jump_poke_params[c],
-                                       jump_poke_bufs[c].buf, op);
-               list_move(&op->list, done_list);
-               if (++c >= MAX_OPTIMIZE_PROBES)
-                       break;
-       }
-
-       /*
-        * text_poke_smp doesn't support NMI/MCE code modifying.
-        * However, since kprobes itself also doesn't support NMI/MCE
-        * code probing, it's not a problem.
-        */
-       text_poke_smp_batch(jump_poke_params, c);
-}
-
-/* Replace a relative jump with a breakpoint (int3).  */
-void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op)
-{
-       u8 buf[RELATIVEJUMP_SIZE];
-
-       /* Set int3 to first byte for kprobes */
-       buf[0] = BREAKPOINT_INSTRUCTION;
-       memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE);
-       text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE);
-}
-
-static int  __kprobes setup_detour_execution(struct kprobe *p,
-                                            struct pt_regs *regs,
-                                            int reenter)
-{
-       struct optimized_kprobe *op;
-
-       if (p->flags & KPROBE_FLAG_OPTIMIZED) {
-               /* This kprobe is really able to run optimized path. */
-               op = container_of(p, struct optimized_kprobe, kp);
-               /* Detour through copied instructions */
-               regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
-               if (!reenter)
-                       reset_current_kprobe();
-               preempt_enable_no_resched();
-               return 1;
-       }
-       return 0;
-}
-
-static int __kprobes init_poke_params(void)
-{
-       /* Allocate code buffer and parameter array */
-       jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) *
-                                MAX_OPTIMIZE_PROBES, GFP_KERNEL);
-       if (!jump_poke_bufs)
-               return -ENOMEM;
-
-       jump_poke_params = kmalloc(sizeof(struct text_poke_param) *
-                                  MAX_OPTIMIZE_PROBES, GFP_KERNEL);
-       if (!jump_poke_params) {
-               kfree(jump_poke_bufs);
-               jump_poke_bufs = NULL;
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-#else  /* !CONFIG_OPTPROBES */
-static int __kprobes init_poke_params(void)
-{
-       return 0;
-}
-#endif
-
 int __init arch_init_kprobes(void)
 {
-       return init_poke_params();
+       return arch_init_optprobes();
 }
 
 int __kprobes arch_trampoline_kprobe(struct kprobe *p)
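
As the comment above recover_probed_instruction() notes, the caller must hold
kprobe_mutex or run with preemption disabled. A hedged sketch of the decode
pattern this patch uses at every analysis site (fragment, kernel context
assumed; addr is the address being examined):

	struct insn insn;
	kprobe_opcode_t buf[MAX_INSN_SIZE];

	/* Returns either addr itself or the address of a recovered copy in buf */
	kernel_insn_init(&insn, (void *)recover_probed_instruction(buf, addr));
	insn_get_length(&insn);

	/* An int3 still in the opcode means another subsystem owns it */
	if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
		return 0;
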
index f0c6fd6..694d801 100644 (file)
@@ -438,9 +438,9 @@ void __init kvm_guest_init(void)
 static __init int activate_jump_labels(void)
 {
        if (has_steal_clock) {
-               jump_label_inc(&paravirt_steal_enabled);
+               static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
-                       jump_label_inc(&paravirt_steal_rq_enabled);
+                       static_key_slow_inc(&paravirt_steal_rq_enabled);
        }
 
        return 0;
index d90272e..ada2f99 100644 (file)
@@ -202,8 +202,8 @@ static void native_flush_tlb_single(unsigned long addr)
        __native_flush_tlb_single(addr);
 }
 
-struct jump_label_key paravirt_steal_enabled;
-struct jump_label_key paravirt_steal_rq_enabled;
+struct static_key paravirt_steal_enabled;
+struct static_key paravirt_steal_rq_enabled;
 
 static u64 native_steal_clock(int cpu)
 {
index 15763af..44eefde 100644 (file)
@@ -377,8 +377,8 @@ static inline int hlt_use_halt(void)
 void default_idle(void)
 {
        if (hlt_use_halt()) {
-               trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-               trace_cpu_idle(1, smp_processor_id());
+               trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+               trace_cpu_idle_rcuidle(1, smp_processor_id());
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
@@ -391,8 +391,8 @@ void default_idle(void)
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
-               trace_power_end(smp_processor_id());
-               trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+               trace_power_end_rcuidle(smp_processor_id());
+               trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
                /* loop is done by the caller */
@@ -450,8 +450,8 @@ EXPORT_SYMBOL_GPL(cpu_idle_wait);
 static void mwait_idle(void)
 {
        if (!need_resched()) {
-               trace_power_start(POWER_CSTATE, 1, smp_processor_id());
-               trace_cpu_idle(1, smp_processor_id());
+               trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
+               trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);
 
@@ -461,8 +461,8 @@ static void mwait_idle(void)
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
-               trace_power_end(smp_processor_id());
-               trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+               trace_power_end_rcuidle(smp_processor_id());
+               trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
 }
@@ -474,13 +474,13 @@ static void mwait_idle(void)
  */
 static void poll_idle(void)
 {
-       trace_power_start(POWER_CSTATE, 0, smp_processor_id());
-       trace_cpu_idle(0, smp_processor_id());
+       trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
+       trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
-       trace_power_end(smp_processor_id());
-       trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+       trace_power_end_rcuidle(smp_processor_id());
+       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
 }
 
 /*
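
The trace_*_rcuidle() variants substituted above exist because tracepoints
protect their probe list with RCU, while the idle loop runs after
rcu_idle_enter(), i.e. in an extended quiescent state where RCU is not
watching this CPU. A rough sketch of what the generated _rcuidle wrapper does
around the ordinary probe dispatch (simplified; the real macro lives in
<linux/tracepoint.h>):

	rcu_idle_exit();        /* make RCU watch this CPU again */
	/* ... ordinary tracepoint dispatch, as in trace_cpu_idle() ... */
	rcu_idle_enter();       /* return to the idle quiescent state */
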
index fe15dcc..ea7b4fd 100644 (file)
@@ -234,7 +234,7 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
 }
 
 static bool mmu_audit;
-static struct jump_label_key mmu_audit_key;
+static struct static_key mmu_audit_key;
 
 static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
@@ -250,7 +250,7 @@ static void __kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 
 static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
-       if (static_branch((&mmu_audit_key)))
+       if (static_key_false((&mmu_audit_key)))
                __kvm_mmu_audit(vcpu, point);
 }
 
@@ -259,7 +259,7 @@ static void mmu_audit_enable(void)
        if (mmu_audit)
                return;
 
-       jump_label_inc(&mmu_audit_key);
+       static_key_slow_inc(&mmu_audit_key);
        mmu_audit = true;
 }
 
@@ -268,7 +268,7 @@ static void mmu_audit_disable(void)
        if (!mmu_audit)
                return;
 
-       jump_label_dec(&mmu_audit_key);
+       static_key_slow_dec(&mmu_audit_key);
        mmu_audit = false;
 }
 
index 88ad5fb..c1f01a8 100644 (file)
@@ -29,46 +29,46 @@ insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
        return inat_primary_table[opcode];
 }
 
-insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, insn_byte_t last_pfx,
+int inat_get_last_prefix_id(insn_byte_t last_pfx)
+{
+       insn_attr_t lpfx_attr;
+
+       lpfx_attr = inat_get_opcode_attribute(last_pfx);
+       return inat_last_prefix_id(lpfx_attr);
+}
+
+insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
                                      insn_attr_t esc_attr)
 {
        const insn_attr_t *table;
-       insn_attr_t lpfx_attr;
-       int n, m = 0;
+       int n;
 
        n = inat_escape_id(esc_attr);
-       if (last_pfx) {
-               lpfx_attr = inat_get_opcode_attribute(last_pfx);
-               m = inat_last_prefix_id(lpfx_attr);
-       }
+
        table = inat_escape_tables[n][0];
        if (!table)
                return 0;
-       if (inat_has_variant(table[opcode]) && m) {
-               table = inat_escape_tables[n][m];
+       if (inat_has_variant(table[opcode]) && lpfx_id) {
+               table = inat_escape_tables[n][lpfx_id];
                if (!table)
                        return 0;
        }
        return table[opcode];
 }
 
-insn_attr_t inat_get_group_attribute(insn_byte_t modrm, insn_byte_t last_pfx,
+insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
                                     insn_attr_t grp_attr)
 {
        const insn_attr_t *table;
-       insn_attr_t lpfx_attr;
-       int n, m = 0;
+       int n;
 
        n = inat_group_id(grp_attr);
-       if (last_pfx) {
-               lpfx_attr = inat_get_opcode_attribute(last_pfx);
-               m = inat_last_prefix_id(lpfx_attr);
-       }
+
        table = inat_group_tables[n][0];
        if (!table)
                return inat_group_common_attribute(grp_attr);
-       if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && m) {
-               table = inat_group_tables[n][m];
+       if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
+               table = inat_group_tables[n][lpfx_id];
                if (!table)
                        return inat_group_common_attribute(grp_attr);
        }
index 5a1f9f3..25feb1a 100644 (file)
@@ -185,7 +185,8 @@ err_out:
 void insn_get_opcode(struct insn *insn)
 {
        struct insn_field *opcode = &insn->opcode;
-       insn_byte_t op, pfx;
+       insn_byte_t op;
+       int pfx_id;
        if (opcode->got)
                return;
        if (!insn->prefixes.got)
@@ -212,8 +213,8 @@ void insn_get_opcode(struct insn *insn)
                /* Get escaped opcode */
                op = get_next(insn_byte_t, insn);
                opcode->bytes[opcode->nbytes++] = op;
-               pfx = insn_last_prefix(insn);
-               insn->attr = inat_get_escape_attribute(op, pfx, insn->attr);
+               pfx_id = insn_last_prefix_id(insn);
+               insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
        }
        if (inat_must_vex(insn->attr))
                insn->attr = 0; /* This instruction is bad */
@@ -235,7 +236,7 @@ err_out:
 void insn_get_modrm(struct insn *insn)
 {
        struct insn_field *modrm = &insn->modrm;
-       insn_byte_t pfx, mod;
+       insn_byte_t pfx_id, mod;
        if (modrm->got)
                return;
        if (!insn->opcode.got)
@@ -246,8 +247,8 @@ void insn_get_modrm(struct insn *insn)
                modrm->value = mod;
                modrm->nbytes = 1;
                if (inat_is_group(insn->attr)) {
-                       pfx = insn_last_prefix(insn);
-                       insn->attr = inat_get_group_attribute(mod, pfx,
+                       pfx_id = insn_last_prefix_id(insn);
+                       insn->attr = inat_get_group_attribute(mod, pfx_id,
                                                              insn->attr);
                        if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
                                insn->attr = 0; /* This is bad */
index 59f4261..6588f43 100644 (file)
@@ -94,13 +94,13 @@ int cpuidle_idle_call(void)
 
        target_state = &drv->states[next_state];
 
-       trace_power_start(POWER_CSTATE, next_state, dev->cpu);
-       trace_cpu_idle(next_state, dev->cpu);
+       trace_power_start_rcuidle(POWER_CSTATE, next_state, dev->cpu);
+       trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
        entered_state = target_state->enter(dev, drv, next_state);
 
-       trace_power_end(dev->cpu);
-       trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+       trace_power_end_rcuidle(dev->cpu);
+       trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
 
        if (entered_state >= 0) {
                /* Update cpuidle counters */
index 153dee1..b0695a9 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -63,6 +63,8 @@
 #include <trace/events/task.h>
 #include "internal.h"
 
+#include <trace/events/sched.h>
+
 int core_uses_pid;
 char core_pattern[CORENAME_MAX_SIZE] = "core";
 unsigned int core_pipe_limit;
@@ -1402,9 +1404,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
                         */
                        bprm->recursion_depth = depth;
                        if (retval >= 0) {
-                               if (depth == 0)
-                                       ptrace_event(PTRACE_EVENT_EXEC,
-                                                       old_pid);
+                               if (depth == 0) {
+                                       trace_sched_process_exec(current, old_pid, bprm);
+                                       ptrace_event(PTRACE_EVENT_EXEC, old_pid);
+                               }
                                put_binfmt(fmt);
                                allow_write_access(bprm->file);
                                if (bprm->file)
index 028e26f..72a6cab 100644 (file)
@@ -31,16 +31,33 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 
+/*
+ * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
+ * set in the flags member.
+ *
+ * ENABLED - set/unset when ftrace_ops is registered/unregistered
+ * GLOBAL  - set manually by the ftrace_ops user to denote that the
+ *           ftrace_ops is part of the global tracers sharing the same
+ *           filter via the set_ftrace_* debugfs files.
+ * DYNAMIC - set when ftrace_ops is registered to denote a dynamically
+ *           allocated ftrace_ops which needs special care
+ * CONTROL - set manually by the ftrace_ops user to denote that the
+ *           ftrace_ops can be controlled by the following calls:
+ *             ftrace_function_local_enable
+ *             ftrace_function_local_disable
+ */
 enum {
        FTRACE_OPS_FL_ENABLED           = 1 << 0,
        FTRACE_OPS_FL_GLOBAL            = 1 << 1,
        FTRACE_OPS_FL_DYNAMIC           = 1 << 2,
+       FTRACE_OPS_FL_CONTROL           = 1 << 3,
 };
 
 struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops               *next;
        unsigned long                   flags;
+       int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
        struct ftrace_hash              *notrace_hash;
        struct ftrace_hash              *filter_hash;
@@ -97,6 +114,55 @@ int register_ftrace_function(struct ftrace_ops *ops);
 int unregister_ftrace_function(struct ftrace_ops *ops);
 void clear_ftrace_function(void);
 
+/**
+ * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
+ *
+ * This function enables tracing on the current cpu by decreasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
+{
+       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+               return;
+
+       (*this_cpu_ptr(ops->disabled))--;
+}
+
+/**
+ * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
+ *
+ * This function disables tracing on the current cpu by increasing
+ * the per cpu control variable.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
+{
+       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
+               return;
+
+       (*this_cpu_ptr(ops->disabled))++;
+}
+
+/**
+ * ftrace_function_local_disabled - returns ftrace_ops disabled value
+ *                                  on current cpu
+ *
+ * This function returns value of ftrace_ops::disabled on current cpu.
+ * It must be called with preemption disabled and only on ftrace_ops
+ * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
+ * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
+ */
+static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
+{
+       WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
+       return *this_cpu_ptr(ops->disabled);
+}
+
 extern void ftrace_stub(unsigned long a0, unsigned long a1);
 
 #else /* !CONFIG_FUNCTION_TRACER */
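
Putting the CONTROL interface together: a hedged sketch of how a user such as
perf might register a controllable callback and mask it on one cpu (my_ops,
my_tracer_func and quiesce_this_cpu are illustrative, not part of this patch):

	static void my_tracer_func(unsigned long ip, unsigned long parent_ip)
	{
		/* runs for every traced function unless locally disabled */
	}

	static struct ftrace_ops my_ops = {
		.func	= my_tracer_func,
		.flags	= FTRACE_OPS_FL_CONTROL,
	};

	/* after register_ftrace_function(&my_ops) succeeds: */
	static void quiesce_this_cpu(void)
	{
		preempt_disable();                      /* required by the helpers */
		ftrace_function_local_disable(&my_ops); /* ++(*disabled) on this cpu */
		/* ... work that must not recurse into the tracer ... */
		ftrace_function_local_enable(&my_ops);  /* --(*disabled) */
		preempt_enable();
	}
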
@@ -178,12 +244,13 @@ struct dyn_ftrace {
 };
 
 int ftrace_force_update(void);
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset);
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset);
 void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
 void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
+void ftrace_free_filter(struct ftrace_ops *ops);
 
 int register_ftrace_command(struct ftrace_func_command *cmd);
 int unregister_ftrace_command(struct ftrace_func_command *cmd);
@@ -314,9 +381,6 @@ extern void ftrace_enable_daemon(void);
 #else
 static inline int skip_trace(unsigned long ip) { return 0; }
 static inline int ftrace_force_update(void) { return 0; }
-static inline void ftrace_set_filter(unsigned char *buf, int len, int reset)
-{
-}
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
@@ -340,6 +404,9 @@ static inline int ftrace_text_reserved(void *start, void *end)
  */
 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
+#define ftrace_free_filter(ops) do { } while (0)
 
 static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
                            size_t cnt, loff_t *ppos) { return -ENODEV; }
index c3da42d..dd478fc 100644 (file)
@@ -146,6 +146,10 @@ enum trace_reg {
        TRACE_REG_UNREGISTER,
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
+       TRACE_REG_PERF_OPEN,
+       TRACE_REG_PERF_CLOSE,
+       TRACE_REG_PERF_ADD,
+       TRACE_REG_PERF_DEL,
 };
 
 struct ftrace_event_call;
@@ -157,7 +161,7 @@ struct ftrace_event_class {
        void                    *perf_probe;
 #endif
        int                     (*reg)(struct ftrace_event_call *event,
-                                      enum trace_reg type);
+                                      enum trace_reg type, void *data);
        int                     (*define_fields)(struct ftrace_event_call *);
        struct list_head        *(*get_fields)(struct ftrace_event_call *);
        struct list_head        fields;
@@ -165,7 +169,7 @@ struct ftrace_event_class {
 };
 
 extern int ftrace_event_reg(struct ftrace_event_call *event,
-                           enum trace_reg type);
+                           enum trace_reg type, void *data);
 
 enum {
        TRACE_EVENT_FL_ENABLED_BIT,
@@ -241,6 +245,7 @@ enum {
        FILTER_STATIC_STRING,
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
+       FILTER_TRACE_FN,
 };
 
 #define EVENT_STORAGE_SIZE 128
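
With the new TRACE_REG_PERF_* types and the void *data argument, an event
class's reg() callback now sees the whole perf lifetime of an event. A hedged
sketch of the dispatch shape (my_event_reg is illustrative; classes with no
per-event state can simply return 0 for the new types):

	static int my_event_reg(struct ftrace_event_call *event,
				enum trace_reg type, void *data)
	{
		switch (type) {
		case TRACE_REG_REGISTER:        /* ftrace enable */
		case TRACE_REG_UNREGISTER:      /* ftrace disable */
			/* attach/detach the trace probe as before */
			break;
	#ifdef CONFIG_PERF_EVENTS
		case TRACE_REG_PERF_REGISTER:
		case TRACE_REG_PERF_UNREGISTER:
			/* attach/detach the perf probe */
			break;
		case TRACE_REG_PERF_OPEN:       /* perf event created; data is the event */
		case TRACE_REG_PERF_CLOSE:      /* perf event destroyed */
		case TRACE_REG_PERF_ADD:        /* event scheduled in on a cpu */
		case TRACE_REG_PERF_DEL:        /* event scheduled out */
			break;                  /* nothing to do here */
	#endif
		default:
			break;
		}
		return 0;
	}
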
index a64b00e..3f830e0 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
-#include <trace/events/irq.h>
 
 /*
  * These correspond to the IORESOURCE_IRQ_* defines in
@@ -456,11 +455,7 @@ asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-static inline void __raise_softirq_irqoff(unsigned int nr)
-{
-       trace_softirq_raise(nr);
-       or_softirq_pending(1UL << nr);
-}
+extern void __raise_softirq_irqoff(unsigned int nr);
 
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
index 5ce8b14..c513a40 100644 (file)
@@ -1,22 +1,69 @@
 #ifndef _LINUX_JUMP_LABEL_H
 #define _LINUX_JUMP_LABEL_H
 
+/*
+ * Jump label support
+ *
+ * Copyright (C) 2009-2012 Jason Baron <jbaron@redhat.com>
+ * Copyright (C) 2011-2012 Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ * Jump labels provide an interface to generate dynamic branches using
+ * self-modifying code. Assuming toolchain and architecture support the result
+ * of a "if (static_key_false(&key))" statement is a unconditional branch (which
+ * defaults to false - and the true block is placed out of line).
+ *
+ * However at runtime we can change the branch target using
+ * static_key_slow_{inc,dec}(). These function as a 'reference' count on the key
+ * object and for as long as there are references all branches referring to
+ * that particular key will point to the (out of line) true block.
+ *
+ * Since this relies on modifying code, the static_key_slow_{inc,dec}() functions
+ * must be considered absolute slow paths (machine wide synchronization etc.).
+ * OTOH, since the affected branches are unconditional their runtime overhead
+ * will be absolutely minimal, esp. in the default (off) case where the total
+ * effect is a single NOP of appropriate size. The on case will patch in a jump
+ * to the out-of-line block.
+ *
+ * When the control is directly exposed to userspace it is prudent to delay the
+ * decrement to avoid high frequency code modifications which can (and do)
+ * cause significant performance degradation. Struct static_key_deferred and
+ * static_key_slow_dec_deferred() provide for this.
+ *
+ * Lacking toolchain and/or architecture support, it falls back to a simple
+ * conditional branch.
+ *
+ * struct static_key my_key = STATIC_KEY_INIT_TRUE;
+ *
+ *   if (static_key_true(&my_key)) {
+ *   }
+ *
+ * will result in the true case being in-line and start the key with a single
+ * reference. Mixing static_key_true() and static_key_false() on the same key
+ * is not allowed.
+ *
+ * Not initializing the key (static data is initialized to 0s anyway) is the
+ * same as using STATIC_KEY_INIT_FALSE, and static_key_false() is
+ * equivalent to static_branch().
+ *
+ */
+
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <linux/workqueue.h>
 
 #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
 
-struct jump_label_key {
+struct static_key {
        atomic_t enabled;
+/* Set lsb bit to 1 if branch is default true, 0 otherwise */
        struct jump_entry *entries;
 #ifdef CONFIG_MODULES
-       struct jump_label_mod *next;
+       struct static_key_mod *next;
 #endif
 };
 
-struct jump_label_key_deferred {
-       struct jump_label_key key;
+struct static_key_deferred {
+       struct static_key key;
        unsigned long timeout;
        struct delayed_work work;
 };
@@ -34,13 +81,34 @@ struct module;
 
 #ifdef HAVE_JUMP_LABEL
 
-#ifdef CONFIG_MODULES
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL, NULL}
-#else
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0), NULL}
-#endif
+#define JUMP_LABEL_TRUE_BRANCH 1UL
+
+static inline struct jump_entry *jump_label_get_entries(struct static_key *key)
+{
+       return (struct jump_entry *)((unsigned long)key->entries
+                                               & ~JUMP_LABEL_TRUE_BRANCH);
+}
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static inline bool jump_label_get_branch_default(struct static_key *key)
+{
+       if ((unsigned long)key->entries & JUMP_LABEL_TRUE_BRANCH)
+               return true;
+       return false;
+}
+
+static __always_inline bool static_key_false(struct static_key *key)
+{
+       return arch_static_branch(key);
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
+{
+       return !static_key_false(key);
+}
+
+/* Deprecated. Please use static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
 {
        return arch_static_branch(key);
 }
@@ -56,21 +124,23 @@ extern void arch_jump_label_transform(struct jump_entry *entry,
 extern void arch_jump_label_transform_static(struct jump_entry *entry,
                                             enum jump_label_type type);
 extern int jump_label_text_reserved(void *start, void *end);
-extern void jump_label_inc(struct jump_label_key *key);
-extern void jump_label_dec(struct jump_label_key *key);
-extern void jump_label_dec_deferred(struct jump_label_key_deferred *key);
-extern bool jump_label_enabled(struct jump_label_key *key);
+extern void static_key_slow_inc(struct static_key *key);
+extern void static_key_slow_dec(struct static_key *key);
+extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
 extern void jump_label_apply_nops(struct module *mod);
-extern void jump_label_rate_limit(struct jump_label_key_deferred *key,
-               unsigned long rl);
+extern void
+jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+       { .enabled = ATOMIC_INIT(1), .entries = (void *)1 })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+       { .enabled = ATOMIC_INIT(0), .entries = (void *)0 })
 
 #else  /* !HAVE_JUMP_LABEL */
 
 #include <linux/atomic.h>
 
-#define JUMP_LABEL_INIT {ATOMIC_INIT(0)}
-
-struct jump_label_key {
+struct static_key {
        atomic_t enabled;
 };
 
@@ -78,30 +148,45 @@ static __always_inline void jump_label_init(void)
 {
 }
 
-struct jump_label_key_deferred {
-       struct jump_label_key  key;
+struct static_key_deferred {
+       struct static_key  key;
 };
 
-static __always_inline bool static_branch(struct jump_label_key *key)
+static __always_inline bool static_key_false(struct static_key *key)
+{
+       if (unlikely(atomic_read(&key->enabled) > 0))
+               return true;
+       return false;
+}
+
+static __always_inline bool static_key_true(struct static_key *key)
 {
-       if (unlikely(atomic_read(&key->enabled)))
+       if (likely(atomic_read(&key->enabled) > 0))
                return true;
        return false;
 }
 
-static inline void jump_label_inc(struct jump_label_key *key)
+/* Deprecated. Please use static_key_false() instead. */
+static __always_inline bool static_branch(struct static_key *key)
+{
+       if (unlikely(atomic_read(&key->enabled) > 0))
+               return true;
+       return false;
+}
+
+static inline void static_key_slow_inc(struct static_key *key)
 {
        atomic_inc(&key->enabled);
 }
 
-static inline void jump_label_dec(struct jump_label_key *key)
+static inline void static_key_slow_dec(struct static_key *key)
 {
        atomic_dec(&key->enabled);
 }
 
-static inline void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
-       jump_label_dec(&key->key);
+       static_key_slow_dec(&key->key);
 }
 
 static inline int jump_label_text_reserved(void *start, void *end)
@@ -112,23 +197,30 @@ static inline int jump_label_text_reserved(void *start, void *end)
 static inline void jump_label_lock(void) {}
 static inline void jump_label_unlock(void) {}
 
-static inline bool jump_label_enabled(struct jump_label_key *key)
-{
-       return !!atomic_read(&key->enabled);
-}
-
 static inline int jump_label_apply_nops(struct module *mod)
 {
        return 0;
 }
 
-static inline void jump_label_rate_limit(struct jump_label_key_deferred *key,
+static inline void
+jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
 {
 }
+
+#define STATIC_KEY_INIT_TRUE ((struct static_key) \
+               { .enabled = ATOMIC_INIT(1) })
+#define STATIC_KEY_INIT_FALSE ((struct static_key) \
+               { .enabled = ATOMIC_INIT(0) })
+
 #endif /* HAVE_JUMP_LABEL */
 
-#define jump_label_key_enabled ((struct jump_label_key){ .enabled = ATOMIC_INIT(1), })
-#define jump_label_key_disabled        ((struct jump_label_key){ .enabled = ATOMIC_INIT(0), })
+#define STATIC_KEY_INIT STATIC_KEY_INIT_FALSE
+#define jump_label_enabled static_key_enabled
+
+static inline bool static_key_enabled(struct static_key *key)
+{
+       return (atomic_read(&key->enabled) > 0);
+}
 
 #endif /* _LINUX_JUMP_LABEL_H */
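As a concrete illustration of the renamed API documented above (the key name, my_hot_path() and do_extra_accounting() below are hypothetical, not part of this patch), a default-false key is used along these lines:

	/* Default-false key: the fast path is a single NOP. */
	static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

	void my_hot_path(void)
	{
		if (static_key_false(&my_feature_key)) {
			/* out-of-line block, only reached while enabled */
			do_extra_accounting();
		}
	}

	/* Slow-path control, e.g. from a sysctl handler: */
	void my_feature_set(bool on)
	{
		if (on)
			static_key_slow_inc(&my_feature_key);
		else
			static_key_slow_dec(&my_feature_key);
	}

A default-true key would instead pair STATIC_KEY_INIT_TRUE with static_key_true(), per the header comment.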
index 0eac07c..7dfaae7 100644 (file)
@@ -214,8 +214,8 @@ enum {
 #include <linux/skbuff.h>
 
 #ifdef CONFIG_RPS
-#include <linux/jump_label.h>
-extern struct jump_label_key rps_needed;
+#include <linux/static_key.h>
+extern struct static_key rps_needed;
 #endif
 
 struct neighbour;
index b809265..29734be 100644 (file)
@@ -163,13 +163,13 @@ extern struct ctl_path nf_net_ipv4_netfilter_sysctl_path[];
 extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 
 #if defined(CONFIG_JUMP_LABEL)
-#include <linux/jump_label.h>
-extern struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+#include <linux/static_key.h>
+extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook)
 {
        if (__builtin_constant_p(pf) &&
            __builtin_constant_p(hook))
-               return static_branch(&nf_hooks_needed[pf][hook]);
+               return static_key_false(&nf_hooks_needed[pf][hook]);
 
        return !list_empty(&nf_hooks[pf][hook]);
 }
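The __builtin_constant_p() test matters because each static_key_false() site must be wired to one specific key at build time; nf_hooks_needed[pf][hook] can only be patched when both indices are compile-time constants, otherwise the list check is used. A hedged sketch (slow_path() is hypothetical):

	/* Literal protocol/hook: compiles to a patchable static-key branch. */
	if (nf_hooks_active(NFPROTO_IPV4, NF_INET_LOCAL_IN))
		slow_path();

	/* Variable indices: no single key to patch, so list_empty() is used. */
	if (nf_hooks_active(pf, hook))
		slow_path();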
index abb2776..64426b7 100644 (file)
@@ -291,12 +291,14 @@ struct perf_event_mmap_page {
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */
+       __u32   time_mult, time_shift;
+       __u64   time_offset;
 
                /*
                 * Hole for extension of the self monitor capabilities
                 */
 
-       __u64   __reserved[123];        /* align to 1k */
+       __u64   __reserved[121];        /* align to 1k */
 
        /*
         * Control data for the mmap() data buffer.
@@ -512,7 +514,7 @@ struct perf_guest_info_callbacks {
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/irq_work.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/atomic.h>
 #include <asm/local.h>
 
@@ -616,6 +618,7 @@ struct pmu {
        struct list_head                entry;
 
        struct device                   *dev;
+       const struct attribute_group    **attr_groups;
        char                            *name;
        int                             type;
 
@@ -681,6 +684,12 @@ struct pmu {
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)              (struct pmu *pmu); /* optional */
+
+       /*
+        * Will return the value for perf_event_mmap_page::index for this event,
+        * if no implementation is provided it will default to: event->hw.idx + 1.
+        */
+       int (*event_idx)                (struct perf_event *event); /*optional */
 };
 
 /**
@@ -850,6 +859,9 @@ struct perf_event {
 #ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
+#ifdef CONFIG_FUNCTION_TRACER
+       struct ftrace_ops               ftrace_ops;
+#endif
 #endif
 
 #ifdef CONFIG_CGROUP_PERF
@@ -1029,7 +1041,7 @@ static inline int is_software_event(struct perf_event *event)
        return event->pmu->task_ctx_nr == perf_sw_context;
 }
 
-extern struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 extern void __perf_sw_event(u32, u64, struct pt_regs *, u64);
 
@@ -1057,7 +1069,7 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
 {
        struct pt_regs hot_regs;
 
-       if (static_branch(&perf_swevent_enabled[event_id])) {
+       if (static_key_false(&perf_swevent_enabled[event_id])) {
                if (!regs) {
                        perf_fetch_caller_regs(&hot_regs);
                        regs = &hot_regs;
@@ -1066,12 +1078,12 @@ perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
        }
 }
 
-extern struct jump_label_key_deferred perf_sched_events;
+extern struct static_key_deferred perf_sched_events;
 
 static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
 {
-       if (static_branch(&perf_sched_events.key))
+       if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);
 }
 
@@ -1080,7 +1092,7 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
 {
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, NULL, 0);
 
-       if (static_branch(&perf_sched_events.key))
+       if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_out(prev, next);
 }
 
diff --git a/include/linux/static_key.h b/include/linux/static_key.h
new file mode 100644 (file)
index 0000000..27bd3f8
--- /dev/null
@@ -0,0 +1 @@
+#include <linux/jump_label.h>
index df0a779..bd96ecd 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/errno.h>
 #include <linux/types.h>
 #include <linux/rcupdate.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 struct module;
 struct tracepoint;
@@ -29,7 +29,7 @@ struct tracepoint_func {
 
 struct tracepoint {
        const char *name;               /* Tracepoint name */
-       struct jump_label_key key;
+       struct static_key key;
        void (*regfunc)(void);
        void (*unregfunc)(void);
        struct tracepoint_func __rcu *funcs;
@@ -114,7 +114,7 @@ static inline void tracepoint_synchronize_unregister(void)
  * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
  * "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
  */
-#define __DO_TRACE(tp, proto, args, cond)                              \
+#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu)             \
        do {                                                            \
                struct tracepoint_func *it_func_ptr;                    \
                void *it_func;                                          \
@@ -122,6 +122,7 @@ static inline void tracepoint_synchronize_unregister(void)
                                                                        \
                if (!(cond))                                            \
                        return;                                         \
+               prercu;                                                 \
                rcu_read_lock_sched_notrace();                          \
                it_func_ptr = rcu_dereference_sched((tp)->funcs);       \
                if (it_func_ptr) {                                      \
@@ -132,6 +133,7 @@ static inline void tracepoint_synchronize_unregister(void)
                        } while ((++it_func_ptr)->func);                \
                }                                                       \
                rcu_read_unlock_sched_notrace();                        \
+               postrcu;                                                \
        } while (0)
 
 /*
@@ -139,15 +141,25 @@ static inline void tracepoint_synchronize_unregister(void)
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args)        \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
        extern struct tracepoint __tracepoint_##name;                   \
        static inline void trace_##name(proto)                          \
        {                                                               \
+               if (static_key_false(&__tracepoint_##name.key))         \
+                       __DO_TRACE(&__tracepoint_##name,                \
+                               TP_PROTO(data_proto),                   \
+                               TP_ARGS(data_args),                     \
+                               TP_CONDITION(cond),,);                  \
+       }                                                               \
+       static inline void trace_##name##_rcuidle(proto)                \
+       {                                                               \
                if (static_branch(&__tracepoint_##name.key))            \
                        __DO_TRACE(&__tracepoint_##name,                \
                                TP_PROTO(data_proto),                   \
                                TP_ARGS(data_args),                     \
-                               TP_CONDITION(cond));                    \
+                               TP_CONDITION(cond),                     \
+                               rcu_idle_exit(),                        \
+                               rcu_idle_enter());                      \
        }                                                               \
        static inline int                                               \
        register_trace_##name(void (*probe)(data_proto), void *data)    \
@@ -176,7 +188,7 @@ static inline void tracepoint_synchronize_unregister(void)
        __attribute__((section("__tracepoints_strings"))) = #name;       \
        struct tracepoint __tracepoint_##name                            \
        __attribute__((section("__tracepoints"))) =                      \
-               { __tpstrtab_##name, JUMP_LABEL_INIT, reg, unreg, NULL };\
+               { __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
        static struct tracepoint * const __tracepoint_ptr_##name __used  \
        __attribute__((section("__tracepoints_ptrs"))) =                 \
                &__tracepoint_##name;
@@ -190,9 +202,11 @@ static inline void tracepoint_synchronize_unregister(void)
        EXPORT_SYMBOL(__tracepoint_##name)
 
 #else /* !CONFIG_TRACEPOINTS */
-#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args)        \
+#define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
        static inline void trace_##name(proto)                          \
        { }                                                             \
+       static inline void trace_##name##_rcuidle(proto)                \
+       { }                                                             \
        static inline int                                               \
        register_trace_##name(void (*probe)(data_proto),                \
                              void *data)                               \
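The _rcuidle variants exist because tracepoint probes are protected by sched-RCU, which is not watching a CPU that has entered the idle loop; the new prercu/postrcu arguments bracket the probe iteration accordingly. For a hypothetical event foo, trace_foo_rcuidle(args) expands to roughly:

	if (static_branch(&__tracepoint_foo.key)) {
		rcu_idle_exit();		/* prercu: make RCU watch this CPU */
		rcu_read_lock_sched_notrace();
		/* iterate __tracepoint_foo.funcs, calling each probe */
		rcu_read_unlock_sched_notrace();
		rcu_idle_enter();		/* postrcu: back to RCU-idle */
	}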
index 91c1c8b..dcde2d9 100644 (file)
@@ -55,7 +55,7 @@
 #include <linux/uaccess.h>
 #include <linux/memcontrol.h>
 #include <linux/res_counter.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
@@ -924,13 +924,13 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
 #endif /* SOCK_REFCNT_DEBUG */
 
 #if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
-extern struct jump_label_key memcg_socket_limit_enabled;
+extern struct static_key memcg_socket_limit_enabled;
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
                                               struct cg_proto *cg_proto)
 {
        return proto->proto_cgroup(parent_mem_cgroup(cg_proto->memcg));
 }
-#define mem_cgroup_sockets_enabled static_branch(&memcg_socket_limit_enabled)
+#define mem_cgroup_sockets_enabled static_key_false(&memcg_socket_limit_enabled)
 #else
 #define mem_cgroup_sockets_enabled 0
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
index 1bcc2a8..14b3894 100644 (file)
@@ -151,6 +151,8 @@ enum {
    events get removed */
 static inline void trace_power_start(u64 type, u64 state, u64 cpuid) {};
 static inline void trace_power_end(u64 cpuid) {};
+static inline void trace_power_start_rcuidle(u64 type, u64 state, u64 cpuid) {};
+static inline void trace_power_end_rcuidle(u64 cpuid) {};
 static inline void trace_power_frequency(u64 type, u64 state, u64 cpuid) {};
 #endif /* _PWR_EVENT_AVOID_DOUBLE_DEFINING_DEPRECATED */
 
diff --git a/include/trace/events/printk.h b/include/trace/events/printk.h
new file mode 100644 (file)
index 0000000..94ec79c
--- /dev/null
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#if !defined(_TRACE_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PRINTK_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT_CONDITION(console,
+       TP_PROTO(const char *log_buf, unsigned start, unsigned end,
+                unsigned log_buf_len),
+
+       TP_ARGS(log_buf, start, end, log_buf_len),
+
+       TP_CONDITION(start != end),
+
+       TP_STRUCT__entry(
+               __dynamic_array(char, msg, end - start + 1)
+       ),
+
+       TP_fast_assign(
+               if ((start & (log_buf_len - 1)) > (end & (log_buf_len - 1))) {
+                       memcpy(__get_dynamic_array(msg),
+                              log_buf + (start & (log_buf_len - 1)),
+                              log_buf_len - (start & (log_buf_len - 1)));
+                       memcpy((char *)__get_dynamic_array(msg) +
+                              log_buf_len - (start & (log_buf_len - 1)),
+                              log_buf, end & (log_buf_len - 1));
+               } else
+                       memcpy(__get_dynamic_array(msg),
+                              log_buf + (start & (log_buf_len - 1)),
+                              end - start);
+               ((char *)__get_dynamic_array(msg))[end - start] = 0;
+       ),
+
+       TP_printk("%s", __get_str(msg))
+);
+#endif /* _TRACE_PRINTK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
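The TP_fast_assign above copies a possibly wrapped [start, end) range out of the circular log buffer (power-of-two sized, so masking with log_buf_len - 1 is the modulo). The same logic as a standalone helper, for clarity (hypothetical function, equivalent to the assign block):

	static void copy_log_range(char *dst, const char *log_buf,
				   unsigned log_buf_len, unsigned start,
				   unsigned end)
	{
		unsigned s = start & (log_buf_len - 1);
		unsigned e = end & (log_buf_len - 1);

		if (s > e) {
			/* Range wraps: copy the tail, then the head. */
			memcpy(dst, log_buf + s, log_buf_len - s);
			memcpy(dst + log_buf_len - s, log_buf, e);
		} else {
			memcpy(dst, log_buf + s, end - start);
		}
		dst[end - start] = '\0';
	}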
index e33ed1b..fbc7b1a 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/sched.h>
 #include <linux/tracepoint.h>
+#include <linux/binfmts.h>
 
 /*
  * Tracepoint for calling kthread_stop, performed to end a kthread:
@@ -276,6 +277,32 @@ TRACE_EVENT(sched_process_fork,
 );
 
 /*
+ * Tracepoint for exec:
+ */
+TRACE_EVENT(sched_process_exec,
+
+       TP_PROTO(struct task_struct *p, pid_t old_pid,
+                struct linux_binprm *bprm),
+
+       TP_ARGS(p, old_pid, bprm),
+
+       TP_STRUCT__entry(
+               __string(       filename,       bprm->filename  )
+               __field(        pid_t,          pid             )
+               __field(        pid_t,          old_pid         )
+       ),
+
+       TP_fast_assign(
+               __assign_str(filename, bprm->filename);
+               __entry->pid            = p->pid;
+               __entry->old_pid        = old_pid;
+       ),
+
+       TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
+                 __entry->pid, __entry->old_pid)
+);
+
+/*
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
  */
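The new tracepoint is meant to fire once exec has committed to the new image, capturing the pid from before any thread-group change. The call site, wired up elsewhere in this merge, looks roughly like this (placement and local naming hedged):

	pid_t old_pid = current->pid;	/* sampled before the exec point */
	/* ... load and commit the new binary ... */
	trace_sched_process_exec(current, old_pid, bprm);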
index 17df434..39a8a43 100644 (file)
                }                                               \
        } while (0)
 
+#ifndef TRACE_HEADER_MULTI_READ
+enum {
+       TRACE_SIGNAL_DELIVERED,
+       TRACE_SIGNAL_IGNORED,
+       TRACE_SIGNAL_ALREADY_PENDING,
+       TRACE_SIGNAL_OVERFLOW_FAIL,
+       TRACE_SIGNAL_LOSE_INFO,
+};
+#endif
+
 /**
  * signal_generate - called when a signal is generated
  * @sig: signal number
  * @info: pointer to struct siginfo
  * @task: pointer to struct task_struct
+ * @group: shared or private
+ * @result: TRACE_SIGNAL_*
  *
  * Current process sends a 'sig' signal to 'task' process with
  * 'info' siginfo. If 'info' is SEND_SIG_NOINFO or SEND_SIG_PRIV,
  */
 TRACE_EVENT(signal_generate,
 
-       TP_PROTO(int sig, struct siginfo *info, struct task_struct *task),
+       TP_PROTO(int sig, struct siginfo *info, struct task_struct *task,
+                       int group, int result),
 
-       TP_ARGS(sig, info, task),
+       TP_ARGS(sig, info, task, group, result),
 
        TP_STRUCT__entry(
                __field(        int,    sig                     )
@@ -47,6 +60,8 @@ TRACE_EVENT(signal_generate,
                __field(        int,    code                    )
                __array(        char,   comm,   TASK_COMM_LEN   )
                __field(        pid_t,  pid                     )
+               __field(        int,    group                   )
+               __field(        int,    result                  )
        ),
 
        TP_fast_assign(
@@ -54,11 +69,14 @@ TRACE_EVENT(signal_generate,
                TP_STORE_SIGINFO(__entry, info);
                memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
                __entry->pid    = task->pid;
+               __entry->group  = group;
+               __entry->result = result;
        ),
 
-       TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d",
+       TP_printk("sig=%d errno=%d code=%d comm=%s pid=%d grp=%d res=%d",
                  __entry->sig, __entry->errno, __entry->code,
-                 __entry->comm, __entry->pid)
+                 __entry->comm, __entry->pid, __entry->group,
+                 __entry->result)
 );
 
 /**
@@ -101,65 +119,6 @@ TRACE_EVENT(signal_deliver,
                  __entry->sa_handler, __entry->sa_flags)
 );
 
-DECLARE_EVENT_CLASS(signal_queue_overflow,
-
-       TP_PROTO(int sig, int group, struct siginfo *info),
-
-       TP_ARGS(sig, group, info),
-
-       TP_STRUCT__entry(
-               __field(        int,    sig     )
-               __field(        int,    group   )
-               __field(        int,    errno   )
-               __field(        int,    code    )
-       ),
-
-       TP_fast_assign(
-               __entry->sig    = sig;
-               __entry->group  = group;
-               TP_STORE_SIGINFO(__entry, info);
-       ),
-
-       TP_printk("sig=%d group=%d errno=%d code=%d",
-                 __entry->sig, __entry->group, __entry->errno, __entry->code)
-);
-
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
-
-       TP_PROTO(int sig, int group, struct siginfo *info),
-
-       TP_ARGS(sig, group, info)
-);
-
-/**
- * signal_lose_info - called when siginfo is lost
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel generates 'sig' signal but loses 'info' siginfo, because siginfo
- * queue is overflow.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of non-RT signals.
- */
-DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
-
-       TP_PROTO(int sig, int group, struct siginfo *info),
-
-       TP_ARGS(sig, group, info)
-);
-
 #endif /* _TRACE_SIGNAL_H */
 
 /* This part must be outside protection */
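With TRACE_SIGNAL_DELIVERED declared first, the new res field decodes positionally: 0 delivered, 1 ignored, 2 already pending, 3 overflow fail, 4 siginfo lost. A successfully queued signal would therefore render along these lines (values illustrative):

	signal_generate: sig=17 errno=0 code=1 comm=bash pid=1234 grp=1 res=0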
index 1b5c081..e8b32ac 100644 (file)
@@ -128,7 +128,7 @@ enum event_type_t {
  * perf_sched_events : >0 events exist
  * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
  */
-struct jump_label_key_deferred perf_sched_events __read_mostly;
+struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 
 static atomic_t nr_mmap_events __read_mostly;
@@ -2778,7 +2778,7 @@ static void free_event(struct perf_event *event)
 
        if (!event->parent) {
                if (event->attach_state & PERF_ATTACH_TASK)
-                       jump_label_dec_deferred(&perf_sched_events);
+                       static_key_slow_dec_deferred(&perf_sched_events);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_dec(&nr_mmap_events);
                if (event->attr.comm)
@@ -2789,7 +2789,7 @@ static void free_event(struct perf_event *event)
                        put_callchain_buffers();
                if (is_cgroup_event(event)) {
                        atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-                       jump_label_dec_deferred(&perf_sched_events);
+                       static_key_slow_dec_deferred(&perf_sched_events);
                }
        }
 
@@ -3238,10 +3238,6 @@ int perf_event_task_disable(void)
        return 0;
 }
 
-#ifndef PERF_EVENT_INDEX_OFFSET
-# define PERF_EVENT_INDEX_OFFSET 0
-#endif
-
 static int perf_event_index(struct perf_event *event)
 {
        if (event->hw.state & PERF_HES_STOPPED)
@@ -3250,21 +3246,26 @@ static int perf_event_index(struct perf_event *event)
        if (event->state != PERF_EVENT_STATE_ACTIVE)
                return 0;
 
-       return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
+       return event->pmu->event_idx(event);
 }
 
 static void calc_timer_values(struct perf_event *event,
+                               u64 *now,
                                u64 *enabled,
                                u64 *running)
 {
-       u64 now, ctx_time;
+       u64 ctx_time;
 
-       now = perf_clock();
-       ctx_time = event->shadow_ctx_time + now;
+       *now = perf_clock();
+       ctx_time = event->shadow_ctx_time + *now;
        *enabled = ctx_time - event->tstamp_enabled;
        *running = ctx_time - event->tstamp_running;
 }
 
+void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+{
+}
+
 /*
  * Callers need to ensure there can be no nesting of this function, otherwise
  * the seqlock logic goes bad. We can not serialize this because the arch
@@ -3274,7 +3275,7 @@ void perf_event_update_userpage(struct perf_event *event)
 {
        struct perf_event_mmap_page *userpg;
        struct ring_buffer *rb;
-       u64 enabled, running;
+       u64 enabled, running, now;
 
        rcu_read_lock();
        /*
@@ -3286,7 +3287,7 @@ void perf_event_update_userpage(struct perf_event *event)
         * because of locking issue as we can be called in
         * NMI context
         */
-       calc_timer_values(event, &enabled, &running);
+       calc_timer_values(event, &now, &enabled, &running);
        rb = rcu_dereference(event->rb);
        if (!rb)
                goto unlock;
@@ -3302,7 +3303,7 @@ void perf_event_update_userpage(struct perf_event *event)
        barrier();
        userpg->index = perf_event_index(event);
        userpg->offset = perf_event_count(event);
-       if (event->state == PERF_EVENT_STATE_ACTIVE)
+       if (userpg->index)
                userpg->offset -= local64_read(&event->hw.prev_count);
 
        userpg->time_enabled = enabled +
@@ -3311,6 +3312,8 @@ void perf_event_update_userpage(struct perf_event *event)
        userpg->time_running = running +
                        atomic64_read(&event->child_total_time_running);
 
+       perf_update_user_clock(userpg, now);
+
        barrier();
        ++userpg->lock;
        preempt_enable();
@@ -3568,6 +3571,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        event->mmap_user = get_current_user();
        vma->vm_mm->pinned_vm += event->mmap_locked;
 
+       perf_event_update_userpage(event);
+
 unlock:
        if (!ret)
                atomic_inc(&event->mmap_count);
@@ -3799,7 +3804,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 static void perf_output_read(struct perf_output_handle *handle,
                             struct perf_event *event)
 {
-       u64 enabled = 0, running = 0;
+       u64 enabled = 0, running = 0, now;
        u64 read_format = event->attr.read_format;
 
        /*
@@ -3812,7 +3817,7 @@ static void perf_output_read(struct perf_output_handle *handle,
         * NMI context
         */
        if (read_format & PERF_FORMAT_TOTAL_TIMES)
-               calc_timer_values(event, &enabled, &running);
+               calc_timer_values(event, &now, &enabled, &running);
 
        if (event->attr.read_format & PERF_FORMAT_GROUP)
                perf_output_read_group(handle, event, enabled, running);
@@ -4986,7 +4991,7 @@ fail:
        return err;
 }
 
-struct jump_label_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
+struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
 
 static void sw_perf_event_destroy(struct perf_event *event)
 {
@@ -4994,7 +4999,7 @@ static void sw_perf_event_destroy(struct perf_event *event)
 
        WARN_ON(event->parent);
 
-       jump_label_dec(&perf_swevent_enabled[event_id]);
+       static_key_slow_dec(&perf_swevent_enabled[event_id]);
        swevent_hlist_put(event);
 }
 
@@ -5024,13 +5029,18 @@ static int perf_swevent_init(struct perf_event *event)
                if (err)
                        return err;
 
-               jump_label_inc(&perf_swevent_enabled[event_id]);
+               static_key_slow_inc(&perf_swevent_enabled[event_id]);
                event->destroy = sw_perf_event_destroy;
        }
 
        return 0;
 }
 
+static int perf_swevent_event_idx(struct perf_event *event)
+{
+       return 0;
+}
+
 static struct pmu perf_swevent = {
        .task_ctx_nr    = perf_sw_context,
 
@@ -5040,6 +5050,8 @@ static struct pmu perf_swevent = {
        .start          = perf_swevent_start,
        .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
+
+       .event_idx      = perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -5126,6 +5138,8 @@ static struct pmu perf_tracepoint = {
        .start          = perf_swevent_start,
        .stop           = perf_swevent_stop,
        .read           = perf_swevent_read,
+
+       .event_idx      = perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -5345,6 +5359,8 @@ static struct pmu perf_cpu_clock = {
        .start          = cpu_clock_event_start,
        .stop           = cpu_clock_event_stop,
        .read           = cpu_clock_event_read,
+
+       .event_idx      = perf_swevent_event_idx,
 };
 
 /*
@@ -5417,6 +5433,8 @@ static struct pmu perf_task_clock = {
        .start          = task_clock_event_start,
        .stop           = task_clock_event_stop,
        .read           = task_clock_event_read,
+
+       .event_idx      = perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -5444,6 +5462,11 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
        perf_pmu_enable(pmu);
 }
 
+static int perf_event_idx_default(struct perf_event *event)
+{
+       return event->hw.idx + 1;
+}
+
 /*
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
@@ -5530,6 +5553,7 @@ static int pmu_dev_alloc(struct pmu *pmu)
        if (!pmu->dev)
                goto out;
 
+       pmu->dev->groups = pmu->attr_groups;
        device_initialize(pmu->dev);
        ret = dev_set_name(pmu->dev, "%s", pmu->name);
        if (ret)
@@ -5633,6 +5657,9 @@ got_cpu_context:
                pmu->pmu_disable = perf_pmu_nop_void;
        }
 
+       if (!pmu->event_idx)
+               pmu->event_idx = perf_event_idx_default;
+
        list_add_rcu(&pmu->entry, &pmus);
        ret = 0;
 unlock:
@@ -5825,7 +5852,7 @@ done:
 
        if (!event->parent) {
                if (event->attach_state & PERF_ATTACH_TASK)
-                       jump_label_inc(&perf_sched_events.key);
+                       static_key_slow_inc(&perf_sched_events.key);
                if (event->attr.mmap || event->attr.mmap_data)
                        atomic_inc(&nr_mmap_events);
                if (event->attr.comm)
@@ -6063,7 +6090,7 @@ SYSCALL_DEFINE5(perf_event_open,
                 * - that may need work on context switch
                 */
                atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-               jump_label_inc(&perf_sched_events.key);
+               static_key_slow_inc(&perf_sched_events.key);
        }
 
        /*
index ee706ce..3330022 100644 (file)
@@ -613,6 +613,11 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags)
        bp->hw.state = PERF_HES_STOPPED;
 }
 
+static int hw_breakpoint_event_idx(struct perf_event *bp)
+{
+       return 0;
+}
+
 static struct pmu perf_breakpoint = {
        .task_ctx_nr    = perf_sw_context, /* could eventually get its own */
 
@@ -622,6 +627,8 @@ static struct pmu perf_breakpoint = {
        .start          = hw_breakpoint_start,
        .stop           = hw_breakpoint_stop,
        .read           = hw_breakpoint_pmu_read,
+
+       .event_idx      = hw_breakpoint_event_idx,
 };
 
 int __init init_hw_breakpoint(void)
index fb7db75..838687e 100644 (file)
@@ -16,6 +16,8 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 
+#include <trace/events/irq.h>
+
 #include "internals.h"
 
 /**
index 01d3b70..4304919 100644 (file)
@@ -12,7 +12,7 @@
 #include <linux/slab.h>
 #include <linux/sort.h>
 #include <linux/err.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 #ifdef HAVE_JUMP_LABEL
 
@@ -29,11 +29,6 @@ void jump_label_unlock(void)
        mutex_unlock(&jump_label_mutex);
 }
 
-bool jump_label_enabled(struct jump_label_key *key)
-{
-       return !!atomic_read(&key->enabled);
-}
-
 static int jump_label_cmp(const void *a, const void *b)
 {
        const struct jump_entry *jea = a;
@@ -58,56 +53,66 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 }
 
-static void jump_label_update(struct jump_label_key *key, int enable);
+static void jump_label_update(struct static_key *key, int enable);
 
-void jump_label_inc(struct jump_label_key *key)
+void static_key_slow_inc(struct static_key *key)
 {
        if (atomic_inc_not_zero(&key->enabled))
                return;
 
        jump_label_lock();
-       if (atomic_read(&key->enabled) == 0)
-               jump_label_update(key, JUMP_LABEL_ENABLE);
+       if (atomic_read(&key->enabled) == 0) {
+               if (!jump_label_get_branch_default(key))
+                       jump_label_update(key, JUMP_LABEL_ENABLE);
+               else
+                       jump_label_update(key, JUMP_LABEL_DISABLE);
+       }
        atomic_inc(&key->enabled);
        jump_label_unlock();
 }
-EXPORT_SYMBOL_GPL(jump_label_inc);
+EXPORT_SYMBOL_GPL(static_key_slow_inc);
 
-static void __jump_label_dec(struct jump_label_key *key,
+static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
 {
-       if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
+       if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
+               WARN(atomic_read(&key->enabled) < 0,
+                    "jump label: negative count!\n");
                return;
+       }
 
        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
-       } else
-               jump_label_update(key, JUMP_LABEL_DISABLE);
-
+       } else {
+               if (!jump_label_get_branch_default(key))
+                       jump_label_update(key, JUMP_LABEL_DISABLE);
+               else
+                       jump_label_update(key, JUMP_LABEL_ENABLE);
+       }
        jump_label_unlock();
 }
-EXPORT_SYMBOL_GPL(jump_label_dec);
 
 static void jump_label_update_timeout(struct work_struct *work)
 {
-       struct jump_label_key_deferred *key =
-               container_of(work, struct jump_label_key_deferred, work.work);
-       __jump_label_dec(&key->key, 0, NULL);
+       struct static_key_deferred *key =
+               container_of(work, struct static_key_deferred, work.work);
+       __static_key_slow_dec(&key->key, 0, NULL);
 }
 
-void jump_label_dec(struct jump_label_key *key)
+void static_key_slow_dec(struct static_key *key)
 {
-       __jump_label_dec(key, 0, NULL);
+       __static_key_slow_dec(key, 0, NULL);
 }
+EXPORT_SYMBOL_GPL(static_key_slow_dec);
 
-void jump_label_dec_deferred(struct jump_label_key_deferred *key)
+void static_key_slow_dec_deferred(struct static_key_deferred *key)
 {
-       __jump_label_dec(&key->key, key->timeout, &key->work);
+       __static_key_slow_dec(&key->key, key->timeout, &key->work);
 }
+EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
 
-
-void jump_label_rate_limit(struct jump_label_key_deferred *key,
+void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
 {
        key->timeout = rl;
@@ -150,7 +155,7 @@ void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry
        arch_jump_label_transform(entry, type); 
 }
 
-static void __jump_label_update(struct jump_label_key *key,
+static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop, int enable)
 {
@@ -167,27 +172,40 @@ static void __jump_label_update(struct jump_label_key *key,
        }
 }
 
+static enum jump_label_type jump_label_type(struct static_key *key)
+{
+       bool true_branch = jump_label_get_branch_default(key);
+       bool state = static_key_enabled(key);
+
+       if ((!true_branch && state) || (true_branch && !state))
+               return JUMP_LABEL_ENABLE;
+
+       return JUMP_LABEL_DISABLE;
+}
+
 void __init jump_label_init(void)
 {
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
-       struct jump_label_key *key = NULL;
+       struct static_key *key = NULL;
        struct jump_entry *iter;
 
        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);
 
        for (iter = iter_start; iter < iter_stop; iter++) {
-               struct jump_label_key *iterk;
+               struct static_key *iterk;
 
-               iterk = (struct jump_label_key *)(unsigned long)iter->key;
-               arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
-                                                JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+               iterk = (struct static_key *)(unsigned long)iter->key;
+               arch_jump_label_transform_static(iter, jump_label_type(iterk));
                if (iterk == key)
                        continue;
 
                key = iterk;
-               key->entries = iter;
+               /*
+                * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
+                */
+               *((unsigned long *)&key->entries) += (unsigned long)iter;
 #ifdef CONFIG_MODULES
                key->next = NULL;
 #endif
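Since the two-clause test in jump_label_type() is just the exclusive-or of the compile-time default with the runtime state, an equivalent formulation (a sketch, not in this patch) is:

	static enum jump_label_type jump_label_type_xor(struct static_key *key)
	{
		if (jump_label_get_branch_default(key) != static_key_enabled(key))
			return JUMP_LABEL_ENABLE;
		return JUMP_LABEL_DISABLE;
	}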
@@ -197,8 +215,8 @@ void __init jump_label_init(void)
 
 #ifdef CONFIG_MODULES
 
-struct jump_label_mod {
-       struct jump_label_mod *next;
+struct static_key_mod {
+       struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
 };
@@ -218,9 +236,9 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
                                start, end);
 }
 
-static void __jump_label_mod_update(struct jump_label_key *key, int enable)
+static void __jump_label_mod_update(struct static_key *key, int enable)
 {
-       struct jump_label_mod *mod = key->next;
+       struct static_key_mod *mod = key->next;
 
        while (mod) {
                struct module *m = mod->mod;
@@ -251,11 +269,7 @@ void jump_label_apply_nops(struct module *mod)
                return;
 
        for (iter = iter_start; iter < iter_stop; iter++) {
-               struct jump_label_key *iterk;
-
-               iterk = (struct jump_label_key *)(unsigned long)iter->key;
-               arch_jump_label_transform_static(iter, jump_label_enabled(iterk) ?
-                               JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE);
+               arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
        }
 }
 
@@ -264,8 +278,8 @@ static int jump_label_add_module(struct module *mod)
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
-       struct jump_label_key *key = NULL;
-       struct jump_label_mod *jlm;
+       struct static_key *key = NULL;
+       struct static_key_mod *jlm;
 
        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
@@ -274,28 +288,30 @@ static int jump_label_add_module(struct module *mod)
        jump_label_sort_entries(iter_start, iter_stop);
 
        for (iter = iter_start; iter < iter_stop; iter++) {
-               if (iter->key == (jump_label_t)(unsigned long)key)
-                       continue;
+               struct static_key *iterk;
 
-               key = (struct jump_label_key *)(unsigned long)iter->key;
+               iterk = (struct static_key *)(unsigned long)iter->key;
+               if (iterk == key)
+                       continue;
 
+               key = iterk;
                if (__module_address(iter->key) == mod) {
-                       atomic_set(&key->enabled, 0);
-                       key->entries = iter;
+                       /*
+                        * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
+                        */
+                       *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
-
-               jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
+               jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
-
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;
 
-               if (jump_label_enabled(key))
+               if (jump_label_type(key) == JUMP_LABEL_ENABLE)
                        __jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
        }
 
@@ -307,14 +323,14 @@ static void jump_label_del_module(struct module *mod)
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
-       struct jump_label_key *key = NULL;
-       struct jump_label_mod *jlm, **prev;
+       struct static_key *key = NULL;
+       struct static_key_mod *jlm, **prev;
 
        for (iter = iter_start; iter < iter_stop; iter++) {
                if (iter->key == (jump_label_t)(unsigned long)key)
                        continue;
 
-               key = (struct jump_label_key *)(unsigned long)iter->key;
+               key = (struct static_key *)(unsigned long)iter->key;
 
                if (__module_address(iter->key) == mod)
                        continue;
@@ -416,12 +432,13 @@ int jump_label_text_reserved(void *start, void *end)
        return ret;
 }
 
-static void jump_label_update(struct jump_label_key *key, int enable)
+static void jump_label_update(struct static_key *key, int enable)
 {
-       struct jump_entry *entry = key->entries, *stop = __stop___jump_table;
+       struct jump_entry *stop = __stop___jump_table;
+       struct jump_entry *entry = jump_label_get_entries(key);
 
 #ifdef CONFIG_MODULES
-       struct module *mod = __module_address((jump_label_t)key);
+       struct module *mod = __module_address((unsigned long)key);
 
        __jump_label_mod_update(key, enable);
 
index 32690a0..0b3ea2c 100644 (file)
@@ -44,6 +44,9 @@
 
 #include <asm/uaccess.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/printk.h>
+
 /*
  * Architectures can override it:
  */
@@ -542,6 +545,8 @@ MODULE_PARM_DESC(ignore_loglevel, "ignore loglevel setting, to"
 static void _call_console_drivers(unsigned start,
                                unsigned end, int msg_log_level)
 {
+       trace_console(&LOG_BUF(0), start, end, log_buf_len);
+
        if ((msg_log_level < console_loglevel || ignore_loglevel) &&
                        console_drivers && start != end) {
                if ((start & LOG_BUF_MASK) > (end & LOG_BUF_MASK)) {
index b342f57..6c41ba4 100644 (file)
@@ -162,13 +162,13 @@ static int sched_feat_show(struct seq_file *m, void *v)
 
 #ifdef HAVE_JUMP_LABEL
 
-#define jump_label_key__true  jump_label_key_enabled
-#define jump_label_key__false jump_label_key_disabled
+#define jump_label_key__true  STATIC_KEY_INIT_TRUE
+#define jump_label_key__false STATIC_KEY_INIT_FALSE
 
 #define SCHED_FEAT(name, enabled)      \
        jump_label_key__##enabled ,
 
-struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
+struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 #include "features.h"
 };
 
@@ -176,14 +176,14 @@ struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR] = {
 
 static void sched_feat_disable(int i)
 {
-       if (jump_label_enabled(&sched_feat_keys[i]))
-               jump_label_dec(&sched_feat_keys[i]);
+       if (static_key_enabled(&sched_feat_keys[i]))
+               static_key_slow_dec(&sched_feat_keys[i]);
 }
 
 static void sched_feat_enable(int i)
 {
-       if (!jump_label_enabled(&sched_feat_keys[i]))
-               jump_label_inc(&sched_feat_keys[i]);
+       if (!static_key_enabled(&sched_feat_keys[i]))
+               static_key_slow_inc(&sched_feat_keys[i]);
 }
 #else
 static void sched_feat_disable(int i) { };
@@ -894,7 +894,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta)
        delta -= irq_delta;
 #endif
 #ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
-       if (static_branch((&paravirt_steal_rq_enabled))) {
+       if (static_key_false((&paravirt_steal_rq_enabled))) {
                u64 st;
 
                steal = paravirt_steal_clock(cpu_of(rq));
@@ -2755,7 +2755,7 @@ void account_idle_time(cputime_t cputime)
 static __always_inline bool steal_account_process_tick(void)
 {
 #ifdef CONFIG_PARAVIRT
-       if (static_branch(&paravirt_steal_enabled)) {
+       if (static_key_false(&paravirt_steal_enabled)) {
                u64 steal, st = 0;
 
                steal = paravirt_steal_clock(smp_processor_id());
index aca16b8..fd974fa 100644 (file)
@@ -1401,20 +1401,20 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
 #ifdef CONFIG_CFS_BANDWIDTH
 
 #ifdef HAVE_JUMP_LABEL
-static struct jump_label_key __cfs_bandwidth_used;
+static struct static_key __cfs_bandwidth_used;
 
 static inline bool cfs_bandwidth_used(void)
 {
-       return static_branch(&__cfs_bandwidth_used);
+       return static_key_false(&__cfs_bandwidth_used);
 }
 
 void account_cfs_bandwidth_used(int enabled, int was_enabled)
 {
        /* only need to count groups transitioning between enabled/!enabled */
        if (enabled && !was_enabled)
-               jump_label_inc(&__cfs_bandwidth_used);
+               static_key_slow_inc(&__cfs_bandwidth_used);
        else if (!enabled && was_enabled)
-               jump_label_dec(&__cfs_bandwidth_used);
+               static_key_slow_dec(&__cfs_bandwidth_used);
 }
 #else /* HAVE_JUMP_LABEL */
 static bool cfs_bandwidth_used(void)
index 98c0c26..b4cd6d8 100644 (file)
@@ -611,7 +611,7 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
  */
 #ifdef CONFIG_SCHED_DEBUG
-# include <linux/jump_label.h>
+# include <linux/static_key.h>
 # define const_debug __read_mostly
 #else
 # define const_debug const
@@ -630,18 +630,18 @@ enum {
 #undef SCHED_FEAT
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
-static __always_inline bool static_branch__true(struct jump_label_key *key)
+static __always_inline bool static_branch__true(struct static_key *key)
 {
-       return likely(static_branch(key)); /* Not out of line branch. */
+       return static_key_true(key); /* Not out of line branch. */
 }
 
-static __always_inline bool static_branch__false(struct jump_label_key *key)
+static __always_inline bool static_branch__false(struct static_key *key)
 {
-       return unlikely(static_branch(key)); /* Out of line branch. */
+       return static_key_false(key); /* Out of line branch. */
 }
 
 #define SCHED_FEAT(name, enabled)                                      \
-static __always_inline bool static_branch_##name(struct jump_label_key *key) \
+static __always_inline bool static_branch_##name(struct static_key *key) \
 {                                                                      \
        return static_branch__##enabled(key);                           \
 }
@@ -650,7 +650,7 @@ static __always_inline bool static_branch_##name(struct jump_label_key *key) \
 
 #undef SCHED_FEAT
 
-extern struct jump_label_key sched_feat_keys[__SCHED_FEAT_NR];
+extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
 #define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
 #else /* !(SCHED_DEBUG && HAVE_JUMP_LABEL) */
 #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
index c73c428..8511e39 100644 (file)
@@ -1054,13 +1054,13 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
        struct sigpending *pending;
        struct sigqueue *q;
        int override_rlimit;
-
-       trace_signal_generate(sig, info, t);
+       int ret = 0, result;
 
        assert_spin_locked(&t->sighand->siglock);
 
+       result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t, from_ancestor_ns))
-               return 0;
+               goto ret;
 
        pending = group ? &t->signal->shared_pending : &t->pending;
        /*
@@ -1068,8 +1068,11 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
         * exactly one non-rt signal, so that we can get more
         * detailed information about the cause of the signal.
         */
+       result = TRACE_SIGNAL_ALREADY_PENDING;
        if (legacy_queue(pending, sig))
-               return 0;
+               goto ret;
+
+       result = TRACE_SIGNAL_DELIVERED;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
@@ -1127,14 +1130,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
                         * signal was rt and sent by user using something
                         * other than kill().
                         */
-                       trace_signal_overflow_fail(sig, group, info);
-                       return -EAGAIN;
+                       result = TRACE_SIGNAL_OVERFLOW_FAIL;
+                       ret = -EAGAIN;
+                       goto ret;
                } else {
                        /*
                         * This is a silent loss of information.  We still
                         * send the signal, but the *info bits are lost.
                         */
-                       trace_signal_lose_info(sig, group, info);
+                       result = TRACE_SIGNAL_LOSE_INFO;
                }
        }
 
@@ -1142,7 +1146,9 @@ out_set:
        signalfd_notify(t, sig);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
-       return 0;
+ret:
+       trace_signal_generate(sig, info, t, group, result);
+       return ret;
 }
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
@@ -1585,7 +1591,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
        int sig = q->info.si_signo;
        struct sigpending *pending;
        unsigned long flags;
-       int ret;
+       int ret, result;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
@@ -1594,6 +1600,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
                goto ret;
 
        ret = 1; /* the signal is ignored */
+       result = TRACE_SIGNAL_IGNORED;
        if (!prepare_signal(sig, t, 0))
                goto out;
 
@@ -1605,6 +1612,7 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
                 */
                BUG_ON(q->info.si_code != SI_TIMER);
                q->info.si_overrun++;
+               result = TRACE_SIGNAL_ALREADY_PENDING;
                goto out;
        }
        q->info.si_overrun = 0;
@@ -1614,7 +1622,9 @@ int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
        list_add_tail(&q->list, &pending->list);
        sigaddset(&pending->signal, sig);
        complete_signal(sig, t, group);
+       result = TRACE_SIGNAL_DELIVERED;
 out:
+       trace_signal_generate(sig, &q->info, t, group, result);
        unlock_task_sighand(t, &flags);
 ret:
        return ret;
index 4eb3a0f..06d4099 100644 (file)
@@ -385,6 +385,12 @@ void raise_softirq(unsigned int nr)
        local_irq_restore(flags);
 }
 
+void __raise_softirq_irqoff(unsigned int nr)
+{
+       trace_softirq_raise(nr);
+       or_softirq_pending(1UL << nr);
+}
+
 void open_softirq(int nr, void (*action)(struct softirq_action *))
 {
        softirq_vec[nr].action = action;
index 683d559..867bd1d 100644 (file)
@@ -62,6 +62,8 @@
 #define FTRACE_HASH_DEFAULT_BITS 10
 #define FTRACE_HASH_MAX_BITS 12
 
+#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
@@ -89,12 +91,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 };
 
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
+static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
 static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
+static struct ftrace_ops control_ops;
 
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
@@ -168,6 +172,32 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
 }
 #endif
 
+static void control_ops_disable_all(struct ftrace_ops *ops)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               *per_cpu_ptr(ops->disabled, cpu) = 1;
+}
+
+static int control_ops_alloc(struct ftrace_ops *ops)
+{
+       int __percpu *disabled;
+
+       disabled = alloc_percpu(int);
+       if (!disabled)
+               return -ENOMEM;
+
+       ops->disabled = disabled;
+       control_ops_disable_all(ops);
+       return 0;
+}
+
+static void control_ops_free(struct ftrace_ops *ops)
+{
+       free_percpu(ops->disabled);
+}
+
 static void update_global_ops(void)
 {
        ftrace_func_t func;
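
The per-CPU ->disabled counter initialized above is what ftrace_ops_control_func() and the perf enable/disable paths below consult. The local enable/disable helpers live in include/linux/ftrace.h (not shown in this diff) and plausibly reduce to a sketch like:

        /* sketch, assuming the include/linux/ftrace.h side of this series */
        static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
        {
                (*this_cpu_ptr(ops->disabled))--;       /* 0 == enabled on this CPU */
        }

        static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
        {
                (*this_cpu_ptr(ops->disabled))++;
        }

        static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
        {
                return *this_cpu_ptr(ops->disabled);
        }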
@@ -259,6 +289,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
        return 0;
 }
 
+static void add_ftrace_list_ops(struct ftrace_ops **list,
+                               struct ftrace_ops *main_ops,
+                               struct ftrace_ops *ops)
+{
+       int first = *list == &ftrace_list_end;
+       add_ftrace_ops(list, ops);
+       if (first)
+               add_ftrace_ops(&ftrace_ops_list, main_ops);
+}
+
+static int remove_ftrace_list_ops(struct ftrace_ops **list,
+                                 struct ftrace_ops *main_ops,
+                                 struct ftrace_ops *ops)
+{
+       int ret = remove_ftrace_ops(list, ops);
+       if (!ret && *list == &ftrace_list_end)
+               ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
+       return ret;
+}
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
        if (ftrace_disabled)
@@ -270,15 +320,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
        if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                return -EBUSY;
 
+       /* We don't support both control and global flags set. */
+       if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
+               return -EINVAL;
+
        if (!core_kernel_data((unsigned long)ops))
                ops->flags |= FTRACE_OPS_FL_DYNAMIC;
 
        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               int first = ftrace_global_list == &ftrace_list_end;
-               add_ftrace_ops(&ftrace_global_list, ops);
+               add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                ops->flags |= FTRACE_OPS_FL_ENABLED;
-               if (first)
-                       add_ftrace_ops(&ftrace_ops_list, &global_ops);
+       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+               if (control_ops_alloc(ops))
+                       return -ENOMEM;
+               add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
        } else
                add_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -302,11 +357,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
                return -EINVAL;
 
        if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
-               ret = remove_ftrace_ops(&ftrace_global_list, ops);
-               if (!ret && ftrace_global_list == &ftrace_list_end)
-                       ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
+               ret = remove_ftrace_list_ops(&ftrace_global_list,
+                                            &global_ops, ops);
                if (!ret)
                        ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
+               ret = remove_ftrace_list_ops(&ftrace_control_list,
+                                            &control_ops, ops);
+               if (!ret) {
+                       /*
+                        * The ftrace_ops is now removed from the list,
+                        * so there'll be no new users. We must ensure
+                        * all current users are done before we free
+                        * the control data.
+                        */
+                       synchronize_sched();
+                       control_ops_free(ops);
+               }
        } else
                ret = remove_ftrace_ops(&ftrace_ops_list, ops);
 
@@ -1119,6 +1186,12 @@ static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
        call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
 }
 
+void ftrace_free_filter(struct ftrace_ops *ops)
+{
+       free_ftrace_hash(ops->filter_hash);
+       free_ftrace_hash(ops->notrace_hash);
+}
+
 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
 {
        struct ftrace_hash *hash;
@@ -1129,7 +1202,7 @@ static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
                return NULL;
 
        size = 1 << size_bits;
-       hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
+       hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);
 
        if (!hash->buckets) {
                kfree(hash);
@@ -3146,8 +3219,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
        mutex_lock(&ftrace_regex_lock);
        if (reset)
                ftrace_filter_reset(hash);
-       if (buf)
-               ftrace_match_records(hash, buf, len);
+       if (buf && !ftrace_match_records(hash, buf, len)) {
+               ret = -EINVAL;
+               goto out_regex_unlock;
+       }
 
        mutex_lock(&ftrace_lock);
        ret = ftrace_hash_move(ops, enable, orig_hash, hash);
@@ -3157,6 +3232,7 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
 
        mutex_unlock(&ftrace_lock);
 
+ out_regex_unlock:
        mutex_unlock(&ftrace_regex_lock);
 
        free_ftrace_hash(hash);
@@ -3173,10 +3249,10 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
  * Filters denote which functions should be enabled when tracing is enabled.
  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
  */
-void ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
                       int len, int reset)
 {
-       ftrace_set_regex(ops, buf, len, reset, 1);
+       return ftrace_set_regex(ops, buf, len, reset, 1);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_filter);
 
@@ -3191,10 +3267,10 @@ EXPORT_SYMBOL_GPL(ftrace_set_filter);
  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
  * for tracing.
  */
-void ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
+int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
                        int len, int reset)
 {
-       ftrace_set_regex(ops, buf, len, reset, 0);
+       return ftrace_set_regex(ops, buf, len, reset, 0);
 }
 EXPORT_SYMBOL_GPL(ftrace_set_notrace);
 /**
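
With the void-to-int conversion here and above, callers can finally tell when a filter matched nothing; a hypothetical caller sketch:

        static void my_func(unsigned long ip, unsigned long parent_ip) { }

        static struct ftrace_ops my_ops = { .func = my_func };

        static int my_setup(void)
        {
                unsigned char buf[] = "vfs_read";
                int ret;

                /* now returns -EINVAL instead of silently tracing nothing */
                ret = ftrace_set_filter(&my_ops, buf, sizeof(buf) - 1, 1);
                if (ret)
                        return ret;
                return register_ftrace_function(&my_ops);
        }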
@@ -3871,6 +3947,36 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
 static void
+ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_ops *op;
+
+       if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
+               return;
+
+       /*
+        * Some of the ops may be dynamically allocated,
+        * they must be freed after a synchronize_sched().
+        */
+       preempt_disable_notrace();
+       trace_recursion_set(TRACE_CONTROL_BIT);
+       op = rcu_dereference_raw(ftrace_control_list);
+       while (op != &ftrace_list_end) {
+               if (!ftrace_function_local_disabled(op) &&
+                   ftrace_ops_test(op, ip))
+                       op->func(ip, parent_ip);
+
+               op = rcu_dereference_raw(op->next);
+       }
+       trace_recursion_clear(TRACE_CONTROL_BIT);
+       preempt_enable_notrace();
+}
+
+static struct ftrace_ops control_ops = {
+       .func = ftrace_ops_control_func,
+};
+
+static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
        struct ftrace_ops *op;
index a3f1bc5..10d5503 100644 (file)
@@ -2764,12 +2764,12 @@ static const char readme_msg[] =
        "tracing mini-HOWTO:\n\n"
        "# mount -t debugfs nodev /sys/kernel/debug\n\n"
        "# cat /sys/kernel/debug/tracing/available_tracers\n"
-       "wakeup preemptirqsoff preemptoff irqsoff function sched_switch nop\n\n"
+       "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
        "# cat /sys/kernel/debug/tracing/current_tracer\n"
        "nop\n"
-       "# echo sched_switch > /sys/kernel/debug/tracing/current_tracer\n"
+       "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
        "# cat /sys/kernel/debug/tracing/current_tracer\n"
-       "sched_switch\n"
+       "wakeup\n"
        "# cat /sys/kernel/debug/tracing/trace_options\n"
        "noprint-parent nosym-offset nosym-addr noverbose\n"
        "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
index b93ecba..54faec7 100644 (file)
@@ -56,17 +56,23 @@ enum trace_type {
 #define F_STRUCT(args...)              args
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)    \
-       struct struct_name {                                    \
-               struct trace_entry      ent;                    \
-               tstruct                                         \
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)    \
+       struct struct_name {                                            \
+               struct trace_entry      ent;                            \
+               tstruct                                                 \
        }
 
 #undef TP_ARGS
 #define TP_ARGS(args...)       args
 
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk)
+#define FTRACE_ENTRY_DUP(name, name_struct, id, tstruct, printk, filter)
+
+#undef FTRACE_ENTRY_REG
+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print,        \
+                        filter, regfn) \
+       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
+                    filter)
 
 #include "trace_entries.h"
 
@@ -288,6 +294,8 @@ struct tracer {
 /* for function tracing recursion */
 #define TRACE_INTERNAL_BIT             (1<<11)
 #define TRACE_GLOBAL_BIT               (1<<12)
+#define TRACE_CONTROL_BIT              (1<<13)
+
 /*
  * Abuse of the trace_recursion.
  * As we need a way to maintain state if we are tracing the function
@@ -589,6 +597,8 @@ static inline int ftrace_trace_task(struct task_struct *task)
 static inline int ftrace_is_dead(void) { return 0; }
 #endif
 
+int ftrace_event_is_function(struct ftrace_event_call *call);
+
 /*
 * struct trace_parser - serves for reading the user input separated by spaces
  * @cont: set if the input is not complete - no final space char was found
@@ -766,9 +776,7 @@ struct filter_pred {
        u64                     val;
        struct regex            regex;
        unsigned short          *ops;
-#ifdef CONFIG_FTRACE_STARTUP_TEST
        struct ftrace_event_field *field;
-#endif
        int                     offset;
        int                     not;
        int                     op;
@@ -818,12 +826,22 @@ extern const char *__start___trace_bprintk_fmt[];
 extern const char *__stop___trace_bprintk_fmt[];
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, id, tstruct, print)            \
+#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter)    \
        extern struct ftrace_event_call                                 \
        __attribute__((__aligned__(4))) event_##call;
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print)                \
-       FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print))
+#define FTRACE_ENTRY_DUP(call, struct_name, id, tstruct, print, filter)        \
+       FTRACE_ENTRY(call, struct_name, id, PARAMS(tstruct), PARAMS(print), \
+                    filter)
 #include "trace_entries.h"
 
+#ifdef CONFIG_PERF_EVENTS
+#ifdef CONFIG_FUNCTION_TRACER
+int perf_ftrace_event_register(struct ftrace_event_call *call,
+                              enum trace_reg type, void *data);
+#else
+#define perf_ftrace_event_register NULL
+#endif /* CONFIG_FUNCTION_TRACER */
+#endif /* CONFIG_PERF_EVENTS */
+
 #endif /* _LINUX_KERNEL_TRACE_H */
index 9336590..d91eb05 100644 (file)
@@ -55,7 +55,7 @@
 /*
  * Function trace entry - function address and parent function address:
  */
-FTRACE_ENTRY(function, ftrace_entry,
+FTRACE_ENTRY_REG(function, ftrace_entry,
 
        TRACE_FN,
 
@@ -64,7 +64,11 @@ FTRACE_ENTRY(function, ftrace_entry,
                __field(        unsigned long,  parent_ip       )
        ),
 
-       F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip)
+       F_printk(" %lx <-- %lx", __entry->ip, __entry->parent_ip),
+
+       FILTER_TRACE_FN,
+
+       perf_ftrace_event_register
 );
 
 /* Function call entry */
@@ -78,7 +82,9 @@ FTRACE_ENTRY(funcgraph_entry, ftrace_graph_ent_entry,
                __field_desc(   int,            graph_ent,      depth           )
        ),
 
-       F_printk("--> %lx (%d)", __entry->func, __entry->depth)
+       F_printk("--> %lx (%d)", __entry->func, __entry->depth),
+
+       FILTER_OTHER
 );
 
 /* Function return entry */
@@ -98,7 +104,9 @@ FTRACE_ENTRY(funcgraph_exit, ftrace_graph_ret_entry,
        F_printk("<-- %lx (%d) (start: %llx  end: %llx) over: %d",
                 __entry->func, __entry->depth,
                 __entry->calltime, __entry->rettime,
-                __entry->depth)
+                __entry->depth),
+
+       FILTER_OTHER
 );
 
 /*
@@ -127,8 +135,9 @@ FTRACE_ENTRY(context_switch, ctx_switch_entry,
        F_printk("%u:%u:%u  ==> %u:%u:%u [%03u]",
                 __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
                 __entry->next_pid, __entry->next_prio, __entry->next_state,
-                __entry->next_cpu
-               )
+                __entry->next_cpu),
+
+       FILTER_OTHER
 );
 
 /*
@@ -146,8 +155,9 @@ FTRACE_ENTRY_DUP(wakeup, ctx_switch_entry,
        F_printk("%u:%u:%u  ==+ %u:%u:%u [%03u]",
                 __entry->prev_pid, __entry->prev_prio, __entry->prev_state,
                 __entry->next_pid, __entry->next_prio, __entry->next_state,
-                __entry->next_cpu
-               )
+                __entry->next_cpu),
+
+       FILTER_OTHER
 );
 
 /*
@@ -169,7 +179,9 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
                 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
                 __entry->caller[0], __entry->caller[1], __entry->caller[2],
                 __entry->caller[3], __entry->caller[4], __entry->caller[5],
-                __entry->caller[6], __entry->caller[7])
+                __entry->caller[6], __entry->caller[7]),
+
+       FILTER_OTHER
 );
 
 FTRACE_ENTRY(user_stack, userstack_entry,
@@ -185,7 +197,9 @@ FTRACE_ENTRY(user_stack, userstack_entry,
                 "\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n\t=> (%08lx)\n",
                 __entry->caller[0], __entry->caller[1], __entry->caller[2],
                 __entry->caller[3], __entry->caller[4], __entry->caller[5],
-                __entry->caller[6], __entry->caller[7])
+                __entry->caller[6], __entry->caller[7]),
+
+       FILTER_OTHER
 );
 
 /*
@@ -202,7 +216,9 @@ FTRACE_ENTRY(bprint, bprint_entry,
        ),
 
        F_printk("%08lx fmt:%p",
-                __entry->ip, __entry->fmt)
+                __entry->ip, __entry->fmt),
+
+       FILTER_OTHER
 );
 
 FTRACE_ENTRY(print, print_entry,
@@ -215,7 +231,9 @@ FTRACE_ENTRY(print, print_entry,
        ),
 
        F_printk("%08lx %s",
-                __entry->ip, __entry->buf)
+                __entry->ip, __entry->buf),
+
+       FILTER_OTHER
 );
 
 FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
@@ -234,7 +252,9 @@ FTRACE_ENTRY(mmiotrace_rw, trace_mmiotrace_rw,
 
        F_printk("%lx %lx %lx %d %x %x",
                 (unsigned long)__entry->phys, __entry->value, __entry->pc,
-                __entry->map_id, __entry->opcode, __entry->width)
+                __entry->map_id, __entry->opcode, __entry->width),
+
+       FILTER_OTHER
 );
 
 FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
@@ -252,7 +272,9 @@ FTRACE_ENTRY(mmiotrace_map, trace_mmiotrace_map,
 
        F_printk("%lx %lx %lx %d %x",
                 (unsigned long)__entry->phys, __entry->virt, __entry->len,
-                __entry->map_id, __entry->opcode)
+                __entry->map_id, __entry->opcode),
+
+       FILTER_OTHER
 );
 
 
@@ -272,6 +294,8 @@ FTRACE_ENTRY(branch, trace_branch,
 
        F_printk("%u:%s:%s (%u)",
                 __entry->line,
-                __entry->func, __entry->file, __entry->correct)
+                __entry->func, __entry->file, __entry->correct),
+
+       FILTER_OTHER
 );
 
index 19a359d..fee3752 100644 (file)
@@ -24,6 +24,11 @@ static int   total_ref_count;
 static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
                                 struct perf_event *p_event)
 {
+       /* The ftrace function trace is allowed only for root. */
+       if (ftrace_event_is_function(tp_event) &&
+           perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        /* No tracing, just counting, so no obvious leak */
        if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
                return 0;
@@ -44,23 +49,17 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
        return 0;
 }
 
-static int perf_trace_event_init(struct ftrace_event_call *tp_event,
-                                struct perf_event *p_event)
+static int perf_trace_event_reg(struct ftrace_event_call *tp_event,
+                               struct perf_event *p_event)
 {
        struct hlist_head __percpu *list;
-       int ret;
+       int ret = -ENOMEM;
        int cpu;
 
-       ret = perf_trace_event_perm(tp_event, p_event);
-       if (ret)
-               return ret;
-
        p_event->tp_event = tp_event;
        if (tp_event->perf_refcount++ > 0)
                return 0;
 
-       ret = -ENOMEM;
-
        list = alloc_percpu(struct hlist_head);
        if (!list)
                goto fail;
@@ -83,7 +82,7 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
                }
        }
 
-       ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
+       ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
        if (ret)
                goto fail;
 
@@ -108,6 +107,69 @@ fail:
        return ret;
 }
 
+static void perf_trace_event_unreg(struct perf_event *p_event)
+{
+       struct ftrace_event_call *tp_event = p_event->tp_event;
+       int i;
+
+       if (--tp_event->perf_refcount > 0)
+               goto out;
+
+       tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);
+
+       /*
+        * Ensure our callback won't be called anymore. The buffers
+        * will be freed after that.
+        */
+       tracepoint_synchronize_unregister();
+
+       free_percpu(tp_event->perf_events);
+       tp_event->perf_events = NULL;
+
+       if (!--total_ref_count) {
+               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
+                       free_percpu(perf_trace_buf[i]);
+                       perf_trace_buf[i] = NULL;
+               }
+       }
+out:
+       module_put(tp_event->mod);
+}
+
+static int perf_trace_event_open(struct perf_event *p_event)
+{
+       struct ftrace_event_call *tp_event = p_event->tp_event;
+       return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
+}
+
+static void perf_trace_event_close(struct perf_event *p_event)
+{
+       struct ftrace_event_call *tp_event = p_event->tp_event;
+       tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
+}
+
+static int perf_trace_event_init(struct ftrace_event_call *tp_event,
+                                struct perf_event *p_event)
+{
+       int ret;
+
+       ret = perf_trace_event_perm(tp_event, p_event);
+       if (ret)
+               return ret;
+
+       ret = perf_trace_event_reg(tp_event, p_event);
+       if (ret)
+               return ret;
+
+       ret = perf_trace_event_open(p_event);
+       if (ret) {
+               perf_trace_event_unreg(p_event);
+               return ret;
+       }
+
+       return 0;
+}
+
 int perf_trace_init(struct perf_event *p_event)
 {
        struct ftrace_event_call *tp_event;
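
Summarizing the refactoring above, the class->reg() callback now sees the full perf lifecycle; the mapping (already visible in this file) is:

        /*
         * perf_trace_init()    -> perf_trace_event_perm()
         *                      -> perf_trace_event_reg()   : TRACE_REG_PERF_REGISTER
         *                      -> perf_trace_event_open()  : TRACE_REG_PERF_OPEN
         * perf_trace_add()     ->                            TRACE_REG_PERF_ADD
         * perf_trace_del()     ->                            TRACE_REG_PERF_DEL
         * perf_trace_destroy() -> perf_trace_event_close() : TRACE_REG_PERF_CLOSE
         *                      -> perf_trace_event_unreg() : TRACE_REG_PERF_UNREGISTER
         */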
@@ -130,6 +192,14 @@ int perf_trace_init(struct perf_event *p_event)
        return ret;
 }
 
+void perf_trace_destroy(struct perf_event *p_event)
+{
+       mutex_lock(&event_mutex);
+       perf_trace_event_close(p_event);
+       perf_trace_event_unreg(p_event);
+       mutex_unlock(&event_mutex);
+}
+
 int perf_trace_add(struct perf_event *p_event, int flags)
 {
        struct ftrace_event_call *tp_event = p_event->tp_event;
@@ -146,43 +216,14 @@ int perf_trace_add(struct perf_event *p_event, int flags)
        list = this_cpu_ptr(pcpu_list);
        hlist_add_head_rcu(&p_event->hlist_entry, list);
 
-       return 0;
+       return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
 }
 
 void perf_trace_del(struct perf_event *p_event, int flags)
 {
-       hlist_del_rcu(&p_event->hlist_entry);
-}
-
-void perf_trace_destroy(struct perf_event *p_event)
-{
        struct ftrace_event_call *tp_event = p_event->tp_event;
-       int i;
-
-       mutex_lock(&event_mutex);
-       if (--tp_event->perf_refcount > 0)
-               goto out;
-
-       tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
-
-       /*
-        * Ensure our callback won't be called anymore. The buffers
-        * will be freed after that.
-        */
-       tracepoint_synchronize_unregister();
-
-       free_percpu(tp_event->perf_events);
-       tp_event->perf_events = NULL;
-
-       if (!--total_ref_count) {
-               for (i = 0; i < PERF_NR_CONTEXTS; i++) {
-                       free_percpu(perf_trace_buf[i]);
-                       perf_trace_buf[i] = NULL;
-               }
-       }
-out:
-       module_put(tp_event->mod);
-       mutex_unlock(&event_mutex);
+       hlist_del_rcu(&p_event->hlist_entry);
+       tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
 }
 
 __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
@@ -214,3 +255,86 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
        return raw_data;
 }
 EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
+
+#ifdef CONFIG_FUNCTION_TRACER
+static void
+perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip)
+{
+       struct ftrace_entry *entry;
+       struct hlist_head *head;
+       struct pt_regs regs;
+       int rctx;
+
+#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
+                   sizeof(u64)) - sizeof(u32))
+
+       BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);
+
+       perf_fetch_caller_regs(&regs);
+
+       entry = perf_trace_buf_prepare(ENTRY_SIZE, TRACE_FN, NULL, &rctx);
+       if (!entry)
+               return;
+
+       entry->ip = ip;
+       entry->parent_ip = parent_ip;
+
+       head = this_cpu_ptr(event_function.perf_events);
+       perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0,
+                             1, &regs, head);
+
+#undef ENTRY_SIZE
+}
+
+static int perf_ftrace_function_register(struct perf_event *event)
+{
+       struct ftrace_ops *ops = &event->ftrace_ops;
+
+       ops->flags |= FTRACE_OPS_FL_CONTROL;
+       ops->func = perf_ftrace_function_call;
+       return register_ftrace_function(ops);
+}
+
+static int perf_ftrace_function_unregister(struct perf_event *event)
+{
+       struct ftrace_ops *ops = &event->ftrace_ops;
+       int ret = unregister_ftrace_function(ops);
+       ftrace_free_filter(ops);
+       return ret;
+}
+
+static void perf_ftrace_function_enable(struct perf_event *event)
+{
+       ftrace_function_local_enable(&event->ftrace_ops);
+}
+
+static void perf_ftrace_function_disable(struct perf_event *event)
+{
+       ftrace_function_local_disable(&event->ftrace_ops);
+}
+
+int perf_ftrace_event_register(struct ftrace_event_call *call,
+                              enum trace_reg type, void *data)
+{
+       switch (type) {
+       case TRACE_REG_REGISTER:
+       case TRACE_REG_UNREGISTER:
+               break;
+       case TRACE_REG_PERF_REGISTER:
+       case TRACE_REG_PERF_UNREGISTER:
+               return 0;
+       case TRACE_REG_PERF_OPEN:
+               return perf_ftrace_function_register(data);
+       case TRACE_REG_PERF_CLOSE:
+               return perf_ftrace_function_unregister(data);
+       case TRACE_REG_PERF_ADD:
+               perf_ftrace_function_enable(data);
+               return 0;
+       case TRACE_REG_PERF_DEL:
+               perf_ftrace_function_disable(data);
+               return 0;
+       }
+
+       return -EINVAL;
+}
+#endif /* CONFIG_FUNCTION_TRACER */
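
The user-visible payoff of this plumbing, together with the tooling changes elsewhere in this merge, is that ftrace:function becomes an ordinary perf event for root, e.g. something like 'perf record -e ftrace:function -- ls' (illustrative invocation), optionally narrowed by an ip filter as handled in trace_events_filter.c below.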
index c212a7f..079a93a 100644 (file)
@@ -147,7 +147,8 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 }
 EXPORT_SYMBOL_GPL(trace_event_raw_init);
 
-int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
+int ftrace_event_reg(struct ftrace_event_call *call,
+                    enum trace_reg type, void *data)
 {
        switch (type) {
        case TRACE_REG_REGISTER:
@@ -170,6 +171,11 @@ int ftrace_event_reg(struct ftrace_event_call *call, enum trace_reg type)
                                            call->class->perf_probe,
                                            call);
                return 0;
+       case TRACE_REG_PERF_OPEN:
+       case TRACE_REG_PERF_CLOSE:
+       case TRACE_REG_PERF_ADD:
+       case TRACE_REG_PERF_DEL:
+               return 0;
 #endif
        }
        return 0;
@@ -209,7 +215,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
                                tracing_stop_cmdline_record();
                                call->flags &= ~TRACE_EVENT_FL_RECORDED_CMD;
                        }
-                       call->class->reg(call, TRACE_REG_UNREGISTER);
+                       call->class->reg(call, TRACE_REG_UNREGISTER, NULL);
                }
                break;
        case 1:
@@ -218,7 +224,7 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
                                tracing_start_cmdline_record();
                                call->flags |= TRACE_EVENT_FL_RECORDED_CMD;
                        }
-                       ret = call->class->reg(call, TRACE_REG_REGISTER);
+                       ret = call->class->reg(call, TRACE_REG_REGISTER, NULL);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event "
index 24aee71..431dba8 100644 (file)
@@ -81,6 +81,7 @@ enum {
        FILT_ERR_TOO_MANY_PREDS,
        FILT_ERR_MISSING_FIELD,
        FILT_ERR_INVALID_FILTER,
+       FILT_ERR_IP_FIELD_ONLY,
 };
 
 static char *err_text[] = {
@@ -96,6 +97,7 @@ static char *err_text[] = {
        "Too many terms in predicate expression",
        "Missing field name and/or value",
        "Meaningless filter expression",
+       "Only 'ip' field is supported for function trace",
 };
 
 struct opstack_op {
@@ -685,7 +687,7 @@ find_event_field(struct ftrace_event_call *call, char *name)
 
 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
 {
-       stack->preds = kzalloc(sizeof(*stack->preds)*(n_preds + 1), GFP_KERNEL);
+       stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
        if (!stack->preds)
                return -ENOMEM;
        stack->index = n_preds;
@@ -826,8 +828,7 @@ static int __alloc_preds(struct event_filter *filter, int n_preds)
        if (filter->preds)
                __free_preds(filter);
 
-       filter->preds =
-               kzalloc(sizeof(*filter->preds) * n_preds, GFP_KERNEL);
+       filter->preds = kcalloc(n_preds, sizeof(*filter->preds), GFP_KERNEL);
 
        if (!filter->preds)
                return -ENOMEM;
@@ -900,6 +901,11 @@ int filter_assign_type(const char *type)
        return FILTER_OTHER;
 }
 
+static bool is_function_field(struct ftrace_event_field *field)
+{
+       return field->filter_type == FILTER_TRACE_FN;
+}
+
 static bool is_string_field(struct ftrace_event_field *field)
 {
        return field->filter_type == FILTER_DYN_STRING ||
@@ -987,6 +993,11 @@ static int init_pred(struct filter_parse_state *ps,
                        fn = filter_pred_strloc;
                else
                        fn = filter_pred_pchar;
+       } else if (is_function_field(field)) {
+               if (strcmp(field->name, "ip")) {
+                       parse_error(ps, FILT_ERR_IP_FIELD_ONLY, 0);
+                       return -EINVAL;
+               }
        } else {
                if (field->is_signed)
                        ret = strict_strtoll(pred->regex.pattern, 0, &val);
@@ -1334,10 +1345,7 @@ static struct filter_pred *create_pred(struct filter_parse_state *ps,
 
        strcpy(pred.regex.pattern, operand2);
        pred.regex.len = strlen(pred.regex.pattern);
-
-#ifdef CONFIG_FTRACE_STARTUP_TEST
        pred.field = field;
-#endif
        return init_pred(ps, field, &pred) ? NULL : &pred;
 }
 
@@ -1486,7 +1494,7 @@ static int fold_pred(struct filter_pred *preds, struct filter_pred *root)
        children = count_leafs(preds, &preds[root->left]);
        children += count_leafs(preds, &preds[root->right]);
 
-       root->ops = kzalloc(sizeof(*root->ops) * children, GFP_KERNEL);
+       root->ops = kcalloc(children, sizeof(*root->ops), GFP_KERNEL);
        if (!root->ops)
                return -ENOMEM;
 
@@ -1950,6 +1958,148 @@ void ftrace_profile_free_filter(struct perf_event *event)
        __free_filter(filter);
 }
 
+struct function_filter_data {
+       struct ftrace_ops *ops;
+       int first_filter;
+       int first_notrace;
+};
+
+#ifdef CONFIG_FUNCTION_TRACER
+static char **
+ftrace_function_filter_re(char *buf, int len, int *count)
+{
+       char *str, *sep, **re;
+
+       str = kstrndup(buf, len, GFP_KERNEL);
+       if (!str)
+               return NULL;
+
+       /*
+        * The argv_split function takes white space
+        * as a separator, so convert ',' into spaces.
+        */
+       while ((sep = strchr(str, ',')))
+               *sep = ' ';
+
+       re = argv_split(GFP_KERNEL, str, count);
+       kfree(str);
+       return re;
+}
+
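For example (hypothetical input), a filter value of "sys_read,sys_write sys_open" is rewritten to "sys_read sys_write sys_open", and argv_split() then yields three regex strings to apply one by one.
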
+static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter,
+                                     int reset, char *re, int len)
+{
+       int ret;
+
+       if (filter)
+               ret = ftrace_set_filter(ops, re, len, reset);
+       else
+               ret = ftrace_set_notrace(ops, re, len, reset);
+
+       return ret;
+}
+
+static int __ftrace_function_set_filter(int filter, char *buf, int len,
+                                       struct function_filter_data *data)
+{
+       int i, re_cnt, ret;
+       int *reset;
+       char **re;
+
+       reset = filter ? &data->first_filter : &data->first_notrace;
+
+       /*
+        * The 'ip' field could have multiple filters set, separated
+        * either by space or comma. We first split the filter and apply
+        * each piece separately.
+        */
+       re = ftrace_function_filter_re(buf, len, &re_cnt);
+       if (!re)
+               return -EINVAL;
+
+       for (i = 0; i < re_cnt; i++) {
+               ret = ftrace_function_set_regexp(data->ops, filter, *reset,
+                                                re[i], strlen(re[i]));
+               if (ret)
+                       break;
+
+               if (*reset)
+                       *reset = 0;
+       }
+
+       argv_free(re);
+       return ret;
+}
+
+static int ftrace_function_check_pred(struct filter_pred *pred, int leaf)
+{
+       struct ftrace_event_field *field = pred->field;
+
+       if (leaf) {
+               /*
+                * Check the leaf predicate for function trace, verify:
+                *  - only '==' and '!=' is used
+                *  - the 'ip' field is used
+                */
+               if ((pred->op != OP_EQ) && (pred->op != OP_NE))
+                       return -EINVAL;
+
+               if (strcmp(field->name, "ip"))
+                       return -EINVAL;
+       } else {
+               /*
+                * Check the non-leaf predicate for function trace, verify:
+                *  - only '||' is used
+                */
+               if (pred->op != OP_OR)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int ftrace_function_set_filter_cb(enum move_type move,
+                                        struct filter_pred *pred,
+                                        int *err, void *data)
+{
+       /* Check that the node is valid for function trace. */
+       if ((move != MOVE_DOWN) ||
+           (pred->left != FILTER_PRED_INVALID)) {
+               *err = ftrace_function_check_pred(pred, 0);
+       } else {
+               *err = ftrace_function_check_pred(pred, 1);
+               if (*err)
+                       return WALK_PRED_ABORT;
+
+               *err = __ftrace_function_set_filter(pred->op == OP_EQ,
+                                                   pred->regex.pattern,
+                                                   pred->regex.len,
+                                                   data);
+       }
+
+       return (*err) ? WALK_PRED_ABORT : WALK_PRED_DEFAULT;
+}
+
+static int ftrace_function_set_filter(struct perf_event *event,
+                                     struct event_filter *filter)
+{
+       struct function_filter_data data = {
+               .first_filter  = 1,
+               .first_notrace = 1,
+               .ops           = &event->ftrace_ops,
+       };
+
+       return walk_pred_tree(filter->preds, filter->root,
+                             ftrace_function_set_filter_cb, &data);
+}
+#else
+static int ftrace_function_set_filter(struct perf_event *event,
+                                     struct event_filter *filter)
+{
+       return -ENODEV;
+}
+#endif /* CONFIG_FUNCTION_TRACER */
+
 int ftrace_profile_set_filter(struct perf_event *event, int event_id,
                              char *filter_str)
 {
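
Given the predicate checks above, only filters of the following shape survive for the function event (strings are illustrative; each pattern is handed to ftrace_set_filter()/ftrace_set_notrace(), which also accept glob matches):

        /* accepted */
        "ip == sys_read"
        "ip == sys_read || ip == sys_write"
        "ip != schedule"

        /* rejected with -EINVAL */
        "ip == sys_read && ip == sys_write"     /* only '||' may join predicates */
        "parent_ip == sys_read"                 /* only the 'ip' field is allowed */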
@@ -1970,9 +2120,16 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
                goto out_unlock;
 
        err = create_filter(call, filter_str, false, &filter);
-       if (!err)
-               event->filter = filter;
+       if (err)
+               goto free_filter;
+
+       if (ftrace_event_is_function(call))
+               err = ftrace_function_set_filter(event, filter);
        else
+               event->filter = filter;
+
+free_filter:
+       if (err || ftrace_event_is_function(call))
                __free_filter(filter);
 
 out_unlock:
index bbeec31..7b46c9b 100644 (file)
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM   ftrace
 
+/*
+ * The FTRACE_ENTRY_REG macro allows an ftrace entry to define a register
+ * function and thus become accessible via perf.
+ */
+#undef FTRACE_ENTRY_REG
+#define FTRACE_ENTRY_REG(name, struct_name, id, tstruct, print, \
+                        filter, regfn) \
+       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
+                    filter)
+
 /* not needed for this file */
 #undef __field_struct
 #define __field_struct(type, item)
 #define F_printk(fmt, args...) fmt, args
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)    \
-struct ____ftrace_##name {                                     \
-       tstruct                                                 \
-};                                                             \
-static void __always_unused ____ftrace_check_##name(void)      \
-{                                                              \
-       struct ____ftrace_##name *__entry = NULL;               \
-                                                               \
-       /* force compile-time check on F_printk() */            \
-       printk(print);                                          \
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)    \
+struct ____ftrace_##name {                                             \
+       tstruct                                                         \
+};                                                                     \
+static void __always_unused ____ftrace_check_##name(void)              \
+{                                                                      \
+       struct ____ftrace_##name *__entry = NULL;                       \
+                                                                       \
+       /* force compile-time check on F_printk() */                    \
+       printk(print);                                                  \
 }
 
 #undef FTRACE_ENTRY_DUP
-#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print)        \
-       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print))
+#define FTRACE_ENTRY_DUP(name, struct_name, id, tstruct, print, filter)        \
+       FTRACE_ENTRY(name, struct_name, id, PARAMS(tstruct), PARAMS(print), \
+                    filter)
 
 #include "trace_entries.h"
 
@@ -67,7 +78,7 @@ static void __always_unused ____ftrace_check_##name(void)     \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
-                                is_signed_type(type), FILTER_OTHER);   \
+                                is_signed_type(type), filter_type);    \
        if (ret)                                                        \
                return ret;
 
@@ -77,7 +88,7 @@ static void __always_unused ____ftrace_check_##name(void)     \
                                 offsetof(typeof(field),                \
                                          container.item),              \
                                 sizeof(field.container.item),          \
-                                is_signed_type(type), FILTER_OTHER);   \
+                                is_signed_type(type), filter_type);    \
        if (ret)                                                        \
                return ret;
 
@@ -91,7 +102,7 @@ static void __always_unused ____ftrace_check_##name(void)    \
                ret = trace_define_field(event_call, event_storage, #item, \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
-                                is_signed_type(type), FILTER_OTHER);   \
+                                is_signed_type(type), filter_type);    \
                mutex_unlock(&event_storage_mutex);                     \
                if (ret)                                                \
                        return ret;                                     \
@@ -104,7 +115,7 @@ static void __always_unused ____ftrace_check_##name(void)   \
                                 offsetof(typeof(field),                \
                                          container.item),              \
                                 sizeof(field.container.item),          \
-                                is_signed_type(type), FILTER_OTHER);   \
+                                is_signed_type(type), filter_type);    \
        if (ret)                                                        \
                return ret;
 
@@ -112,17 +123,18 @@ static void __always_unused ____ftrace_check_##name(void) \
 #define __dynamic_array(type, item)                                    \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
-                                0, is_signed_type(type), FILTER_OTHER);\
+                                0, is_signed_type(type), filter_type);\
        if (ret)                                                        \
                return ret;
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(name, struct_name, id, tstruct, print)            \
+#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)    \
 int                                                                    \
 ftrace_define_fields_##name(struct ftrace_event_call *event_call)      \
 {                                                                      \
        struct struct_name field;                                       \
        int ret;                                                        \
+       int filter_type = filter;                                       \
                                                                        \
        tstruct;                                                        \
                                                                        \
@@ -152,13 +164,15 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call) \
 #undef F_printk
 #define F_printk(fmt, args...) #fmt ", "  __stringify(args)
 
-#undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print)         \
+#undef FTRACE_ENTRY_REG
+#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
+                        regfn)                                         \
                                                                        \
 struct ftrace_event_class event_class_ftrace_##call = {                        \
        .system                 = __stringify(TRACE_SYSTEM),            \
        .define_fields          = ftrace_define_fields_##call,          \
        .fields                 = LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
+       .reg                    = regfn,                                \
 };                                                                     \
                                                                        \
 struct ftrace_event_call __used event_##call = {                       \
@@ -170,4 +184,14 @@ struct ftrace_event_call __used event_##call = {                   \
 struct ftrace_event_call __used                                                \
 __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call;
 
+#undef FTRACE_ENTRY
+#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print, filter) \
+       FTRACE_ENTRY_REG(call, struct_name, etype,                      \
+                        PARAMS(tstruct), PARAMS(print), filter, NULL)
+
+int ftrace_event_is_function(struct ftrace_event_call *call)
+{
+       return call == &event_function;
+}
+
 #include "trace_entries.h"
index 00d527c..580a05e 100644 (file)
@@ -1892,7 +1892,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 #endif /* CONFIG_PERF_EVENTS */
 
 static __kprobes
-int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
+int kprobe_register(struct ftrace_event_call *event,
+                   enum trace_reg type, void *data)
 {
        struct trace_probe *tp = (struct trace_probe *)event->data;
 
@@ -1909,6 +1910,11 @@ int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
        case TRACE_REG_PERF_UNREGISTER:
                disable_trace_probe(tp, TP_FLAG_PROFILE);
                return 0;
+       case TRACE_REG_PERF_OPEN:
+       case TRACE_REG_PERF_CLOSE:
+       case TRACE_REG_PERF_ADD:
+       case TRACE_REG_PERF_DEL:
+               return 0;
 #endif
        }
        return 0;
index 0d6ff35..c5a0187 100644 (file)
@@ -300,7 +300,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
        unsigned long mask;
        const char *str;
        const char *ret = p->buffer + p->len;
-       int i;
+       int i, first = 1;
 
        for (i = 0;  flag_array[i].name && flags; i++) {
 
@@ -310,14 +310,16 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 
                str = flag_array[i].name;
                flags &= ~mask;
-               if (p->len && delim)
+               if (!first && delim)
                        trace_seq_puts(p, delim);
+               else
+                       first = 0;
                trace_seq_puts(p, str);
        }
 
        /* check for left over flags */
        if (flags) {
-               if (p->len && delim)
+               if (!first && delim)
                        trace_seq_puts(p, delim);
                trace_seq_printf(p, "0x%lx", flags);
        }
@@ -344,7 +346,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                break;
        }
 
-       if (!p->len)
+       if (ret == (const char *)(p->buffer + p->len))
                trace_seq_printf(p, "0x%lx", val);
                
        trace_seq_putc(p, 0);
@@ -370,7 +372,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
                break;
        }
 
-       if (!p->len)
+       if (ret == (const char *)(p->buffer + p->len))
                trace_seq_printf(p, "0x%llx", val);
 
        trace_seq_putc(p, 0);
index cb65454..96fc733 100644 (file)
@@ -17,9 +17,9 @@ static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
 static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
 
 static int syscall_enter_register(struct ftrace_event_call *event,
-                                enum trace_reg type);
+                                enum trace_reg type, void *data);
 static int syscall_exit_register(struct ftrace_event_call *event,
-                                enum trace_reg type);
+                                enum trace_reg type, void *data);
 
 static int syscall_enter_define_fields(struct ftrace_event_call *call);
 static int syscall_exit_define_fields(struct ftrace_event_call *call);
@@ -468,8 +468,8 @@ int __init init_ftrace_syscalls(void)
        unsigned long addr;
        int i;
 
-       syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
-                                       NR_syscalls, GFP_KERNEL);
+       syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
+                                   GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
                return -ENOMEM;
@@ -649,7 +649,7 @@ void perf_sysexit_disable(struct ftrace_event_call *call)
 #endif /* CONFIG_PERF_EVENTS */
 
 static int syscall_enter_register(struct ftrace_event_call *event,
-                                enum trace_reg type)
+                                enum trace_reg type, void *data)
 {
        switch (type) {
        case TRACE_REG_REGISTER:
@@ -664,13 +664,18 @@ static int syscall_enter_register(struct ftrace_event_call *event,
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysenter_disable(event);
                return 0;
+       case TRACE_REG_PERF_OPEN:
+       case TRACE_REG_PERF_CLOSE:
+       case TRACE_REG_PERF_ADD:
+       case TRACE_REG_PERF_DEL:
+               return 0;
 #endif
        }
        return 0;
 }
 
 static int syscall_exit_register(struct ftrace_event_call *event,
-                                enum trace_reg type)
+                                enum trace_reg type, void *data)
 {
        switch (type) {
        case TRACE_REG_REGISTER:
@@ -685,6 +690,11 @@ static int syscall_exit_register(struct ftrace_event_call *event,
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysexit_disable(event);
                return 0;
+       case TRACE_REG_PERF_OPEN:
+       case TRACE_REG_PERF_CLOSE:
+       case TRACE_REG_PERF_ADD:
+       case TRACE_REG_PERF_DEL:
+               return 0;
 #endif
        }
        return 0;
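
The remaining hunks mechanically rename the jump-label API; a minimal sketch of the static-key naming they convert to (do_unlikely_work() is a placeholder; the key defaults to false and the branch is arch-patched where jump labels are available):

        static struct static_key key = STATIC_KEY_INIT_FALSE;

        static void fast_path(void)
        {
                if (static_key_false(&key))     /* default-off, patched branch */
                        do_unlikely_work();
        }

        /* slow-path toggles (may patch code, not for IRQ context): */
        /*      static_key_slow_inc(&key);      static_key_slow_dec(&key);      */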
index f1539de..d96ba22 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 
 extern struct tracepoint * const __start___tracepoints_ptrs[];
 extern struct tracepoint * const __stop___tracepoints_ptrs[];
@@ -256,9 +256,9 @@ static void set_tracepoint(struct tracepoint_entry **entry,
 {
        WARN_ON(strcmp((*entry)->name, elem->name) != 0);
 
-       if (elem->regfunc && !jump_label_enabled(&elem->key) && active)
+       if (elem->regfunc && !static_key_enabled(&elem->key) && active)
                elem->regfunc();
-       else if (elem->unregfunc && jump_label_enabled(&elem->key) && !active)
+       else if (elem->unregfunc && static_key_enabled(&elem->key) && !active)
                elem->unregfunc();
 
        /*
@@ -269,10 +269,10 @@ static void set_tracepoint(struct tracepoint_entry **entry,
         * is used.
         */
        rcu_assign_pointer(elem->funcs, (*entry)->funcs);
-       if (active && !jump_label_enabled(&elem->key))
-               jump_label_inc(&elem->key);
-       else if (!active && jump_label_enabled(&elem->key))
-               jump_label_dec(&elem->key);
+       if (active && !static_key_enabled(&elem->key))
+               static_key_slow_inc(&elem->key);
+       else if (!active && static_key_enabled(&elem->key))
+               static_key_slow_dec(&elem->key);
 }
 
 /*
@@ -283,11 +283,11 @@ static void set_tracepoint(struct tracepoint_entry **entry,
  */
 static void disable_tracepoint(struct tracepoint *elem)
 {
-       if (elem->unregfunc && jump_label_enabled(&elem->key))
+       if (elem->unregfunc && static_key_enabled(&elem->key))
                elem->unregfunc();
 
-       if (jump_label_enabled(&elem->key))
-               jump_label_dec(&elem->key);
+       if (static_key_enabled(&elem->key))
+               static_key_slow_dec(&elem->key);
        rcu_assign_pointer(elem->funcs, NULL);
 }
 
index d117262..14bc092 100644 (file)
@@ -3,12 +3,9 @@
  *
  * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
  *
- * this code detects hard lockups: incidents in where on a CPU
- * the kernel does not respond to anything except NMI.
- *
- * Note: Most of this code is borrowed heavily from softlockup.c,
- * so thanks to Ingo for the initial implementation.
- * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
+ * Note: Most of this code is borrowed heavily from the original softlockup
+ * detector, so thanks to Ingo for the initial implementation.
+ * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
  * to those contributors as well.
  */
 
@@ -117,9 +114,10 @@ static unsigned long get_sample_period(void)
 {
        /*
         * convert watchdog_thresh from seconds to ns
-        * the divide by 5 is to give hrtimer 5 chances to
-        * increment before the hardlockup detector generates
-        * a warning
+        * the divide by 5 is to give hrtimer several chances (two
+        * or three with the current relation between the soft
+        * and hard thresholds) to increment before the
+        * hardlockup detector generates a warning
         */
        return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
 }
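
Worked through with the default watchdog_thresh of 10 seconds: get_softlockup_thresh() is 2 * watchdog_thresh = 20 s, so the sample period is 20 s / 5 = 4 s; the hrtimer kicks every 4 seconds, the softlockup check trips after 20 seconds and the hardlockup (NMI) check after 10, matching the Kconfig help text updated below.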
@@ -336,9 +334,11 @@ static int watchdog(void *unused)
 
        set_current_state(TASK_INTERRUPTIBLE);
        /*
-        * Run briefly once per second to reset the softlockup timestamp.
-        * If this gets delayed for more than 60 seconds then the
-        * debug-printout triggers in watchdog_timer_fn().
+        * Run briefly (kicked by the hrtimer callback function) once every
+        * get_sample_period() seconds (4 seconds by default) to reset the
+        * softlockup timestamp. If this gets delayed for more than
+        * 2*watchdog_thresh seconds then the debug-printout triggers in
+        * watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
index 8745ac7..9739c0b 100644 (file)
@@ -166,18 +166,21 @@ config LOCKUP_DETECTOR
          hard and soft lockups.
 
          Softlockups are bugs that cause the kernel to loop in kernel
-         mode for more than 60 seconds, without giving other tasks a
+         mode for more than 20 seconds, without giving other tasks a
          chance to run.  The current stack trace is displayed upon
          detection and the system will stay locked up.
 
          Hardlockups are bugs that cause the CPU to loop in kernel mode
-         for more than 60 seconds, without letting other interrupts have a
+         for more than 10 seconds, without letting other interrupts have a
          chance to run.  The current stack trace is displayed upon detection
          and the system will stay locked up.
 
          The overhead should be minimal.  A periodic hrtimer runs to
-         generate interrupts and kick the watchdog task every 10-12 seconds.
-         An NMI is generated every 60 seconds or so to check for hardlockups.
+         generate interrupts and kick the watchdog task every 4 seconds.
+         An NMI is generated every 10 seconds or so to check for hardlockups.
+
+         The frequency of hrtimer and NMI events and the soft and hard lockup
+         thresholds can be controlled through the sysctl watchdog_thresh.
 
 config HARDLOCKUP_DETECTOR
        def_bool LOCKUP_DETECTOR && PERF_EVENTS && HAVE_PERF_EVENTS_NMI && \
@@ -189,7 +192,8 @@ config BOOTPARAM_HARDLOCKUP_PANIC
        help
          Say Y here to enable the kernel to panic on "hard lockups",
          which are bugs that cause the kernel to loop in kernel
-         mode with interrupts disabled for more than 60 seconds.
+         mode with interrupts disabled for more than 10 seconds (configurable
+         using the watchdog_thresh sysctl).
 
          Say N if unsure.
 
@@ -206,8 +210,8 @@ config BOOTPARAM_SOFTLOCKUP_PANIC
        help
          Say Y here to enable the kernel to panic on "soft lockups",
          which are bugs that cause the kernel to loop in kernel
-         mode for more than 60 seconds, without giving other tasks a
-         chance to run.
+         mode for more than 20 seconds (configurable using the watchdog_thresh
+         sysctl), without giving other tasks a chance to run.
 
          The panic can be used in combination with panic_timeout,
          to cause the system to reboot automatically after a
index 6ca32f6..6982bfd 100644 (file)
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
 #include <linux/net_tstamp.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <net/flow_keys.h>
 
 #include "net-sysfs.h"
@@ -1441,11 +1441,11 @@ int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
 }
 EXPORT_SYMBOL(call_netdevice_notifiers);
 
-static struct jump_label_key netstamp_needed __read_mostly;
+static struct static_key netstamp_needed __read_mostly;
 #ifdef HAVE_JUMP_LABEL
-/* We are not allowed to call jump_label_dec() from irq context
+/* We are not allowed to call static_key_slow_dec() from irq context
  * If net_disable_timestamp() is called from irq context, defer the
- * jump_label_dec() calls.
+ * static_key_slow_dec() calls.
  */
 static atomic_t netstamp_needed_deferred;
 #endif
@@ -1457,12 +1457,12 @@ void net_enable_timestamp(void)
 
        if (deferred) {
                while (--deferred)
-                       jump_label_dec(&netstamp_needed);
+                       static_key_slow_dec(&netstamp_needed);
                return;
        }
 #endif
        WARN_ON(in_interrupt());
-       jump_label_inc(&netstamp_needed);
+       static_key_slow_inc(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_enable_timestamp);
 
@@ -1474,19 +1474,19 @@ void net_disable_timestamp(void)
                return;
        }
 #endif
-       jump_label_dec(&netstamp_needed);
+       static_key_slow_dec(&netstamp_needed);
 }
 EXPORT_SYMBOL(net_disable_timestamp);
 
 static inline void net_timestamp_set(struct sk_buff *skb)
 {
        skb->tstamp.tv64 = 0;
-       if (static_branch(&netstamp_needed))
+       if (static_key_false(&netstamp_needed))
                __net_timestamp(skb);
 }
 
 #define net_timestamp_check(COND, SKB)                 \
-       if (static_branch(&netstamp_needed)) {          \
+       if (static_key_false(&netstamp_needed)) {               \
                if ((COND) && !(SKB)->tstamp.tv64)      \
                        __net_timestamp(SKB);           \
        }                                               \
@@ -2660,7 +2660,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
-struct jump_label_key rps_needed __read_mostly;
+struct static_key rps_needed __read_mostly;
 
 static struct rps_dev_flow *
 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
@@ -2945,7 +2945,7 @@ int netif_rx(struct sk_buff *skb)
 
        trace_netif_rx(skb);
 #ifdef CONFIG_RPS
-       if (static_branch(&rps_needed)) {
+       if (static_key_false(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu;
 
@@ -3309,7 +3309,7 @@ int netif_receive_skb(struct sk_buff *skb)
                return NET_RX_SUCCESS;
 
 #ifdef CONFIG_RPS
-       if (static_branch(&rps_needed)) {
+       if (static_key_false(&rps_needed)) {
                struct rps_dev_flow voidflow, *rflow = &voidflow;
                int cpu, ret;
 
index a1727cd..4955862 100644 (file)
@@ -608,10 +608,10 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
        spin_unlock(&rps_map_lock);
 
        if (map)
-               jump_label_inc(&rps_needed);
+               static_key_slow_inc(&rps_needed);
        if (old_map) {
                kfree_rcu(old_map, rcu);
-               jump_label_dec(&rps_needed);
+               static_key_slow_dec(&rps_needed);
        }
        free_cpumask_var(mask);
        return len;
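
Note the pairing discipline here: each rx queue with a non-NULL map
contributes exactly one increment, and each retired map one decrement,
so the counts nest like a refcount and rps_needed stays enabled exactly
while at least one queue has RPS configured. A hypothetical sketch of
the invariant:

    /* Balanced per-resource inc/dec; code is only patched on the
     * 0 -> 1 and 1 -> 0 transitions of the key's counter. */
    static struct static_key feature_needed __read_mostly;

    void feature_attach(void)   /* e.g. a map installed on a queue */
    {
            static_key_slow_inc(&feature_needed);
    }

    void feature_detach(void)   /* e.g. a map removed from a queue */
    {
            static_key_slow_dec(&feature_needed);
    }
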
index 02f8dfe..95aff9c 100644 (file)
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
-#include <linux/jump_label.h>
+#include <linux/static_key.h>
 #include <linux/memcontrol.h>
 
 #include <asm/uaccess.h>
@@ -184,7 +184,7 @@ void mem_cgroup_sockets_destroy(struct cgroup *cgrp, struct cgroup_subsys *ss)
 static struct lock_class_key af_family_keys[AF_MAX];
 static struct lock_class_key af_family_slock_keys[AF_MAX];
 
-struct jump_label_key memcg_socket_limit_enabled;
+struct static_key memcg_socket_limit_enabled;
 EXPORT_SYMBOL(memcg_socket_limit_enabled);
 
 /*
index d05559d..0c28508 100644 (file)
@@ -69,9 +69,9 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
                if (sock_table != orig_sock_table) {
                        rcu_assign_pointer(rps_sock_flow_table, sock_table);
                        if (sock_table)
-                               jump_label_inc(&rps_needed);
+                               static_key_slow_inc(&rps_needed);
                        if (orig_sock_table) {
-                               jump_label_dec(&rps_needed);
+                               static_key_slow_dec(&rps_needed);
                                synchronize_rcu();
                                vfree(orig_sock_table);
                        }
index 4997878..602fb30 100644 (file)
@@ -111,7 +111,7 @@ void tcp_destroy_cgroup(struct cgroup *cgrp, struct cgroup_subsys *ss)
        val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
 
        if (val != RESOURCE_MAX)
-               jump_label_dec(&memcg_socket_limit_enabled);
+               static_key_slow_dec(&memcg_socket_limit_enabled);
 }
 EXPORT_SYMBOL(tcp_destroy_cgroup);
 
@@ -143,9 +143,9 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
                                             net->ipv4.sysctl_tcp_mem[i]);
 
        if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX)
-               jump_label_dec(&memcg_socket_limit_enabled);
+               static_key_slow_dec(&memcg_socket_limit_enabled);
        else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX)
-               jump_label_inc(&memcg_socket_limit_enabled);
+               static_key_slow_inc(&memcg_socket_limit_enabled);
 
        return 0;
 }
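
The transition test keeps the key's count balanced per cgroup: the key
effectively counts cgroups holding a finite limit, so only moves across
RESOURCE_MAX ("unlimited") may touch it. Restated as a hypothetical
helper:

    /* Hypothetical restatement of the rule above. */
    static void example_limit_changed(u64 old_lim, u64 new_lim)
    {
            if (new_lim == RESOURCE_MAX && old_lim != RESOURCE_MAX)
                    static_key_slow_dec(&memcg_socket_limit_enabled);
            else if (old_lim == RESOURCE_MAX && new_lim != RESOURCE_MAX)
                    static_key_slow_inc(&memcg_socket_limit_enabled);
            /* finite -> finite and unlimited -> unlimited: no change */
    }
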
index b4e8ff0..e1b7e05 100644 (file)
@@ -56,7 +56,7 @@ struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly;
 EXPORT_SYMBOL(nf_hooks);
 
 #if defined(CONFIG_JUMP_LABEL)
-struct jump_label_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
+struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
 EXPORT_SYMBOL(nf_hooks_needed);
 #endif
 
@@ -77,7 +77,7 @@ int nf_register_hook(struct nf_hook_ops *reg)
        list_add_rcu(&reg->list, elem->list.prev);
        mutex_unlock(&nf_hook_mutex);
 #if defined(CONFIG_JUMP_LABEL)
-       jump_label_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+       static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
        return 0;
 }
@@ -89,7 +89,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg)
        list_del_rcu(&reg->list);
        mutex_unlock(&nf_hook_mutex);
 #if defined(CONFIG_JUMP_LABEL)
-       jump_label_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
+       static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
 #endif
        synchronize_net();
 }
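
The reader side of these keys lives in include/linux/netfilter.h
(changed elsewhere in this merge): with a compile-time constant
protocol/hook pair, the presence test becomes a patched branch instead
of a list walk. A simplified sketch (the in-tree form may differ):

    static inline bool example_hooks_active(u_int8_t pf, unsigned int hook)
    {
    #if defined(CONFIG_JUMP_LABEL)
            if (__builtin_constant_p(pf) && __builtin_constant_p(hook))
                    return static_key_false(&nf_hooks_needed[pf][hook]);
    #endif
            return !list_empty(&nf_hooks[pf][hook]);
    }
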
index 4626a39..ca600e0 100644 (file)
@@ -1,3 +1,10 @@
+OUTPUT := ./
+ifeq ("$(origin O)", "command line")
+  ifneq ($(O),)
+       OUTPUT := $(O)/
+  endif
+endif
+
 MAN1_TXT= \
        $(filter-out $(addsuffix .txt, $(ARTICLES) $(SP_ARTICLES)), \
                $(wildcard perf-*.txt)) \
@@ -6,10 +13,11 @@ MAN5_TXT=
 MAN7_TXT=
 
 MAN_TXT = $(MAN1_TXT) $(MAN5_TXT) $(MAN7_TXT)
-MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT))
-MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT))
+_MAN_XML=$(patsubst %.txt,%.xml,$(MAN_TXT))
+_MAN_HTML=$(patsubst %.txt,%.html,$(MAN_TXT))
 
-DOC_HTML=$(MAN_HTML)
+MAN_XML=$(addprefix $(OUTPUT),$(_MAN_XML))
+MAN_HTML=$(addprefix $(OUTPUT),$(_MAN_HTML))
 
 ARTICLES =
 # with their own formatting rules.
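
The OUTPUT plumbing above mirrors the O= convention already used by the
top-level perf Makefile: when O is given on the command line, every
generated file is prefixed with $(O)/. A hedged usage example (the
directory must already exist; the trailing slash is appended
automatically):

    $ mkdir -p /tmp/perf-doc
    $ make -C tools/perf/Documentation O=/tmp/perf-doc man
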
@@ -18,11 +26,17 @@ API_DOCS = $(patsubst %.txt,%,$(filter-out technical/api-index-skel.txt technica
 SP_ARTICLES += $(API_DOCS)
 SP_ARTICLES += technical/api-index
 
-DOC_HTML += $(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES))
+_DOC_HTML = $(_MAN_HTML)
+_DOC_HTML+=$(patsubst %,%.html,$(ARTICLES) $(SP_ARTICLES))
+DOC_HTML=$(addprefix $(OUTPUT),$(_DOC_HTML))
 
-DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
-DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
-DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
+_DOC_MAN1=$(patsubst %.txt,%.1,$(MAN1_TXT))
+_DOC_MAN5=$(patsubst %.txt,%.5,$(MAN5_TXT))
+_DOC_MAN7=$(patsubst %.txt,%.7,$(MAN7_TXT))
+
+DOC_MAN1=$(addprefix $(OUTPUT),$(_DOC_MAN1))
+DOC_MAN5=$(addprefix $(OUTPUT),$(_DOC_MAN5))
+DOC_MAN7=$(addprefix $(OUTPUT),$(_DOC_MAN7))
 
 # Make the path relative to DESTDIR, not prefix
 ifndef DESTDIR
@@ -150,9 +164,9 @@ man1: $(DOC_MAN1)
 man5: $(DOC_MAN5)
 man7: $(DOC_MAN7)
 
-info: perf.info perfman.info
+info: $(OUTPUT)perf.info $(OUTPUT)perfman.info
 
-pdf: user-manual.pdf
+pdf: $(OUTPUT)user-manual.pdf
 
 install: install-man
 
@@ -166,7 +180,7 @@ install-man: man
 
 install-info: info
        $(INSTALL) -d -m 755 $(DESTDIR)$(infodir)
-       $(INSTALL) -m 644 perf.info perfman.info $(DESTDIR)$(infodir)
+       $(INSTALL) -m 644 $(OUTPUT)perf.info $(OUTPUT)perfman.info $(DESTDIR)$(infodir)
        if test -r $(DESTDIR)$(infodir)/dir; then \
          $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perf.info ;\
          $(INSTALL_INFO) --info-dir=$(DESTDIR)$(infodir) perfman.info ;\
@@ -176,7 +190,7 @@ install-info: info
 
 install-pdf: pdf
        $(INSTALL) -d -m 755 $(DESTDIR)$(pdfdir)
-       $(INSTALL) -m 644 user-manual.pdf $(DESTDIR)$(pdfdir)
+       $(INSTALL) -m 644 $(OUTPUT)user-manual.pdf $(DESTDIR)$(pdfdir)
 
 #install-html: html
 #      '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir)
@@ -189,14 +203,14 @@ install-pdf: pdf
 #
 # Determine "include::" file references in asciidoc files.
 #
-doc.dep : $(wildcard *.txt) build-docdep.perl
+$(OUTPUT)doc.dep : $(wildcard *.txt) build-docdep.perl
        $(QUIET_GEN)$(RM) $@+ $@ && \
        $(PERL_PATH) ./build-docdep.perl >$@+ $(QUIET_STDERR) && \
        mv $@+ $@
 
--include doc.dep
+-include $(OUTPUT)doc.dep
 
-cmds_txt = cmds-ancillaryinterrogators.txt \
+_cmds_txt = cmds-ancillaryinterrogators.txt \
        cmds-ancillarymanipulators.txt \
        cmds-mainporcelain.txt \
        cmds-plumbinginterrogators.txt \
@@ -205,32 +219,36 @@ cmds_txt = cmds-ancillaryinterrogators.txt \
        cmds-synchelpers.txt \
        cmds-purehelpers.txt \
        cmds-foreignscminterface.txt
+cmds_txt=$(addprefix $(OUTPUT),$(_cmds_txt))
 
-$(cmds_txt): cmd-list.made
+$(cmds_txt): $(OUTPUT)cmd-list.made
 
-cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT)
+$(OUTPUT)cmd-list.made: cmd-list.perl ../command-list.txt $(MAN1_TXT)
        $(QUIET_GEN)$(RM) $@ && \
        $(PERL_PATH) ./cmd-list.perl ../command-list.txt $(QUIET_STDERR) && \
        date >$@
 
 clean:
-       $(RM) *.xml *.xml+ *.html *.html+ *.1 *.5 *.7
-       $(RM) *.texi *.texi+ *.texi++ perf.info perfman.info
-       $(RM) howto-index.txt howto/*.html doc.dep
-       $(RM) technical/api-*.html technical/api-index.txt
-       $(RM) $(cmds_txt) *.made
-
-$(MAN_HTML): %.html : %.txt
+       $(RM) $(MAN_XML) $(addsuffix +,$(MAN_XML))
+       $(RM) $(MAN_HTML) $(addsuffix +,$(MAN_HTML))
+       $(RM) $(DOC_HTML) $(DOC_MAN1) $(DOC_MAN5) $(DOC_MAN7)
+       $(RM) $(OUTPUT)*.texi $(OUTPUT)*.texi+ $(OUTPUT)*.texi++
+       $(RM) $(OUTPUT)perf.info $(OUTPUT)perfman.info
+       $(RM) $(OUTPUT)howto-index.txt $(OUTPUT)howto/*.html $(OUTPUT)doc.dep
+       $(RM) $(OUTPUT)technical/api-*.html $(OUTPUT)technical/api-index.txt
+       $(RM) $(cmds_txt) $(OUTPUT)*.made
+
+$(MAN_HTML): $(OUTPUT)%.html : %.txt
        $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
        $(ASCIIDOC) -b xhtml11 -d manpage -f asciidoc.conf \
                $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
        mv $@+ $@
 
-%.1 %.5 %.7 : %.xml
+$(OUTPUT)%.1 $(OUTPUT)%.5 $(OUTPUT)%.7 : $(OUTPUT)%.xml
        $(QUIET_XMLTO)$(RM) $@ && \
-       xmlto -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
+       xmlto -o $(OUTPUT) -m $(MANPAGE_XSL) $(XMLTO_EXTRA) man $<
 
-%.xml : %.txt
+$(OUTPUT)%.xml : %.txt
        $(QUIET_ASCIIDOC)$(RM) $@+ $@ && \
        $(ASCIIDOC) -b docbook -d manpage -f asciidoc.conf \
                $(ASCIIDOC_EXTRA) -aperf_version=$(PERF_VERSION) -o $@+ $< && \
@@ -239,25 +257,25 @@ $(MAN_HTML): %.html : %.txt
 XSLT = docbook.xsl
 XSLTOPTS = --xinclude --stringparam html.stylesheet docbook-xsl.css
 
-user-manual.html: user-manual.xml
+$(OUTPUT)user-manual.html: $(OUTPUT)user-manual.xml
        $(QUIET_XSLTPROC)xsltproc $(XSLTOPTS) -o $@ $(XSLT) $<
 
-perf.info: user-manual.texi
-       $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ user-manual.texi
+$(OUTPUT)perf.info: $(OUTPUT)user-manual.texi
+       $(QUIET_MAKEINFO)$(MAKEINFO) --no-split -o $@ $(OUTPUT)user-manual.texi
 
-user-manual.texi: user-manual.xml
+$(OUTPUT)user-manual.texi: $(OUTPUT)user-manual.xml
        $(QUIET_DB2TEXI)$(RM) $@+ $@ && \
-       $(DOCBOOK2X_TEXI) user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \
+       $(DOCBOOK2X_TEXI) $(OUTPUT)user-manual.xml --encoding=UTF-8 --to-stdout >$@++ && \
        $(PERL_PATH) fix-texi.perl <$@++ >$@+ && \
        rm $@++ && \
        mv $@+ $@
 
-user-manual.pdf: user-manual.xml
+$(OUTPUT)user-manual.pdf: $(OUTPUT)user-manual.xml
        $(QUIET_DBLATEX)$(RM) $@+ $@ && \
        $(DBLATEX) -o $@+ -p /etc/asciidoc/dblatex/asciidoc-dblatex.xsl -s /etc/asciidoc/dblatex/asciidoc-dblatex.sty $< && \
        mv $@+ $@
 
-perfman.texi: $(MAN_XML) cat-texi.perl
+$(OUTPUT)perfman.texi: $(MAN_XML) cat-texi.perl
        $(QUIET_DB2TEXI)$(RM) $@+ $@ && \
        ($(foreach xml,$(MAN_XML),$(DOCBOOK2X_TEXI) --encoding=UTF-8 \
                --to-stdout $(xml) &&) true) > $@++ && \
@@ -265,7 +283,7 @@ perfman.texi: $(MAN_XML) cat-texi.perl
        rm $@++ && \
        mv $@+ $@
 
-perfman.info: perfman.texi
+$(OUTPUT)perfman.info: $(OUTPUT)perfman.texi
        $(QUIET_MAKEINFO)$(MAKEINFO) --no-split --no-validate $*.texi
 
 $(patsubst %.txt,%.texi,$(MAN_TXT)): %.texi : %.xml
index d6b2a4f..c7f5f55 100644 (file)
@@ -8,7 +8,7 @@ perf-lock - Analyze lock events
 SYNOPSIS
 --------
 [verse]
-'perf lock' {record|report|trace}
+'perf lock' {record|report|script|info}
 
 DESCRIPTION
 -----------
@@ -20,10 +20,13 @@ and statistics with this 'perf lock' command.
   produces the file "perf.data" which contains tracing
   results of lock events.
 
-  'perf lock trace' shows raw lock events.
-
   'perf lock report' reports statistical data.
 
+  'perf lock script' shows raw lock events.
+
+  'perf lock info' shows metadata, such as the thread list or the
+  address:name map of lock instances.
+
 COMMON OPTIONS
 --------------
 
@@ -47,6 +50,17 @@ REPORT OPTIONS
         Sorting key. Possible values: acquired (default), contended,
         wait_total, wait_max, wait_min.
 
+INFO OPTIONS
+------------
+
+-t::
+--threads::
+       dump thread list in perf.data
+
+-m::
+--map::
+       dump map of lock instances (address:name table)
+
 SEE ALSO
 --------
 linkperf:perf[1]