perf, x86: Fix callgraphs of 32-bit processes on 64-bit kernels
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index c6bde7d..53ea4cf 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -28,6 +28,7 @@
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
+#include <asm/compat.h>
 
 static u64 perf_event_mask __read_mostly;
 
@@ -158,7 +159,7 @@ struct x86_pmu {
                                                 struct perf_event *event);
        struct event_constraint *event_constraints;
 
-       void            (*cpu_prepare)(int cpu);
+       int             (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
        void            (*cpu_dying)(int cpu);
        void            (*cpu_dead)(int cpu);
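Note: cpu_prepare now returns int so a model-specific implementation can fail CPU bring-up when its per-CPU allocation fails. A minimal sketch of such a hook, with illustrative names (struct example_nb and example_nb_ptr are hypothetical, not the real AMD/Intel code):

	struct example_nb { int refcnt; };		/* hypothetical per-CPU state */
	static DEFINE_PER_CPU(struct example_nb *, example_nb_ptr);

	static int example_pmu_cpu_prepare(int cpu)
	{
		struct example_nb *nb;

		/* allocate per-CPU state on the CPU's home node */
		nb = kmalloc_node(sizeof(*nb), GFP_KERNEL, cpu_to_node(cpu));
		if (!nb)
			return NOTIFY_BAD;	/* vetoes CPU_UP_PREPARE */

		per_cpu(example_nb_ptr, cpu) = nb;
		return NOTIFY_OK;
	}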
@@ -811,7 +812,6 @@ void hw_perf_enable(void)
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
-
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
@@ -826,21 +826,16 @@ void hw_perf_enable(void)
                                continue;
 
                        x86_pmu_stop(event);
-
-                       hwc->idx = -1;
                }
 
                for (i = 0; i < cpuc->n_events; i++) {
-
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
-                       if (i < n_running &&
-                           match_prev_assignment(hwc, cpuc, i))
-                               continue;
-
-                       if (hwc->idx == -1)
+                       if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
+                       else if (i < n_running)
+                               continue;
 
                        x86_pmu_start(event);
                }
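Note: the rewritten step 2 folds the two tests into one decision: an event keeps its counter only if its cached assignment still matches, and only already-running events (i < n_running) are then skipped; everything else is (re)assigned and restarted, which makes the hwc->idx = -1 bookkeeping removed above unnecessary. For reference, match_prev_assignment() is defined earlier in this file and compares roughly the following (paraphrased; not part of this diff):

	static int match_prev_assignment(struct hw_perf_event *hwc,
					 struct cpu_hw_events *cpuc, int i)
	{
		return hwc->idx == cpuc->assign[i] &&
		       hwc->last_cpu == smp_processor_id() &&
		       hwc->last_tag == cpuc->tags[i];
	}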
@@ -1339,11 +1334,12 @@ static int __cpuinit
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (long)hcpu;
+       int ret = NOTIFY_OK;
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
                if (x86_pmu.cpu_prepare)
-                       x86_pmu.cpu_prepare(cpu);
+                       ret = x86_pmu.cpu_prepare(cpu);
                break;
 
        case CPU_STARTING:
@@ -1356,6 +1352,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
                        x86_pmu.cpu_dying(cpu);
                break;
 
+       case CPU_UP_CANCELED:
        case CPU_DEAD:
                if (x86_pmu.cpu_dead)
                        x86_pmu.cpu_dead(cpu);
@@ -1365,7 +1362,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
                break;
        }
 
-       return NOTIFY_OK;
+       return ret;
 }
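Note: propagating ret lets a NOTIFY_BAD from cpu_prepare abort the hotplug transition, and the new CPU_UP_CANCELED case falls through to cpu_dead so state allocated by a successful prepare is freed when a later bring-up step fails. A sketch of the matching teardown for the hypothetical prepare hook above:

	static void example_pmu_cpu_dead(int cpu)
	{
		kfree(per_cpu(example_nb_ptr, cpu));
		per_cpu(example_nb_ptr, cpu) = NULL;
	}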
 
 static void __init pmu_check_apic(void)
@@ -1634,14 +1631,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
        return len;
 }
 
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 {
-       unsigned long bytes;
+       /* 32-bit process in 64-bit kernel. */
+       struct stack_frame_ia32 frame;
+       const void __user *fp;
 
-       bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+       if (!test_thread_flag(TIF_IA32))
+               return 0;
+
+       fp = compat_ptr(regs->bp);
+       while (entry->nr < PERF_MAX_STACK_DEPTH) {
+               unsigned long bytes;
+               frame.next_frame     = 0;
+               frame.return_address = 0;
 
-       return bytes == sizeof(*frame);
+               bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+               if (bytes != sizeof(frame))
+                       break;
+
+               if (fp < compat_ptr(regs->sp))
+                       break;
+
+               callchain_store(entry, frame.return_address);
+               fp = compat_ptr(frame.next_frame);
+       }
+       return 1;
 }
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+       return 0;
+}
+#endif
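Note: the walk above reads IA-32 frame records, whose saved frame pointer and return address are each 32 bits wide; hence the separate frame type and the compat_ptr() widening. The layout assumed here (struct stack_frame_ia32 is defined outside this hunk) is:

	struct stack_frame_ia32 {
		u32 next_frame;
		u32 return_address;
	};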
 
 static void
 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1657,11 +1682,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
        callchain_store(entry, PERF_CONTEXT_USER);
        callchain_store(entry, regs->ip);
 
+       if (perf_callchain_user32(regs, entry))
+               return;
+
        while (entry->nr < PERF_MAX_STACK_DEPTH) {
+               unsigned long bytes;
                frame.next_frame             = NULL;
                frame.return_address = 0;
 
-               if (!copy_stack_frame(fp, &frame))
+               bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+               if (bytes != sizeof(frame))
                        break;
 
                if ((unsigned long)fp < regs->sp)