perf: export perf_trace_regs and perf_arch_fetch_caller_regs
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9757b96..7645fae 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -802,6 +802,7 @@ void hw_perf_enable(void)
                return;
 
        if (cpuc->n_added) {
+               int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
@@ -809,8 +810,7 @@ void hw_perf_enable(void)
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                 */
-               for (i = 0; i < cpuc->n_events; i++) {
-
+               for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
@@ -825,17 +825,16 @@ void hw_perf_enable(void)
                                continue;
 
                        x86_pmu_stop(event);
-
-                       hwc->idx = -1;
                }
 
                for (i = 0; i < cpuc->n_events; i++) {
-
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
-                       if (hwc->idx == -1)
+                       if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
+                       else if (i < n_running)
+                               continue;
 
                        x86_pmu_start(event);
                }
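
Note on the hw_perf_enable() hunks above: the first pass now walks only the
n_running events that were already programmed before this perf_disable()
section (n_events - n_added) and stops just those whose counter assignment is
about to change, instead of stopping them and flagging them with hwc->idx = -1.
The second pass reprograms an event only when its hardware state no longer
matches the new assignment, and an already-running event that keeps its
counter is skipped outright rather than being started a second time.

The test relies on a helper defined earlier in this file; the reconstruction
below is an assumption about its body, shown only to make "matches its
previous assignment" concrete:

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	/* same counter, on the same CPU, with the same config tag */
	return hwc->idx == cpuc->assign[i] &&
	       hwc->last_cpu == smp_processor_id() &&
	       hwc->last_tag == cpuc->tags[i];
}
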
@@ -959,7 +958,7 @@ static int x86_pmu_enable(struct perf_event *event)
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
        cpuc->n_events = n;
-       cpuc->n_added  = n - n0;
+       cpuc->n_added += n - n0;
 
        return 0;
 }
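
Note: x86_pmu_enable() can run several times between perf_disable() and
perf_enable(), while hw_perf_enable() runs once at the end to program every
event collected in the meantime. With a plain assignment a second call
overwrites the count from the first, so hw_perf_enable() computes
n_running = n_events - n_added too high and treats earlier additions as if
they were already on counters. Accumulating with += keeps the pending count
honest; hw_perf_group_sched_in() below gets the same fix. A minimal userspace
model of the accumulation (the reset on flush is assumed):

#include <stdio.h>

static int n_events, n_added;

static void model_pmu_enable(void)	/* models one x86_pmu_enable() call */
{
	int n0 = n_events;
	int n  = n0 + 1;		/* collect_events() accepted one event */

	n_events = n;
	n_added += n - n0;		/* '=' here would lose earlier additions */
}

static void model_hw_perf_enable(void)	/* models the flush at perf_enable() */
{
	printf("%d of %d events still need programming\n", n_added, n_events);
	n_added = 0;			/* assumed: consumed once applied */
}

int main(void)
{
	model_pmu_enable();
	model_pmu_enable();		/* two events added under one perf_disable() */
	model_hw_perf_enable();		/* prints "2 of 2", not "1 of 2" */
	return 0;
}
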
@@ -983,14 +982,8 @@ static int x86_pmu_start(struct perf_event *event)
 
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct hw_perf_event *hwc = &event->hw;
-
-       if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-                               cpuc->events[hwc->idx] != event))
-               return;
-
-       x86_pmu.enable(event);
+       int ret = x86_pmu_start(event);
+       WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
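
Note: unthrottling now goes through x86_pmu_start() instead of open-coding a
sanity check around the raw x86_pmu.enable() hook, so a throttled event comes
back exactly the way a freshly scheduled one starts; the WARN_ON_ONCE() fires
if the event had no counter to return to. A rough sketch of the kind of work
x86_pmu_start(), defined just above this hunk, does on its behalf (this body
is a reconstruction, not the file's actual code):

static int x86_pmu_start(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->idx == -1)
		return -EAGAIN;			/* not on a counter */

	x86_perf_event_set_period(event);	/* re-arm the sample period */
	cpuc->events[hwc->idx] = event;		/* bookkeeping ->enable() alone skips */
	__set_bit(hwc->idx, cpuc->active_mask);
	x86_pmu.enable(event);

	return 0;
}
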
@@ -1050,11 +1043,9 @@ static void x86_pmu_stop(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       /*
-        * Must be done before we disable, otherwise the nmi handler
-        * could reenable again:
-        */
-       __clear_bit(idx, cpuc->active_mask);
+       if (!__test_and_clear_bit(idx, cpuc->active_mask))
+               return;
+
        x86_pmu.disable(event);
 
        /*
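
Note: x86_pmu_stop() becomes idempotent. The active_mask bit now doubles as
the "this counter is live" flag, and only the caller that actually clears it
goes on to disable the hardware counter. That matters because, with the NMI
handler hunk below, a throttled event can be stopped from NMI context and then
stopped again from the regular scheduling path. A small userspace model of the
test-and-clear pattern (names are illustrative):

#include <stdio.h>

static unsigned long active_mask;

/* stand-in for __test_and_clear_bit(): returns the previous bit value */
static int test_and_clear(int nr, unsigned long *addr)
{
	unsigned long bit = 1UL << nr;
	int was_set = !!(*addr & bit);

	*addr &= ~bit;
	return was_set;
}

static void stop_counter(int idx)		/* models x86_pmu_stop() */
{
	if (!test_and_clear(idx, &active_mask))
		return;				/* already stopped: nothing to do */

	printf("disable counter %d\n", idx);	/* stands in for x86_pmu.disable() */
}

int main(void)
{
	active_mask = 1UL << 3;			/* counter 3 is running */
	stop_counter(3);			/* disables it */
	stop_counter(3);			/* second call is a harmless no-op */
	return 0;
}
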
@@ -1123,7 +1114,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       x86_pmu.disable(event);
+                       x86_pmu_stop(event);
        }
 
        if (handled)
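
Note: when perf_event_overflow() reports that an event exceeded its interrupt
rate and must be throttled, the handler now takes it out through
x86_pmu_stop() rather than the bare x86_pmu.disable() hook, so active_mask and
cpuc->events[] stay in sync and the idempotent stop above absorbs any later
stop from the scheduler. A condensed sketch of the loop this hunk lives in
(field names and the elided steps are assumptions):

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;			/* counter not in use */

		event = cpuc->events[idx];
		/* counter readout and period handling elided */

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event);		/* throttle via the common path */
	}
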
@@ -1310,7 +1301,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,
        memcpy(cpuc->assign, assign, n0*sizeof(int));
 
        cpuc->n_events  = n0;
-       cpuc->n_added   = n1;
+       cpuc->n_added  += n1;
        ctx->nr_active += n1;
 
        /*
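
Note: the group-scheduling path gets the same treatment as x86_pmu_enable()
above: n1 freshly accepted group members land on top of the n0 events already
committed, and since several such additions can pile up under one
perf_disable() section, n_added must accumulate here too. A hypothetical call
sequence (simplified; the callers shown around hw_perf_group_sched_in() are
illustrative):

	/*
	 * Everything added between perf_disable() and perf_enable() is
	 * applied by a single hw_perf_enable() call, which only learns how
	 * much work is pending through cpuc->n_added.
	 */
	perf_disable();
	hw_perf_group_sched_in(leader, cpuctx, ctx);	/* n_added += n1 (this hunk)  */
	x86_pmu_enable(other_event);			/* n_added += 1  (hunk above) */
	perf_enable();					/* hw_perf_enable() programs both */
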
@@ -1710,3 +1701,16 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 
        return entry;
 }
+
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+       regs->ip = ip;
+       /*
+        * perf_arch_fetch_caller_regs adds another call, we need to increment
+        * the skip level
+        */
+       regs->bp = rewind_frame_pointer(skip + 1);
+       regs->cs = __KERNEL_CS;
+       local_save_flags(regs->flags);
+}
+EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
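
Note: this final hunk is the x86 half of what the subject line describes:
perf_arch_fetch_caller_regs() gets an out-of-line, EXPORT_SYMBOL_GPL'd
definition so that modules can fabricate a minimal pt_regs for a sample taken
at an arbitrary in-kernel call site. ip comes from the caller, bp is rewound
past skip + 1 frames (the extra frame being this function itself), and cs and
flags are filled in so the result looks like a kernel context to the unwinder.
For context, a reconstruction of the frame-rewinding helper it leans on
(defined elsewhere in the x86 tree; the exact body here is an assumption):

struct stack_frame {
	struct stack_frame	*next_frame;	/* saved rbp of the caller */
	unsigned long		return_address;
};

static inline unsigned long rewind_frame_pointer(int n)
{
	struct stack_frame *frame;

	get_bp(frame);				/* current frame pointer */

#ifdef CONFIG_FRAME_POINTER
	while (n--)
		frame = frame->next_frame;	/* discard n call levels */
#endif
	return (unsigned long)frame;
}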