perf: export perf_trace_regs and perf_arch_fetch_caller_regs
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 01b1667..7645fae 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -785,6 +785,7 @@ static inline int match_prev_assignment(struct hw_perf_event *hwc,
                hwc->last_tag == cpuc->tags[i];
 }
 
+static int x86_pmu_start(struct perf_event *event);
 static void x86_pmu_stop(struct perf_event *event);
 
 void hw_perf_enable(void)
@@ -801,6 +802,7 @@ void hw_perf_enable(void)
                return;
 
        if (cpuc->n_added) {
+               int n_running = cpuc->n_events - cpuc->n_added;
                /*
                 * apply assignment obtained either from
                 * hw_perf_group_sched_in() or x86_pmu_enable()
@@ -808,8 +810,7 @@ void hw_perf_enable(void)
                 * step1: save events moving to new counters
                 * step2: reprogram moved events into new counters
                 */
-               for (i = 0; i < cpuc->n_events; i++) {
-
+               for (i = 0; i < n_running; i++) {
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
@@ -824,29 +825,18 @@ void hw_perf_enable(void)
                                continue;
 
                        x86_pmu_stop(event);
-
-                       hwc->idx = -1;
                }
 
                for (i = 0; i < cpuc->n_events; i++) {
-
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
-                       if (hwc->idx == -1) {
+                       if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
-                               x86_perf_event_set_period(event);
-                       }
-                       /*
-                        * need to mark as active because x86_pmu_disable()
-                        * clear active_mask and events[] yet it preserves
-                        * idx
-                        */
-                       __set_bit(hwc->idx, cpuc->active_mask);
-                       cpuc->events[hwc->idx] = event;
+                       else if (i < n_running)
+                               continue;
 
-                       x86_pmu.enable(event);
-                       perf_event_update_userpage(event);
+                       x86_pmu_start(event);
                }
                cpuc->n_added = 0;
                perf_events_lapic_init();
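
The reworked hw_perf_enable() path above relies on the layout of the event list: entries below n_running were already programmed before the PMU was disabled, while entries from n_running upward were added in the meantime. Pass one stops only running events whose counter assignment changed; pass two reprograms anything that moved and starts everything new, leaving untouched events running. Below is a minimal user-space model of that two-pass scheme; the struct event/cpu_state types and the stop()/start() stubs are hypothetical stand-ins for the kernel's hw_perf_event, cpu_hw_events, x86_pmu_stop() and x86_pmu_start(), not kernel code.

#include <stdio.h>

#define MAX_EVENTS 8

/* Hypothetical stand-ins for the kernel's hw_perf_event / cpu_hw_events. */
struct event {
	int idx;                        /* counter currently programmed, -1 if none */
};

struct cpu_state {
	int n_events;                   /* total scheduled events             */
	int n_added;                    /* added while the PMU was disabled   */
	int assign[MAX_EVENTS];         /* freshly computed counter per event */
	struct event *events[MAX_EVENTS];
};

static void stop(struct event *e)  { printf("stop  counter %d\n", e->idx); }
static void start(struct event *e) { printf("start counter %d\n", e->idx); }

static void reprogram(struct cpu_state *c)
{
	int n_running = c->n_events - c->n_added;
	int i;

	/* Pass 1: stop already-running events whose counter changed. */
	for (i = 0; i < n_running; i++)
		if (c->events[i]->idx != c->assign[i])
			stop(c->events[i]);

	/* Pass 2: (re)start moved events and start the newly added ones. */
	for (i = 0; i < c->n_events; i++) {
		struct event *e = c->events[i];

		if (e->idx != c->assign[i])
			e->idx = c->assign[i];  /* x86_assign_hw_event()   */
		else if (i < n_running)
			continue;               /* untouched, keep running */

		start(e);
	}
	c->n_added = 0;
}

int main(void)
{
	struct event a = { .idx = 0 }, b = { .idx = 1 }, c = { .idx = -1 };
	struct cpu_state st = {
		.n_events = 3, .n_added = 1,    /* a, b were running; c is new */
		.assign   = { 1, 0, 2 },        /* a and b swap counters       */
		.events   = { &a, &b, &c },
	};

	reprogram(&st);
	return 0;
}

Running the model stops counters 0 and 1, restarts the two swapped events on their new counters, and starts the newly added event, which is exactly the behaviour the n_running partition is meant to preserve.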
@@ -968,32 +958,32 @@ static int x86_pmu_enable(struct perf_event *event)
        memcpy(cpuc->assign, assign, n*sizeof(int));
 
        cpuc->n_events = n;
-       cpuc->n_added  = n - n0;
+       cpuc->n_added += n - n0;
 
        return 0;
 }
 
 static int x86_pmu_start(struct perf_event *event)
 {
-       if (event->hw.idx == -1)
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int idx = event->hw.idx;
+
+       if (idx == -1)
                return -EAGAIN;
 
        x86_perf_event_set_period(event);
+       cpuc->events[idx] = event;
+       __set_bit(idx, cpuc->active_mask);
        x86_pmu.enable(event);
+       perf_event_update_userpage(event);
 
        return 0;
 }
 
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       struct hw_perf_event *hwc = &event->hw;
-
-       if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
-                               cpuc->events[hwc->idx] != event))
-               return;
-
-       x86_pmu.enable(event);
+       int ret = x86_pmu_start(event);
+       WARN_ON_ONCE(ret);
 }
 
 void perf_event_print_debug(void)
@@ -1053,11 +1043,9 @@ static void x86_pmu_stop(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
-       /*
-        * Must be done before we disable, otherwise the nmi handler
-        * could reenable again:
-        */
-       __clear_bit(idx, cpuc->active_mask);
+       if (!__test_and_clear_bit(idx, cpuc->active_mask))
+               return;
+
        x86_pmu.disable(event);
 
        /*
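
Switching x86_pmu_stop() to __test_and_clear_bit() makes stopping idempotent: only the caller that actually clears the active_mask bit disables the counter, so a second stop (for example from the NMI handler racing with the rescheduling pass in hw_perf_enable()) falls through harmlessly. The following is a small user-space sketch of that pattern, with a plain bitmask and a non-atomic helper standing in for active_mask and __test_and_clear_bit().

#include <stdio.h>
#include <stdbool.h>

static unsigned long active_mask;

/* Non-atomic model of __test_and_clear_bit(): returns the old bit value. */
static bool test_and_clear_bit(int nr, unsigned long *mask)
{
	bool old = *mask & (1UL << nr);

	*mask &= ~(1UL << nr);
	return old;
}

static void pmu_stop(int idx)
{
	if (!test_and_clear_bit(idx, &active_mask))
		return;                 /* already stopped: nothing to do */

	printf("disable counter %d\n", idx);
}

int main(void)
{
	active_mask = 1UL << 3;         /* counter 3 is running */

	pmu_stop(3);                    /* first stop disables the counter */
	pmu_stop(3);                    /* second stop is a harmless no-op */
	return 0;
}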
@@ -1126,7 +1114,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       x86_pmu.disable(event);
+                       x86_pmu_stop(event);
        }
 
        if (handled)
@@ -1313,7 +1301,7 @@ int hw_perf_group_sched_in(struct perf_event *leader,
        memcpy(cpuc->assign, assign, n0*sizeof(int));
 
        cpuc->n_events  = n0;
-       cpuc->n_added   = n1;
+       cpuc->n_added  += n1;
        ctx->nr_active += n1;
 
        /*
@@ -1713,3 +1701,16 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 
        return entry;
 }
+
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+       regs->ip = ip;
+       /*
+        * perf_arch_fetch_caller_regs adds another call, we need to increment
+        * the skip level
+        */
+       regs->bp = rewind_frame_pointer(skip + 1);
+       regs->cs = __KERNEL_CS;
+       local_save_flags(regs->flags);
+}
+EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
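
The new perf_arch_fetch_caller_regs() fakes up just enough of a pt_regs (ip, bp, cs, flags) for unwinding to begin at the caller of the instrumentation point; rewind_frame_pointer() walks the saved frame pointers, and skip + 1 hides the extra call level added by perf_arch_fetch_caller_regs() itself. Here is a minimal user-space sketch of frame-pointer rewinding, assuming -fno-omit-frame-pointer and the conventional x86 frame layout; the stack_frame layout mirrors the kernel's dumpstack helpers, but this is an illustration, not the kernel code.

#include <stdio.h>

/*
 * Conventional x86 frame layout with frame pointers enabled: the word at
 * the frame pointer holds the caller's saved frame pointer, followed by
 * the return address.
 */
struct stack_frame {
	struct stack_frame *next_frame;
	unsigned long return_address;
};

static __attribute__((noinline)) unsigned long rewind_frame_pointer(int n)
{
	struct stack_frame *frame = __builtin_frame_address(0);

	/* Each step moves from one frame to the frame of its caller. */
	while (n--)
		frame = frame->next_frame;

	return (unsigned long)frame;
}

static __attribute__((noinline)) void probe(void)
{
	/*
	 * n = 1 skips rewind_frame_pointer()'s own frame and lands on
	 * probe()'s frame -- the same reason the patch passes skip + 1:
	 * the helper call adds one frame the caller did not ask to skip.
	 */
	printf("probe frame at %#lx\n", rewind_frame_pointer(1));
}

int main(void)
{
	probe();
	return 0;
}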