perf_events, x86: AMD event scheduling
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 5b91992..aa12f36 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -80,6 +80,13 @@ struct event_constraint {
        int     weight;
 };
 
+struct amd_nb {
+       int nb_id;  /* NorthBridge id */
+       int refcnt; /* reference count */
+       struct perf_event *owners[X86_PMC_IDX_MAX];
+       struct event_constraint event_constraints[X86_PMC_IDX_MAX];
+};
+
 struct cpu_hw_events {
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
@@ -90,16 +97,21 @@ struct cpu_hw_events {
        int                     n_events;
        int                     n_added;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
+       u64                     tags[X86_PMC_IDX_MAX];
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
+       struct amd_nb           *amd_nb;
 };
 
-#define EVENT_CONSTRAINT(c, n, m) {    \
+#define __EVENT_CONSTRAINT(c, n, m, w) {\
        { .idxmsk64[0] = (n) },         \
        .code = (c),                    \
        .cmask = (m),                   \
-       .weight = HWEIGHT64((u64)(n)),  \
+       .weight = (w),                  \
 }
 
+#define EVENT_CONSTRAINT(c, n, m)      \
+       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+
 #define INTEL_EVENT_CONSTRAINT(c, n)   \
        EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
 
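
The weight of a constraint is just the population count of its counter bitmask, i.e. how many counters the event is allowed to use, which is why EVENT_CONSTRAINT can derive it with HWEIGHT while the runtime-sized "unconstrained" mask (see init_hw_perf_events below) has to pass the weight explicitly. A minimal standalone sketch, where hweight64() stands in for the kernel's HWEIGHT/HWEIGHT64 macros and the counter numbers are made up:

/* Standalone illustration; not part of the patch. */
#include <stdint.h>
#include <stdio.h>

static int hweight64(uint64_t mask)
{
        return __builtin_popcountll(mask);
}

int main(void)
{
        int num_events = 4;                                     /* e.g. 4 generic counters */
        uint64_t unconstrained = (1ULL << num_events) - 1;      /* may use any counter */
        uint64_t nb_slot = 1ULL << 2;                           /* pinned to counter 2 */

        printf("unconstrained weight = %d\n", hweight64(unconstrained));  /* 4 */
        printf("single-slot weight   = %d\n", hweight64(nb_slot));        /* 1 */
        return 0;
}
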
@@ -149,6 +161,8 @@ struct x86_pmu {
 
 static struct x86_pmu x86_pmu __read_mostly;
 
+static raw_spinlock_t amd_nb_lock;
+
 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
 };
@@ -798,7 +812,7 @@ static u64 amd_pmu_event_map(int hw_event)
 
 static u64 amd_pmu_raw_event(u64 hw_event)
 {
-#define K7_EVNTSEL_EVENT_MASK  0x7000000FFULL
+#define K7_EVNTSEL_EVENT_MASK  0xF000000FFULL
 #define K7_EVNTSEL_UNIT_MASK   0x00000FF00ULL
 #define K7_EVNTSEL_EDGE_MASK   0x000040000ULL
 #define K7_EVNTSEL_INV_MASK    0x000800000ULL
@@ -1139,6 +1153,8 @@ static int __hw_perf_event_init(struct perf_event *event)
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;
 
        hwc->idx = -1;
+       hwc->last_cpu = -1;
+       hwc->last_tag = ~0ULL;
 
        /*
         * Count user and OS events unless requested not to.
@@ -1454,11 +1470,14 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
        return n;
 }
 
-
 static inline void x86_assign_hw_event(struct perf_event *event,
-                               struct hw_perf_event *hwc, int idx)
+                               struct cpu_hw_events *cpuc, int i)
 {
-       hwc->idx = idx;
+       struct hw_perf_event *hwc = &event->hw;
+
+       hwc->idx = cpuc->assign[i];
+       hwc->last_cpu = smp_processor_id();
+       hwc->last_tag = ++cpuc->tags[i];
 
        if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
@@ -1477,7 +1496,16 @@ static inline void x86_assign_hw_event(struct perf_event *event,
        }
 }
 
-static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc);
+static inline int match_prev_assignment(struct hw_perf_event *hwc,
+                                       struct cpu_hw_events *cpuc,
+                                       int i)
+{
+       return hwc->idx == cpuc->assign[i] &&
+               hwc->last_cpu == smp_processor_id() &&
+               hwc->last_tag == cpuc->tags[i];
+}
+
+static void x86_pmu_stop(struct perf_event *event);
 
 void hw_perf_enable(void)
 {
@@ -1505,10 +1533,17 @@ void hw_perf_enable(void)
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
-                       if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
+                       /*
+                        * we can avoid reprogramming the counter if:
+                        * - assigned same counter as last time
+                        * - running on same CPU as last time
+                        * - no other event has used the counter since
+                        */
+                       if (hwc->idx == -1 ||
+                           match_prev_assignment(hwc, cpuc, i))
                                continue;
 
-                       __x86_pmu_disable(event, cpuc);
+                       x86_pmu_stop(event);
 
                        hwc->idx = -1;
                }
@@ -1519,12 +1554,12 @@ void hw_perf_enable(void)
                        hwc = &event->hw;
 
                        if (hwc->idx == -1) {
-                               x86_assign_hw_event(event, hwc, cpuc->assign[i]);
+                               x86_assign_hw_event(event, cpuc, i);
                                x86_perf_event_set_period(event, hwc, hwc->idx);
                        }
                        /*
                         * need to mark as active because x86_pmu_disable()
-                        * clear active_mask and eventsp[] yet it preserves
+                        * clears active_mask and events[], yet it preserves
                         * idx
                         */
                        set_bit(hwc->idx, cpuc->active_mask);
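
The reuse test introduced above can be modelled in isolation: an event keeps its counter programming only if it got the same index, on the same CPU, and the per-index tag was not bumped by another assignment in between. A user-space sketch, with can_skip_reprogram() and the 4-entry tags[] array invented purely for illustration:

/* Standalone model of the match_prev_assignment() idea; not kernel code. */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

struct assignment {
        int      idx;   /* counter index used last time, -1 if none */
        int      cpu;   /* CPU it was programmed on */
        uint64_t tag;   /* value of tags[idx] when it was programmed */
};

static uint64_t tags[4];        /* bumped each time a counter is (re)assigned */

static bool can_skip_reprogram(const struct assignment *a, int new_idx, int this_cpu)
{
        return a->idx == new_idx &&
               a->cpu == this_cpu &&
               a->tag == tags[new_idx];
}

int main(void)
{
        struct assignment a = { .idx = 1, .cpu = 0, .tag = ++tags[1] };

        printf("same slot, untouched since: %d\n", can_skip_reprogram(&a, 1, 0)); /* 1 */
        tags[1]++;      /* another event was assigned counter 1 in the meantime */
        printf("slot reused by another:     %d\n", can_skip_reprogram(&a, 1, 0)); /* 0 */
        return 0;
}
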
@@ -1776,6 +1811,19 @@ static int x86_pmu_enable(struct perf_event *event)
        return 0;
 }
 
+static int x86_pmu_start(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (hwc->idx == -1)
+               return -EAGAIN;
+
+       x86_perf_event_set_period(event, hwc, hwc->idx);
+       x86_pmu.enable(hwc, hwc->idx);
+
+       return 0;
+}
+
 static void x86_pmu_unthrottle(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
@@ -1899,8 +1947,9 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
        event->pending_kill = POLL_IN;
 }
 
-static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc)
+static void x86_pmu_stop(struct perf_event *event)
 {
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;
 
@@ -1929,7 +1978,7 @@ static void x86_pmu_disable(struct perf_event *event)
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int i;
 
-       __x86_pmu_disable(event, cpuc);
+       x86_pmu_stop(event);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {
@@ -2171,6 +2220,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 }
 
 static struct event_constraint unconstrained;
+static struct event_constraint emptyconstraint;
 
 static struct event_constraint bts_constraint =
        EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
@@ -2210,10 +2260,146 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
        return &unconstrained;
 }
 
+/*
+ * AMD64 events are detected based on their event codes.
+ */
+static inline int amd_is_nb_event(struct hw_perf_event *hwc)
+{
+       return (hwc->config & 0xe0) == 0xe0;
+}
+
+static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
+                                     struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+       struct amd_nb *nb = cpuc->amd_nb;
+       int i;
+
+       /*
+        * only care about NB events
+        */
+       if (!(nb && amd_is_nb_event(hwc)))
+               return;
+
+       /*
+        * need to scan whole list because event may not have
+        * been assigned during scheduling
+        *
+        * no race condition possible because event can only
+        * be removed on one CPU at a time AND PMU is disabled
+        * when we come here
+        */
+       for (i = 0; i < x86_pmu.num_events; i++) {
+               if (nb->owners[i] == event) {
+                       cmpxchg(nb->owners+i, event, NULL);
+                       break;
+               }
+       }
+}
+
+ /*
+  * AMD64 NorthBridge events need special treatment because
+  * counter access needs to be synchronized across all cores
+  * of a package. Refer to BKDG section 3.12
+  *
+  * NB events are events measuring L3 cache and HyperTransport
+  * traffic. They are identified by an event code >= 0xe00.
+  * They measure events on the NorthBridge, which is shared
+  * by all cores on a package. NB events are counted on a
+  * shared set of counters. When a NB event is programmed
+  * in a counter, the data actually comes from a shared
+  * counter. Thus, access to those counters needs to be
+  * synchronized.
+  *
+  * We implement the synchronization such that no two cores
+  * can be measuring NB events using the same counters. Thus,
+  * we maintain a per-NB allocation table. The available slot
+  * is propagated using the event_constraint structure.
+  *
+  * We provide only one choice for each NB event based on
+  * the fact that only NB events have restrictions. Consequently,
+  * if a counter is available, there is a guarantee the NB event
+  * will be assigned to it. If no slot is available, an empty
+  * constraint is returned and scheduling will eventually fail
+  * for this event.
+  *
+  * Note that all cores attached to the same NB compete for the same
+  * counters to host NB events, this is why we use atomic ops. Some
+  * multi-chip CPUs may have more than one NB.
+  *
+  * Given that resources are allocated (cmpxchg), they must be
+  * eventually freed for others to use. This is accomplished by
+  * calling amd_put_event_constraints().
+  *
+  * Non NB events are not impacted by this restriction.
+  */
 static struct event_constraint *
 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
-       return &unconstrained;
+       struct hw_perf_event *hwc = &event->hw;
+       struct amd_nb *nb = cpuc->amd_nb;
+       struct perf_event *old = NULL;
+       int max = x86_pmu.num_events;
+       int i, j, k = -1;
+
+       /*
+        * if not NB event or no NB, then no constraints
+        */
+       if (!(nb && amd_is_nb_event(hwc)))
+               return &unconstrained;
+
+       /*
+        * detect if already present, if so reuse
+        *
+        * cannot merge with actual allocation
+        * because of possible holes
+        *
+        * event can already be present yet not assigned (in hwc->idx)
+        * because of successive calls to x86_schedule_events() from
+        * hw_perf_group_sched_in() without hw_perf_enable()
+        */
+       for (i = 0; i < max; i++) {
+               /*
+                * keep track of first free slot
+                */
+               if (k == -1 && !nb->owners[i])
+                       k = i;
+
+               /* already present, reuse */
+               if (nb->owners[i] == event)
+                       goto done;
+       }
+       /*
+        * not present, so grab a new slot
+        * starting either at:
+        */
+       if (hwc->idx != -1) {
+               /* previous assignment */
+               i = hwc->idx;
+       } else if (k != -1) {
+               /* start from free slot found */
+               i = k;
+       } else {
+               /*
+                * event not found, no slot found in
+                * first pass, try again from the
+                * beginning
+                */
+               i = 0;
+       }
+       j = i;
+       do {
+               old = cmpxchg(nb->owners+i, NULL, event);
+               if (!old)
+                       break;
+               if (++i == max)
+                       i = 0;
+       } while (i != j);
+done:
+       if (!old)
+               return &nb->event_constraints[i];
+
+       return &emptyconstraint;
 }
 
 static int x86_event_sched_in(struct perf_event *event,
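
A rough user-space model of the slot-claiming loop in amd_get_event_constraints() above, assuming four NB counters: each owners[] entry is claimed with a compare-and-swap, starting from a preferred index and wrapping around; finding no free slot corresponds to returning the empty constraint. nb_claim_slot() and NUM_COUNTERS are illustrative names, not kernel APIs:

/* Simplified model; the real code also does a separate "already owned" scan. */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_COUNTERS 4

struct nb_model {
        _Atomic(void *) owners[NUM_COUNTERS];
};

static int nb_claim_slot(struct nb_model *nb, void *event, int start)
{
        int i = (start >= 0 && start < NUM_COUNTERS) ? start : 0;
        int j = i;

        do {
                void *expected = NULL;

                if (atomic_compare_exchange_strong(&nb->owners[i], &expected, event))
                        return i;       /* slot was free and is now ours */
                if (expected == event)
                        return i;       /* we already owned this slot, reuse it */
                if (++i == NUM_COUNTERS)
                        i = 0;          /* wrap around, like the kernel loop */
        } while (i != j);

        return -1;                      /* every slot taken: empty constraint */
}

int main(void)
{
        struct nb_model nb = { 0 };
        int a, b;                       /* only their addresses are used as "events" */

        printf("event A -> slot %d\n", nb_claim_slot(&nb, &a, -1));
        printf("event B -> slot %d\n", nb_claim_slot(&nb, &b, -1));
        return 0;
}
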
@@ -2426,7 +2612,8 @@ static __initconst struct x86_pmu amd_pmu = {
        .apic                   = 1,
        /* use highest bit to detect overflow */
        .max_period             = (1ULL << 47) - 1,
-       .get_event_constraints  = amd_get_event_constraints
+       .get_event_constraints  = amd_get_event_constraints,
+       .put_event_constraints  = amd_put_event_constraints
 };
 
 static __init int p6_pmu_init(void)
@@ -2550,6 +2737,91 @@ static __init int intel_pmu_init(void)
        return 0;
 }
 
+static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
+{
+       struct amd_nb *nb;
+       int i;
+
+       nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+       if (!nb)
+               return NULL;
+
+       memset(nb, 0, sizeof(*nb));
+       nb->nb_id = nb_id;
+
+       /*
+        * initialize all possible NB constraints
+        */
+       for (i = 0; i < x86_pmu.num_events; i++) {
+               set_bit(i, nb->event_constraints[i].idxmsk);
+               nb->event_constraints[i].weight = 1;
+       }
+       return nb;
+}
+
+static void amd_pmu_cpu_online(int cpu)
+{
+       struct cpu_hw_events *cpu1, *cpu2;
+       struct amd_nb *nb = NULL;
+       int i, nb_id;
+
+       if (boot_cpu_data.x86_max_cores < 2)
+               return;
+
+       /*
+        * function may be called too early in the
+        * boot process, in which case nb_id is bogus
+        */
+       nb_id = amd_get_nb_id(cpu);
+       if (nb_id == BAD_APICID)
+               return;
+
+       cpu1 = &per_cpu(cpu_hw_events, cpu);
+       cpu1->amd_nb = NULL;
+
+       raw_spin_lock(&amd_nb_lock);
+
+       for_each_online_cpu(i) {
+               cpu2 = &per_cpu(cpu_hw_events, i);
+               nb = cpu2->amd_nb;
+               if (!nb)
+                       continue;
+               if (nb->nb_id == nb_id)
+                       goto found;
+       }
+
+       nb = amd_alloc_nb(cpu, nb_id);
+       if (!nb) {
+               pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
+               raw_spin_unlock(&amd_nb_lock);
+               return;
+       }
+found:
+       nb->refcnt++;
+       cpu1->amd_nb = nb;
+
+       raw_spin_unlock(&amd_nb_lock);
+}
+
+static void amd_pmu_cpu_offline(int cpu)
+{
+       struct cpu_hw_events *cpuhw;
+
+       if (boot_cpu_data.x86_max_cores < 2)
+               return;
+
+       cpuhw = &per_cpu(cpu_hw_events, cpu);
+
+       raw_spin_lock(&amd_nb_lock);
+
+       if (cpuhw->amd_nb && --cpuhw->amd_nb->refcnt == 0)
+               kfree(cpuhw->amd_nb);
+
+       cpuhw->amd_nb = NULL;
+
+       raw_spin_unlock(&amd_nb_lock);
+}
+
 static __init int amd_pmu_init(void)
 {
        /* Performance-monitoring supported from K7 and later: */
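
Ignoring locking and per-CPU details, the online/offline pair above boils down to refcounted sharing of one structure per node. A single-threaded sketch in which node_of(), MAX_CPUS and the cpu_nb[] array are made-up stand-ins (the real code holds amd_nb_lock and uses amd_get_nb_id()):

/* Illustrative model only; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_CPUS 8

struct shared_nb {
        int nb_id;
        int refcnt;
};

static struct shared_nb *cpu_nb[MAX_CPUS];

static int node_of(int cpu) { return cpu / 4; } /* pretend 4 cores per node */

static void model_cpu_online(int cpu)
{
        int nb_id = node_of(cpu);

        for (int i = 0; i < MAX_CPUS; i++) {
                if (cpu_nb[i] && cpu_nb[i]->nb_id == nb_id) {
                        cpu_nb[i]->refcnt++;            /* share the existing NB */
                        cpu_nb[cpu] = cpu_nb[i];
                        return;
                }
        }
        cpu_nb[cpu] = calloc(1, sizeof(*cpu_nb[cpu]));  /* first CPU on this node */
        cpu_nb[cpu]->nb_id = nb_id;
        cpu_nb[cpu]->refcnt = 1;
}

static void model_cpu_offline(int cpu)
{
        struct shared_nb *nb = cpu_nb[cpu];

        if (nb && --nb->refcnt == 0)                    /* last CPU on the node frees it */
                free(nb);
        cpu_nb[cpu] = NULL;
}

int main(void)
{
        model_cpu_online(0);
        model_cpu_online(1);                            /* same node as CPU0 */
        printf("refcnt for node 0: %d\n", cpu_nb[0]->refcnt);   /* 2 */
        model_cpu_offline(1);
        model_cpu_offline(0);
        return 0;
}
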
@@ -2562,6 +2834,11 @@ static __init int amd_pmu_init(void)
        memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
               sizeof(hw_cache_event_ids));
 
+       /*
+        * explicitly initialize the boot cpu; other cpus will get
+        * the cpu hotplug callbacks from smp_init()
+        */
+       amd_pmu_cpu_online(smp_processor_id());
        return 0;
 }
 
@@ -2622,7 +2899,8 @@ void __init init_hw_perf_events(void)
        register_die_notifier(&perf_event_nmi_notifier);
 
        unconstrained = (struct event_constraint)
-               EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);
+               __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
+                                  0, x86_pmu.num_events);
 
        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
@@ -2641,6 +2919,8 @@ static inline void x86_pmu_read(struct perf_event *event)
 static const struct pmu pmu = {
        .enable         = x86_pmu_enable,
        .disable        = x86_pmu_disable,
+       .start          = x86_pmu_start,
+       .stop           = x86_pmu_stop,
        .read           = x86_pmu_read,
        .unthrottle     = x86_pmu_unthrottle,
 };
@@ -2892,4 +3172,25 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 void hw_perf_event_setup_online(int cpu)
 {
        init_debug_store_on_cpu(cpu);
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               amd_pmu_cpu_online(cpu);
+               break;
+       default:
+               return;
+       }
+}
+
+void hw_perf_event_setup_offline(int cpu)
+{
+       init_debug_store_on_cpu(cpu);
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               amd_pmu_cpu_offline(cpu);
+               break;
+       default:
+               return;
+       }
 }