ARM: Build fix after Tegra14 K3.4 merge

Bring the ARMv6 perf backend in line with the arm_pmu interface pulled
in by the merge: the enable/disable/read_counter/write_counter hooks
now take a struct perf_event * and derive the counter index themselves,
start/stop and the IRQ handler operate on the struct arm_pmu * they are
handed instead of the global armpmu, per-CPU state moves from struct
cpu_hw_events to struct pmu_hw_events, and the static PMU descriptors
are replaced by init functions that fill in a caller-provided struct
arm_pmu. The event maps also gain STALLED_CYCLES_FRONTEND/BACKEND
entries and drop the I-cache write-miss mappings.
[linux-3.10.git] arch/arm/kernel/perf_event_v6.c
index a4c5aa9..03664b0 100644
@@ -65,13 +65,15 @@ enum armv6_counters {
  * accesses/misses in hardware.
  */
 static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6_PERFCTR_INSTR_EXEC,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6_PERFCTR_BR_MISPREDICT,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6_PERFCTR_INSTR_EXEC,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]            = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6_PERFCTR_BR_EXEC,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6_PERFCTR_BR_MISPREDICT,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6_PERFCTR_LSU_FULL_STALL,
 };
 
 static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -104,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]        = ARMV6_PERFCTR_ICACHE_MISS,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
@@ -218,13 +220,15 @@ enum armv6mpcore_perf_types {
  * accesses/misses in hardware.
  */
 static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
-       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
-       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
-       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
-       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
-       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
-       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CPU_CYCLES]              = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]            = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = ARMV6MPCORE_PERFCTR_BR_EXEC,
+       [PERF_COUNT_HW_BRANCH_MISSES]           = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
+       [PERF_COUNT_HW_BUS_CYCLES]              = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
+       [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
 };
 
 static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@@ -255,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
-                       [C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
+                       [C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
@@ -397,9 +401,10 @@ armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
        return ret;
 }
 
-static inline u32
-armv6pmu_read_counter(int counter)
+static inline u32 armv6pmu_read_counter(struct perf_event *event)
 {
+       struct hw_perf_event *hwc = &event->hw;
+       int counter = hwc->idx;
        unsigned long value = 0;
 
        if (ARMV6_CYCLE_COUNTER == counter)
@@ -414,10 +419,11 @@ armv6pmu_read_counter(int counter)
        return value;
 }
 
-static inline void
-armv6pmu_write_counter(int counter,
-                      u32 value)
+static inline void armv6pmu_write_counter(struct perf_event *event, u32 value)
 {
+       struct hw_perf_event *hwc = &event->hw;
+       int counter = hwc->idx;
+
        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
        else if (ARMV6_COUNTER0 == counter)
@@ -428,12 +434,13 @@ armv6pmu_write_counter(int counter,
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
 }
 
-static void
-armv6pmu_enable_event(struct hw_perf_event *hwc,
-                     int idx)
+static void armv6pmu_enable_event(struct perf_event *event)
 {
        unsigned long val, mask, evt, flags;
-       struct cpu_hw_events *events = armpmu->get_hw_events();
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       int idx = hwc->idx;
 
        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = 0;
@@ -463,30 +470,14 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-       unsigned long mask = 0;
-       if (idx == ARMV6_CYCLE_COUNTER)
-               mask = ARMV6_PMCR_CCOUNT_IEN;
-       else if (idx == ARMV6_COUNTER0)
-               mask = ARMV6_PMCR_COUNT0_IEN;
-       else if (idx == ARMV6_COUNTER1)
-               mask = ARMV6_PMCR_COUNT1_IEN;
-
-       if (mask)
-               return pmcr & mask;
-
-       WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-       return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
                    void *dev)
 {
        unsigned long pmcr = armv6_pmcr_read();
        struct perf_sample_data data;
-       struct cpu_hw_events *cpuc;
+       struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
+       struct pmu_hw_events *cpuc = cpu_pmu->get_hw_events();
        struct pt_regs *regs;
        int idx;
 
@@ -502,14 +493,12 @@ armv6pmu_handle_irq(int irq_num,
         */
        armv6_pmcr_write(pmcr);
 
-       perf_sample_data_init(&data, 0);
-
-       cpuc = &__get_cpu_var(cpu_hw_events);
-       for (idx = 0; idx < armpmu->num_events; ++idx) {
+       for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
 
-               if (!counter_is_active(pmcr, idx))
+               /* Ignore if we don't have an event. */
+               if (!event)
                        continue;
 
                /*
@@ -520,13 +509,13 @@ armv6pmu_handle_irq(int irq_num,
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
-               data.period = event->hw.last_period;
-               if (!armpmu_event_set_period(event, hwc, idx))
+               armpmu_event_update(event);
+               perf_sample_data_init(&data, 0, hwc->last_period);
+               if (!armpmu_event_set_period(event))
                        continue;
 
                if (perf_event_overflow(event, &data, regs))
-                       armpmu->disable(hwc, idx);
+                       cpu_pmu->disable(event);
        }
 
        /*
@@ -541,11 +530,10 @@ armv6pmu_handle_irq(int irq_num,
        return IRQ_HANDLED;
 }
 
-static void
-armv6pmu_start(void)
+static void armv6pmu_start(struct arm_pmu *cpu_pmu)
 {
        unsigned long flags, val;
-       struct cpu_hw_events *events = armpmu->get_hw_events();
+       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
@@ -554,11 +542,10 @@ armv6pmu_start(void)
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6pmu_stop(void)
+static void armv6pmu_stop(struct arm_pmu *cpu_pmu)
 {
        unsigned long flags, val;
-       struct cpu_hw_events *events = armpmu->get_hw_events();
+       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
@@ -568,11 +555,12 @@ armv6pmu_stop(void)
 }
 
 static int
-armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
-                      struct hw_perf_event *event)
+armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
+                               struct perf_event *event)
 {
+       struct hw_perf_event *hwc = &event->hw;
        /* Always place a cycle counter into the cycle counter. */
-       if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
+       if (ARMV6_PERFCTR_CPU_CYCLES == hwc->config_base) {
                if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;
 
@@ -593,12 +581,13 @@ armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
        }
 }
 
-static void
-armv6pmu_disable_event(struct hw_perf_event *hwc,
-                      int idx)
+static void armv6pmu_disable_event(struct perf_event *event)
 {
        unsigned long val, mask, evt, flags;
-       struct cpu_hw_events *events = armpmu->get_hw_events();
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       int idx = hwc->idx;
 
        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = ARMV6_PMCR_CCOUNT_IEN;
@@ -627,12 +616,13 @@ armv6pmu_disable_event(struct hw_perf_event *hwc,
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static void
-armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
-                             int idx)
+static void armv6mpcore_pmu_disable_event(struct perf_event *event)
 {
        unsigned long val, mask, flags, evt = 0;
-       struct cpu_hw_events *events = armpmu->get_hw_events();
+       struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+       struct hw_perf_event *hwc = &event->hw;
+       struct pmu_hw_events *events = cpu_pmu->get_hw_events();
+       int idx = hwc->idx;
 
        if (ARMV6_CYCLE_COUNTER == idx) {
                mask    = ARMV6_PMCR_CCOUNT_IEN;
@@ -659,29 +649,26 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 
 static int armv6_map_event(struct perf_event *event)
 {
-       return map_cpu_event(event, &armv6_perf_map,
+       return armpmu_map_event(event, &armv6_perf_map,
                                &armv6_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6pmu = {
-       .id                     = ARM_PERF_PMU_ID_V6,
-       .name                   = "v6",
-       .handle_irq             = armv6pmu_handle_irq,
-       .enable                 = armv6pmu_enable_event,
-       .disable                = armv6pmu_disable_event,
-       .read_counter           = armv6pmu_read_counter,
-       .write_counter          = armv6pmu_write_counter,
-       .get_event_idx          = armv6pmu_get_event_idx,
-       .start                  = armv6pmu_start,
-       .stop                   = armv6pmu_stop,
-       .map_event              = armv6_map_event,
-       .num_events             = 3,
-       .max_period             = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__init armv6pmu_init(void)
+static int armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return &armv6pmu;
+       cpu_pmu->name           = "v6";
+       cpu_pmu->handle_irq     = armv6pmu_handle_irq;
+       cpu_pmu->enable         = armv6pmu_enable_event;
+       cpu_pmu->disable        = armv6pmu_disable_event;
+       cpu_pmu->read_counter   = armv6pmu_read_counter;
+       cpu_pmu->write_counter  = armv6pmu_write_counter;
+       cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
+       cpu_pmu->start          = armv6pmu_start;
+       cpu_pmu->stop           = armv6pmu_stop;
+       cpu_pmu->map_event      = armv6_map_event;
+       cpu_pmu->num_events     = 3;
+       cpu_pmu->max_period     = (1LLU << 32) - 1;
+
+       return 0;
 }
 
 /*
@@ -694,38 +681,35 @@ static struct arm_pmu *__init armv6pmu_init(void)
 
 static int armv6mpcore_map_event(struct perf_event *event)
 {
-       return map_cpu_event(event, &armv6mpcore_perf_map,
+       return armpmu_map_event(event, &armv6mpcore_perf_map,
                                &armv6mpcore_perf_cache_map, 0xFF);
 }
 
-static struct arm_pmu armv6mpcore_pmu = {
-       .id                     = ARM_PERF_PMU_ID_V6MP,
-       .name                   = "v6mpcore",
-       .handle_irq             = armv6pmu_handle_irq,
-       .enable                 = armv6pmu_enable_event,
-       .disable                = armv6mpcore_pmu_disable_event,
-       .read_counter           = armv6pmu_read_counter,
-       .write_counter          = armv6pmu_write_counter,
-       .get_event_idx          = armv6pmu_get_event_idx,
-       .start                  = armv6pmu_start,
-       .stop                   = armv6pmu_stop,
-       .map_event              = armv6mpcore_map_event,
-       .num_events             = 3,
-       .max_period             = (1LLU << 32) - 1,
-};
-
-static struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return &armv6mpcore_pmu;
+       cpu_pmu->name           = "v6mpcore";
+       cpu_pmu->handle_irq     = armv6pmu_handle_irq;
+       cpu_pmu->enable         = armv6pmu_enable_event;
+       cpu_pmu->disable        = armv6mpcore_pmu_disable_event;
+       cpu_pmu->read_counter   = armv6pmu_read_counter;
+       cpu_pmu->write_counter  = armv6pmu_write_counter;
+       cpu_pmu->get_event_idx  = armv6pmu_get_event_idx;
+       cpu_pmu->start          = armv6pmu_start;
+       cpu_pmu->stop           = armv6pmu_stop;
+       cpu_pmu->map_event      = armv6mpcore_map_event;
+       cpu_pmu->num_events     = 3;
+       cpu_pmu->max_period     = (1LLU << 32) - 1;
+
+       return 0;
 }
 #else
-static struct arm_pmu *__init armv6pmu_init(void)
+static int armv6pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return NULL;
+       return -ENODEV;
 }
 
-static struct arm_pmu *__init armv6mpcore_pmu_init(void)
+static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
 {
-       return NULL;
+       return -ENODEV;
 }
 #endif /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */
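As a quick illustration of the new init contract (a minimal sketch, not
part of the patch; the probe function, its name and the allocation
policy are assumptions made for the example), armv6pmu_init() and
armv6mpcore_pmu_init() no longer return a pointer to a static struct
arm_pmu. They populate a descriptor supplied by the caller and return 0
on success or -ENODEV when the backend is compiled out, so a
hypothetical caller would look like:

/* Hypothetical caller, for illustration only. */
#include <linux/slab.h>

static struct arm_pmu * __init example_probe_v6_pmu(void)
{
	struct arm_pmu *pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);

	if (!pmu)
		return NULL;

	/*
	 * armv6pmu_init() fills in the callbacks, num_events and
	 * max_period; it returns -ENODEV when neither CONFIG_CPU_V6
	 * nor CONFIG_CPU_V6K is enabled.
	 */
	if (armv6pmu_init(pmu) < 0) {
		kfree(pmu);
		return NULL;
	}

	return pmu;
}

Compared with handing out a shared static descriptor, this shape lets
the core code allocate one arm_pmu per PMU instance and turns the
CONFIG_CPU_V6/V6K stub case into a plain errno instead of a NULL check.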