sparc64: Add perf callchain support.
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 9199524..2386ac6 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1,6 +1,6 @@
 /* Performance event support for sparc64.
  *
- * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
  *
  * This code is based almost entirely upon the x86 perf event
  * code, which is:
 #include <linux/kdebug.h>
 #include <linux/mutex.h>
 
+#include <asm/stacktrace.h>
 #include <asm/cpudata.h>
+#include <asm/uaccess.h>
 #include <asm/atomic.h>
 #include <asm/nmi.h>
 #include <asm/pcr.h>
 
+#include "kstack.h"
+
 /* Sparc64 chips have two performance counters, 32-bits each, with
  * overflow interrupts generated on transition from 0xffffffff to 0.
  * The counters are accessed in one go using a 64-bit register.
@@ -56,7 +60,8 @@ struct cpu_hw_events {
        struct perf_event       *events[MAX_HWEVENTS];
        unsigned long           used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
        unsigned long           active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
-       int enabled;
+       u64                     pcr;
+       int                     enabled;
 };
 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
@@ -68,6 +73,17 @@ struct perf_event_map {
 #define PIC_LOWER      0x02
 };
 
+static unsigned long perf_event_encode(const struct perf_event_map *pmap)
+{
+       return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
+}
+
+static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
+{
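+       /* Layout produced by perf_event_encode(): the event encoding
+        * lives in bits 31:16, the PIC counter mask in the low byte.
+        */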
+       *msk = val & 0xff;
+       *enc = val >> 16;
+}
+
 #define C(x) PERF_COUNT_HW_CACHE_##x
 
 #define CACHE_OP_UNSUPPORTED   0xfffe
@@ -203,7 +219,7 @@ static const struct sparc_pmu ultra3_pmu = {
 
 /* Niagara1 is very limited.  The upper PIC is hard-locked to count
  * only instructions, so it is free running which creates all kinds of
- * problems.  Some hardware designs make one wonder if the creastor
+ * problems.  Some hardware designs make one wonder if the creator
  * even looked at how this stuff gets used by software.
  */
 static const struct perf_event_map niagara1_perfmon_event_map[] = {
@@ -425,7 +441,7 @@ static const struct sparc_pmu niagara2_pmu = {
        .lower_shift    = 6,
        .event_mask     = 0xfff,
        .hv_bit         = 0x8,
-       .irq_bit        = 0x03,
+       .irq_bit        = 0x30,
        .upper_nop      = 0x220,
        .lower_nop      = 0x220,
 };
@@ -453,23 +469,30 @@ static u64 nop_for_index(int idx)
                              sparc_pmu->lower_nop, idx);
 }
 
-static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
-                                           int idx)
+static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
        u64 val, mask = mask_for_index(idx);
 
-       val = pcr_ops->read();
-       pcr_ops->write((val & ~mask) | hwc->config);
+       val = cpuc->pcr;
+       val &= ~mask;
+       val |= hwc->config;
+       cpuc->pcr = val;
+
+       pcr_ops->write(cpuc->pcr);
 }
 
-static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
-                                            int idx)
+static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
 {
        u64 mask = mask_for_index(idx);
        u64 nop = nop_for_index(idx);
-       u64 val = pcr_ops->read();
+       u64 val;
+
+       val = cpuc->pcr;
+       val &= ~mask;
+       val |= nop;
+       cpuc->pcr = val;
 
-       pcr_ops->write((val & ~mask) | nop);
+       pcr_ops->write(cpuc->pcr);
 }
 
 void hw_perf_enable(void)
@@ -484,7 +507,7 @@ void hw_perf_enable(void)
        cpuc->enabled = 1;
        barrier();
 
-       val = pcr_ops->read();
+       val = cpuc->pcr;
 
        for (i = 0; i < MAX_HWEVENTS; i++) {
                struct perf_event *cp = cpuc->events[i];
@@ -496,7 +519,9 @@ void hw_perf_enable(void)
                val |= hwc->config_base;
        }
 
-       pcr_ops->write(val);
+       cpuc->pcr = val;
+
+       pcr_ops->write(cpuc->pcr);
 }
 
 void hw_perf_disable(void)
@@ -509,10 +534,12 @@ void hw_perf_disable(void)
 
        cpuc->enabled = 0;
 
-       val = pcr_ops->read();
+       val = cpuc->pcr;
        val &= ~(PCR_UTRACE | PCR_STRACE |
                 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
-       pcr_ops->write(val);
+       cpuc->pcr = val;
+
+       pcr_ops->write(cpuc->pcr);
 }
 
 static u32 read_pmc(int idx)
@@ -544,7 +571,7 @@ static void write_pmc(int idx, u64 val)
 }
 
 static int sparc_perf_event_set_period(struct perf_event *event,
-                                        struct hw_perf_event *hwc, int idx)
+                                      struct hw_perf_event *hwc, int idx)
 {
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
@@ -584,19 +611,19 @@ static int sparc_pmu_enable(struct perf_event *event)
        if (test_and_set_bit(idx, cpuc->used_mask))
                return -EAGAIN;
 
-       sparc_pmu_disable_event(hwc, idx);
+       sparc_pmu_disable_event(cpuc, hwc, idx);
 
        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);
 
        sparc_perf_event_set_period(event, hwc, idx);
-       sparc_pmu_enable_event(hwc, idx);
+       sparc_pmu_enable_event(cpuc, hwc, idx);
        perf_event_update_userpage(event);
        return 0;
 }
 
 static u64 sparc_perf_event_update(struct perf_event *event,
-                                    struct hw_perf_event *hwc, int idx)
+                                  struct hw_perf_event *hwc, int idx)
 {
        int shift = 64 - 32;
        u64 prev_raw_count, new_raw_count;
@@ -626,7 +653,7 @@ static void sparc_pmu_disable(struct perf_event *event)
        int idx = hwc->idx;
 
        clear_bit(idx, cpuc->active_mask);
-       sparc_pmu_disable_event(hwc, idx);
+       sparc_pmu_disable_event(cpuc, hwc, idx);
 
        barrier();
 
@@ -640,18 +667,29 @@ static void sparc_pmu_disable(struct perf_event *event)
 static void sparc_pmu_read(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
+
        sparc_perf_event_update(event, hwc, hwc->idx);
 }
 
 static void sparc_pmu_unthrottle(struct perf_event *event)
 {
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       sparc_pmu_enable_event(hwc, hwc->idx);
+
+       sparc_pmu_enable_event(cpuc, hwc, hwc->idx);
 }
 
 static atomic_t active_events = ATOMIC_INIT(0);
 static DEFINE_MUTEX(pmc_grab_mutex);
 
+static void perf_stop_nmi_watchdog(void *unused)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+
+       stop_nmi_watchdog(NULL);
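+       /* With the watchdog stopped, snapshot %pcr as the baseline
+        * value the perf code will program counters from.
+        */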
+       cpuc->pcr = pcr_ops->read();
+}
+
 void perf_event_grab_pmc(void)
 {
        if (atomic_inc_not_zero(&active_events))
@@ -660,7 +698,7 @@ void perf_event_grab_pmc(void)
        mutex_lock(&pmc_grab_mutex);
        if (atomic_read(&active_events) == 0) {
                if (atomic_read(&nmi_active) > 0) {
-                       on_each_cpu(stop_nmi_watchdog, NULL, 1);
+                       on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
                        BUG_ON(atomic_read(&nmi_active) != 0);
                }
                atomic_inc(&active_events);
@@ -713,6 +751,48 @@ static void hw_perf_event_destroy(struct perf_event *event)
        perf_event_release_pmc();
 }
 
+/* Make sure all events can be scheduled into the hardware at
+ * the same time.  This is simplified by the fact that we only
+ * need to support 2 simultaneous HW events.
+ */
+static int sparc_check_constraints(unsigned long *events, int n_ev)
+{
+       if (n_ev <= perf_max_events) {
+               u8 msk1, msk2;
+               u16 dummy;
+
+               if (n_ev == 1)
+                       return 0;
+               BUG_ON(n_ev != 2);
+               perf_event_decode(events[0], &dummy, &msk1);
+               perf_event_decode(events[1], &dummy, &msk2);
+
+               /* If both events can go on any counter, OK.  */
+               if (msk1 == (PIC_UPPER | PIC_LOWER) &&
+                   msk2 == (PIC_UPPER | PIC_LOWER))
+                       return 0;
+
+               /* If one event is limited to a specific counter,
+                * and the other can go on both, OK.
+                */
+               if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
+                   msk2 == (PIC_UPPER | PIC_LOWER))
+                       return 0;
+               if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
+                   msk1 == (PIC_UPPER | PIC_LOWER))
+                       return 0;
+
+               /* If the events are fixed to different counters, OK.  */
+               if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
+                   (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
+                       return 0;
+
+               /* Otherwise, there is a conflict.  */
+       }
+
+       return -1;
+}
+
 static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
 {
        int eu = 0, ek = 0, eh = 0;
@@ -742,7 +822,7 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
 }
 
 static int collect_events(struct perf_event *group, int max_count,
-                         struct perf_event *evts[], u64 *events)
+                         struct perf_event *evts[], unsigned long *events)
 {
        struct perf_event *event;
        int n = 0;
@@ -751,7 +831,7 @@ static int collect_events(struct perf_event *group, int max_count,
                if (n >= max_count)
                        return -1;
                evts[n] = group;
-               events[n++] = group->hw.config;
+               events[n++] = group->hw.event_base;
        }
        list_for_each_entry(event, &group->sibling_list, group_entry) {
                if (!is_software_event(event) &&
@@ -759,7 +839,7 @@ static int collect_events(struct perf_event *group, int max_count,
                        if (n >= max_count)
                                return -1;
                        evts[n] = event;
-                       events[n++] = event->hw.config;
+                       events[n++] = event->hw.event_base;
                }
        }
        return n;
@@ -770,8 +850,9 @@ static int __hw_perf_event_init(struct perf_event *event)
        struct perf_event_attr *attr = &event->attr;
        struct perf_event *evts[MAX_HWEVENTS];
        struct hw_perf_event *hwc = &event->hw;
+       unsigned long events[MAX_HWEVENTS];
        const struct perf_event_map *pmap;
-       u64 enc, events[MAX_HWEVENTS];
+       u64 enc;
        int n;
 
        if (atomic_read(&nmi_active) < 0)
@@ -800,6 +881,8 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (!attr->exclude_hv)
                hwc->config_base |= sparc_pmu->hv_bit;
 
+       hwc->event_base = perf_event_encode(pmap);
+
        enc = pmap->encoding;
 
        n = 0;
@@ -810,12 +893,15 @@ static int __hw_perf_event_init(struct perf_event *event)
                if (n < 0)
                        return -EINVAL;
        }
-       events[n] = enc;
+       events[n] = hwc->event_base;
        evts[n] = event;
 
        if (check_excludes(evts, n, 1))
                return -EINVAL;
 
+       if (sparc_check_constraints(events, n + 1))
+               return -EINVAL;
+
        /* Try to do all error checking before this point, as unwinding
         * state after grabbing the PMC is difficult.
         */
@@ -880,7 +966,7 @@ void perf_event_print_debug(void)
 }
 
 static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
-                                             unsigned long cmd, void *__args)
+                                           unsigned long cmd, void *__args)
 {
        struct die_args *args = __args;
        struct perf_sample_data data;
@@ -904,6 +990,17 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
        data.addr = 0;
 
        cpuc = &__get_cpu_var(cpu_hw_events);
+
+       /* If the PMU has the TOE IRQ enable bits, we need to do a
+        * dummy write to the %pcr to clear the overflow bits and thus
+        * the interrupt.
+        *
+        * Do this before we peek at the counters to determine
+        * overflow so we don't lose any events.
+        */
+       if (sparc_pmu->irq_bit)
+               pcr_ops->write(cpuc->pcr);
+
        for (idx = 0; idx < MAX_HWEVENTS; idx++) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
@@ -921,7 +1018,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       sparc_pmu_disable_event(hwc, idx);
+                       sparc_pmu_disable_event(cpuc, hwc, idx);
        }
 
        return NOTIFY_STOP;
@@ -969,3 +1066,117 @@ void __init init_hw_perf_events(void)
 
        register_die_notifier(&perf_event_nmi_notifier);
 }
+
+static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
+{
+       if (entry->nr < PERF_MAX_STACK_DEPTH)
+               entry->ip[entry->nr++] = ip;
+}
+
+static void perf_callchain_kernel(struct pt_regs *regs,
+                                 struct perf_callchain_entry *entry)
+{
+       unsigned long ksp, fp;
+
+       callchain_store(entry, PERF_CONTEXT_KERNEL);
+       callchain_store(entry, regs->tpc);
+
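+       /* Kernel stack pointers on sparc64 are biased by STACK_BIAS,
+        * so add it back to get the real frame address.
+        */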
+       ksp = regs->u_regs[UREG_I6];
+       fp = ksp + STACK_BIAS;
+       do {
+               struct sparc_stackf *sf;
+               struct pt_regs *regs;
+               unsigned long pc;
+
+               if (!kstack_valid(current_thread_info(), fp))
+                       break;
+
+               sf = (struct sparc_stackf *) fp;
+               regs = (struct pt_regs *) (sf + 1);
+
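+               /* A trap frame on the stack marks an exception boundary:
+                * continue the walk from the saved %tpc and frame pointer,
+                * unless the trap came from user mode.
+                */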
+               if (kstack_is_trap_frame(current_thread_info(), regs)) {
+                       if (user_mode(regs))
+                               break;
+                       pc = regs->tpc;
+                       fp = regs->u_regs[UREG_I6] + STACK_BIAS;
+               } else {
+                       pc = sf->callers_pc;
+                       fp = (unsigned long)sf->fp + STACK_BIAS;
+               }
+               callchain_store(entry, pc);
+       } while (entry->nr < PERF_MAX_STACK_DEPTH);
+}
+
+static void perf_callchain_user_64(struct pt_regs *regs,
+                                  struct perf_callchain_entry *entry)
+{
+       unsigned long ufp;
+
+       callchain_store(entry, PERF_CONTEXT_USER);
+       callchain_store(entry, regs->tpc);
+
+       ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
+       do {
+               struct sparc_stackf *usf, sf;
+               unsigned long pc;
+
+               usf = (struct sparc_stackf *) ufp;
+               if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+                       break;
+
+               pc = sf.callers_pc;
+               ufp = (unsigned long)sf.fp + STACK_BIAS;
+               callchain_store(entry, pc);
+       } while (entry->nr < PERF_MAX_STACK_DEPTH);
+}
+
+static void perf_callchain_user_32(struct pt_regs *regs,
+                                  struct perf_callchain_entry *entry)
+{
+       unsigned long ufp;
+
+       callchain_store(entry, PERF_CONTEXT_USER);
+       callchain_store(entry, regs->tpc);
+
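+       /* 32-bit tasks use unbiased stack pointers, so unlike the
+        * 64-bit walk no STACK_BIAS adjustment is needed.
+        */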
+       ufp = regs->u_regs[UREG_I6];
+       do {
+               struct sparc_stackf32 *usf, sf;
+               unsigned long pc;
+
+               usf = (struct sparc_stackf32 *) ufp;
+               if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
+                       break;
+
+               pc = sf.callers_pc;
+               ufp = (unsigned long)sf.fp;
+               callchain_store(entry, pc);
+       } while (entry->nr < PERF_MAX_STACK_DEPTH);
+}
+
+/* Like powerpc, we can't get PMU interrupts within the PMU handler,
+ * so there is no need for separate NMI and IRQ chains as on x86.
+ */
+static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+       struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
+
+       entry->nr = 0;
+       if (!user_mode(regs)) {
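+               /* Flush kernel register windows to the stack so the
+                * saved frame pointers are visible to the walker.
+                */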
+               stack_trace_flush();
+               perf_callchain_kernel(regs, entry);
+               if (current->mm)
+                       regs = task_pt_regs(current);
+               else
+                       regs = NULL;
+       }
+       if (regs) {
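+               /* flushw_user() spills the user register windows to the
+                * user stack so the frames can be copied from userspace.
+                */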
+               flushw_user();
+               if (test_thread_flag(TIF_32BIT))
+                       perf_callchain_user_32(regs, entry);
+               else
+                       perf_callchain_user_64(regs, entry);
+       }
+       return entry;
+}