Merge branch 'perfcounters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
Linus Torvalds [Thu, 11 Jun 2009 21:01:07 +0000 (14:01 -0700)]
* 'perfcounters-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (574 commits)
  perf_counter: Turn off by default
  perf_counter: Add counter->id to the throttle event
  perf_counter: Better align code
  perf_counter: Rename L2 to LL cache
  perf_counter: Standardize event names
  perf_counter: Rename enums
  perf_counter tools: Clean up u64 usage
  perf_counter: Rename perf_counter_limit sysctl
  perf_counter: More paranoia settings
  perf_counter: powerpc: Implement generalized cache events for POWER processors
  perf_counters: powerpc: Add support for POWER7 processors
  perf_counter: Accurate period data
  perf_counter: Introduce struct for sample data
  perf_counter tools: Normalize data using per sample period data
  perf_counter: Annotate exit ctx recursion
  perf_counter tools: Propagate signals properly
  perf_counter tools: Small frequency related fixes
  perf_counter: More aggressive frequency adjustment
  perf_counter/x86: Fix the model number of Intel Core2 processors
  perf_counter, x86: Correct some event and umask values for Intel processors
  ...

138 files changed:
MAINTAINERS
arch/powerpc/include/asm/hw_irq.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/perf_counter.h [new file with mode: 0644]
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/systbl.h
arch/powerpc/include/asm/unistd.h
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/perf_counter.c [new file with mode: 0644]
arch/powerpc/kernel/power4-pmu.c [new file with mode: 0644]
arch/powerpc/kernel/power5+-pmu.c [new file with mode: 0644]
arch/powerpc/kernel/power5-pmu.c [new file with mode: 0644]
arch/powerpc/kernel/power6-pmu.c [new file with mode: 0644]
arch/powerpc/kernel/power7-pmu.c [new file with mode: 0644]
arch/powerpc/kernel/ppc970-pmu.c [new file with mode: 0644]
arch/powerpc/mm/fault.c
arch/powerpc/platforms/Kconfig.cputype
arch/x86/Kconfig
arch/x86/ia32/ia32entry.S
arch/x86/include/asm/atomic_32.h
arch/x86/include/asm/entry_arch.h
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/intel_arch_perfmon.h [deleted file]
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/perf_counter.h [new file with mode: 0644]
arch/x86/include/asm/unistd_32.h
arch/x86/include/asm/unistd_64.h
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/Makefile
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/perf_counter.c [new file with mode: 0644]
arch/x86/kernel/cpu/perfctr-watchdog.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/irq.c
arch/x86/kernel/irqinit.c
arch/x86/kernel/signal.c
arch/x86/kernel/syscall_table_32.S
arch/x86/kernel/traps.c
arch/x86/mm/fault.c
arch/x86/oprofile/nmi_int.c
arch/x86/oprofile/op_model_ppro.c
arch/x86/vdso/vdso32-setup.c
arch/x86/vdso/vma.c
drivers/char/sysrq.c
fs/exec.c
include/asm-generic/atomic.h
include/linux/init_task.h
include/linux/kernel_stat.h
include/linux/perf_counter.h [new file with mode: 0644]
include/linux/prctl.h
include/linux/sched.h
include/linux/syscalls.h
init/Kconfig
kernel/Makefile
kernel/exit.c
kernel/fork.c
kernel/mutex.c
kernel/perf_counter.c [new file with mode: 0644]
kernel/sched.c
kernel/sys.c
kernel/sys_ni.c
kernel/sysctl.c
kernel/timer.c
mm/mmap.c
mm/mprotect.c
tools/perf/.gitignore [new file with mode: 0644]
tools/perf/Documentation/Makefile [new file with mode: 0644]
tools/perf/Documentation/asciidoc.conf [new file with mode: 0644]
tools/perf/Documentation/manpage-1.72.xsl [new file with mode: 0644]
tools/perf/Documentation/manpage-base.xsl [new file with mode: 0644]
tools/perf/Documentation/manpage-bold-literal.xsl [new file with mode: 0644]
tools/perf/Documentation/manpage-normal.xsl [new file with mode: 0644]
tools/perf/Documentation/manpage-suppress-sp.xsl [new file with mode: 0644]
tools/perf/Documentation/perf-annotate.txt [new file with mode: 0644]
tools/perf/Documentation/perf-help.txt [new file with mode: 0644]
tools/perf/Documentation/perf-list.txt [new file with mode: 0644]
tools/perf/Documentation/perf-record.txt [new file with mode: 0644]
tools/perf/Documentation/perf-report.txt [new file with mode: 0644]
tools/perf/Documentation/perf-stat.txt [new file with mode: 0644]
tools/perf/Documentation/perf-top.txt [new file with mode: 0644]
tools/perf/Documentation/perf.txt [new file with mode: 0644]
tools/perf/Makefile [new file with mode: 0644]
tools/perf/builtin-annotate.c [new file with mode: 0644]
tools/perf/builtin-help.c [new file with mode: 0644]
tools/perf/builtin-list.c [new file with mode: 0644]
tools/perf/builtin-record.c [new file with mode: 0644]
tools/perf/builtin-report.c [new file with mode: 0644]
tools/perf/builtin-stat.c [new file with mode: 0644]
tools/perf/builtin-top.c [new file with mode: 0644]
tools/perf/builtin.h [new file with mode: 0644]
tools/perf/command-list.txt [new file with mode: 0644]
tools/perf/design.txt [new file with mode: 0644]
tools/perf/perf.c [new file with mode: 0644]
tools/perf/perf.h [new file with mode: 0644]
tools/perf/util/PERF-VERSION-GEN [new file with mode: 0755]
tools/perf/util/abspath.c [new file with mode: 0644]
tools/perf/util/alias.c [new file with mode: 0644]
tools/perf/util/cache.h [new file with mode: 0644]
tools/perf/util/color.c [new file with mode: 0644]
tools/perf/util/color.h [new file with mode: 0644]
tools/perf/util/config.c [new file with mode: 0644]
tools/perf/util/ctype.c [new file with mode: 0644]
tools/perf/util/environment.c [new file with mode: 0644]
tools/perf/util/exec_cmd.c [new file with mode: 0644]
tools/perf/util/exec_cmd.h [new file with mode: 0644]
tools/perf/util/generate-cmdlist.sh [new file with mode: 0755]
tools/perf/util/help.c [new file with mode: 0644]
tools/perf/util/help.h [new file with mode: 0644]
tools/perf/util/levenshtein.c [new file with mode: 0644]
tools/perf/util/levenshtein.h [new file with mode: 0644]
tools/perf/util/list.h [new file with mode: 0644]
tools/perf/util/pager.c [new file with mode: 0644]
tools/perf/util/parse-events.c [new file with mode: 0644]
tools/perf/util/parse-events.h [new file with mode: 0644]
tools/perf/util/parse-options.c [new file with mode: 0644]
tools/perf/util/parse-options.h [new file with mode: 0644]
tools/perf/util/path.c [new file with mode: 0644]
tools/perf/util/quote.c [new file with mode: 0644]
tools/perf/util/quote.h [new file with mode: 0644]
tools/perf/util/rbtree.c [new file with mode: 0644]
tools/perf/util/rbtree.h [new file with mode: 0644]
tools/perf/util/run-command.c [new file with mode: 0644]
tools/perf/util/run-command.h [new file with mode: 0644]
tools/perf/util/sigchain.c [new file with mode: 0644]
tools/perf/util/sigchain.h [new file with mode: 0644]
tools/perf/util/strbuf.c [new file with mode: 0644]
tools/perf/util/strbuf.h [new file with mode: 0644]
tools/perf/util/string.c [new file with mode: 0644]
tools/perf/util/string.h [new file with mode: 0644]
tools/perf/util/symbol.c [new file with mode: 0644]
tools/perf/util/symbol.h [new file with mode: 0644]
tools/perf/util/usage.c [new file with mode: 0644]
tools/perf/util/util.h [new file with mode: 0644]
tools/perf/util/wrapper.c [new file with mode: 0644]

index ccdb575..70f961d 100644
@@ -4403,6 +4403,16 @@ S:       Maintained
 F:     include/linux/delayacct.h
 F:     kernel/delayacct.c
 
+PERFORMANCE COUNTER SUBSYSTEM
+P:     Peter Zijlstra
+M:     a.p.zijlstra@chello.nl
+P:     Paul Mackerras
+M:     paulus@samba.org
+P:     Ingo Molnar
+M:     mingo@elte.hu
+L:     linux-kernel@vger.kernel.org
+S:     Supported
+
 PERSONALITY HANDLING
 P:     Christoph Hellwig
 M:     hch@infradead.org
index b7e034b..20a44d0 100644
@@ -131,5 +131,44 @@ static inline int irqs_disabled_flags(unsigned long flags)
  */
 struct irq_chip;
 
+#ifdef CONFIG_PERF_COUNTERS
+static inline unsigned long test_perf_counter_pending(void)
+{
+       unsigned long x;
+
+       asm volatile("lbz %0,%1(13)"
+               : "=r" (x)
+               : "i" (offsetof(struct paca_struct, perf_counter_pending)));
+       return x;
+}
+
+static inline void set_perf_counter_pending(void)
+{
+       asm volatile("stb %0,%1(13)" : :
+               "r" (1),
+               "i" (offsetof(struct paca_struct, perf_counter_pending)));
+}
+
+static inline void clear_perf_counter_pending(void)
+{
+       asm volatile("stb %0,%1(13)" : :
+               "r" (0),
+               "i" (offsetof(struct paca_struct, perf_counter_pending)));
+}
+
+extern void perf_counter_do_pending(void);
+
+#else
+
+static inline unsigned long test_perf_counter_pending(void)
+{
+       return 0;
+}
+
+static inline void set_perf_counter_pending(void) {}
+static inline void clear_perf_counter_pending(void) {}
+static inline void perf_counter_do_pending(void) {}
+#endif /* CONFIG_PERF_COUNTERS */
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_HW_IRQ_H */
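
The three helpers above implement a deferral pattern: a PMU interrupt that arrives while interrupts are soft-disabled marks the flag in the paca, and the interrupt-enable path replays the work (see the raw_local_irq_restore() hunk further below). The following userspace analogue is an illustrative sketch, not part of the patch; the names perf_pending and soft_disabled are hypothetical stand-ins for the paca fields:

    #include <stdio.h>

    static int perf_pending;        /* stands in for paca->perf_counter_pending */
    static int soft_disabled = 1;   /* stands in for !paca->soft_enabled */

    static void do_pending_work(void)
    {
            printf("ran deferred PMU work\n");
    }

    /* A PMU interrupt arriving while soft-disabled only sets the flag. */
    static void pmu_interrupt(void)
    {
            if (soft_disabled) {
                    perf_pending = 1;       /* like set_perf_counter_pending() */
                    return;
            }
            do_pending_work();
    }

    /* Re-enabling interrupts replays any deferred work. */
    static void local_irq_enable_sim(void)
    {
            soft_disabled = 0;
            if (perf_pending) {             /* like test_perf_counter_pending() */
                    perf_pending = 0;       /* like clear_perf_counter_pending() */
                    do_pending_work();      /* like perf_counter_do_pending() */
            }
    }

    int main(void)
    {
            pmu_interrupt();        /* arrives while soft-disabled */
            local_irq_enable_sim(); /* runs the deferred work */
            return 0;
    }
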
index 082b3ae..6ef0557 100644
@@ -99,6 +99,7 @@ struct paca_struct {
        u8 soft_enabled;                /* irq soft-enable flag */
        u8 hard_enabled;                /* set if irqs are enabled in MSR */
        u8 io_sync;                     /* writel() needs spin_unlock sync */
+       u8 perf_counter_pending;        /* PM interrupt while soft-disabled */
 
        /* Stuff for accurate time accounting */
        u64 user_time;                  /* accumulated usermode TB ticks */
diff --git a/arch/powerpc/include/asm/perf_counter.h b/arch/powerpc/include/asm/perf_counter.h
new file mode 100644
index 0000000..cc7c887
--- /dev/null
@@ -0,0 +1,98 @@
+/*
+ * Performance counter support - PowerPC-specific definitions.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/types.h>
+
+#define MAX_HWCOUNTERS         8
+#define MAX_EVENT_ALTERNATIVES 8
+#define MAX_LIMITED_HWCOUNTERS 2
+
+/*
+ * This struct provides the constants and functions needed to
+ * describe the PMU on a particular POWER-family CPU.
+ */
+struct power_pmu {
+       int     n_counter;
+       int     max_alternatives;
+       u64     add_fields;
+       u64     test_adder;
+       int     (*compute_mmcr)(u64 events[], int n_ev,
+                               unsigned int hwc[], u64 mmcr[]);
+       int     (*get_constraint)(u64 event, u64 *mskp, u64 *valp);
+       int     (*get_alternatives)(u64 event, unsigned int flags,
+                                   u64 alt[]);
+       void    (*disable_pmc)(unsigned int pmc, u64 mmcr[]);
+       int     (*limited_pmc_event)(u64 event);
+       u32     flags;
+       int     n_generic;
+       int     *generic_events;
+       int     (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+                              [PERF_COUNT_HW_CACHE_OP_MAX]
+                              [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+extern struct power_pmu *ppmu;
+
+/*
+ * Values for power_pmu.flags
+ */
+#define PPMU_LIMITED_PMC5_6    1       /* PMC5/6 have limited function */
+#define PPMU_ALT_SIPR          2       /* uses alternate posn for SIPR/HV */
+
+/*
+ * Values for flags to get_alternatives()
+ */
+#define PPMU_LIMITED_PMC_OK    1       /* can put this on a limited PMC */
+#define PPMU_LIMITED_PMC_REQD  2       /* have to put this on a limited PMC */
+#define PPMU_ONLY_COUNT_RUN    4       /* only counting in run state */
+
+struct pt_regs;
+extern unsigned long perf_misc_flags(struct pt_regs *regs);
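+/*
+ * Defining perf_misc_flags as itself lets the generic perf code
+ * detect (via #ifdef) that this architecture provides the function.
+ */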
+#define perf_misc_flags(regs)  perf_misc_flags(regs)
+
+extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
+
+/*
+ * The power_pmu.get_constraint function returns a 64-bit value and
+ * a 64-bit mask that express the constraints between this event and
+ * other events.
+ *
+ * The value and mask are divided up into (non-overlapping) bitfields
+ * of three different types:
+ *
+ * Select field: this expresses the constraint that some set of bits
+ * in MMCR* needs to be set to a specific value for this event.  For a
+ * select field, the mask contains 1s in every bit of the field, and
+ * the value contains a unique value for each possible setting of the
+ * MMCR* bits.  The constraint checking code will ensure that two events
+ * that set the same field in their masks have the same value in their
+ * value dwords.
+ *
+ * Add field: this expresses the constraint that there can be at most
+ * N events in a particular class.  A field of k bits can be used for
+ * N <= 2^(k-1) - 1.  The mask has the most significant bit of the field
+ * set (and the other bits 0), and the value has only the least significant
+ * bit of the field set.  In addition, the 'add_fields' and 'test_adder'
+ * in the struct power_pmu for this processor come into play.  The
+ * add_fields value contains 1 in the LSB of the field, and the
+ * test_adder contains 2^(k-1) - 1 - N in the field.
+ *
+ * NAND field: this expresses the constraint that you may not have events
+ * in all of a set of classes.  (For example, on PPC970, you can't select
+ * events from the FPU, ISU and IDU simultaneously, although any two are
+ * possible.)  For N classes, the field is N+1 bits wide, and each class
+ * is assigned one bit from the least-significant N bits.  The mask has
+ * only the most-significant bit set, and the value has only the bit
+ * for the event's class set.  The test_adder has the least significant
+ * bit set in the field.
+ *
+ * If an event is not subject to the constraint expressed by a particular
+ * field, then it will have 0 in both the mask and value for that field.
+ */
index e8018d5..fb359b0 100644
 #define   MMCR0_FCHV   0x00000001UL /* freeze conditions in hypervisor mode */
 #define SPRN_MMCR1     798
 #define SPRN_MMCRA     0x312
+#define   MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
 #define   MMCRA_SIHV   0x10000000UL /* state of MSR HV when SIAR set */
 #define   MMCRA_SIPR   0x08000000UL /* state of MSR PR when SIAR set */
 #define   MMCRA_SLOT   0x07000000UL /* SLOT bits (37-39) */
 #define   MMCRA_SLOT_SHIFT     24
 #define   MMCRA_SAMPLE_ENABLE 0x00000001UL /* enable sampling */
+#define   POWER6_MMCRA_SDSYNC 0x0000080000000000ULL    /* SDAR/SIAR synced */
 #define   POWER6_MMCRA_SIHV   0x0000040000000000ULL
 #define   POWER6_MMCRA_SIPR   0x0000020000000000ULL
 #define   POWER6_MMCRA_THRM    0x00000020UL
index d98a30d..a0b92de 100644
@@ -322,6 +322,6 @@ SYSCALL_SPU(epoll_create1)
 SYSCALL_SPU(dup3)
 SYSCALL_SPU(pipe2)
 SYSCALL(inotify_init1)
-SYSCALL(ni_syscall)
+SYSCALL_SPU(perf_counter_open)
 COMPAT_SYS_SPU(preadv)
 COMPAT_SYS_SPU(pwritev)
index 3f06f8e..4badac2 100644
 #define __NR_dup3              316
 #define __NR_pipe2             317
 #define __NR_inotify_init1     318
+#define __NR_perf_counter_open 319
 #define __NR_preadv            320
 #define __NR_pwritev           321
 
index 71901fb..a2c6834 100644
@@ -94,6 +94,9 @@ obj64-$(CONFIG_AUDIT)         += compat_audit.o
 
 obj-$(CONFIG_DYNAMIC_FTRACE)   += ftrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER)    += ftrace.o
+obj-$(CONFIG_PERF_COUNTERS)    += perf_counter.o power4-pmu.o ppc970-pmu.o \
+                                  power5-pmu.o power5+-pmu.o power6-pmu.o \
+                                  power7-pmu.o
 
 obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
 
index 1e40bc0..e981d1c 100644
@@ -131,6 +131,7 @@ int main(void)
        DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
        DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
        DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
+       DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
        DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
        DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
        DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
index abfc323..43e0734 100644
@@ -526,6 +526,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 2:
        TRACE_AND_RESTORE_IRQ(r5);
 
+#ifdef CONFIG_PERF_COUNTERS
+       /* check paca->perf_counter_pending if we're enabling ints */
+       lbz     r3,PACAPERFPEND(r13)
+       and.    r3,r3,r5
+       beq     27f
+       bl      .perf_counter_do_pending
+27:
+#endif /* CONFIG_PERF_COUNTERS */
+
        /* extract EE bit and use it to restore paca->hard_enabled */
        ld      r3,_MSR(r1)
        rldicl  r4,r3,49,63             /* r4 = (r3 >> 15) & 1 */
index 8c1a496..feff792 100644
@@ -135,6 +135,11 @@ notrace void raw_local_irq_restore(unsigned long en)
                        iseries_handle_interrupts();
        }
 
+       if (test_perf_counter_pending()) {
+               clear_perf_counter_pending();
+               perf_counter_do_pending();
+       }
+
        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
new file mode 100644
index 0000000..bb20238
--- /dev/null
@@ -0,0 +1,1263 @@
+/*
+ * Performance counter support - powerpc architecture code
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_counter.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/reg.h>
+#include <asm/pmc.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/ptrace.h>
+
+struct cpu_hw_counters {
+       int n_counters;
+       int n_percpu;
+       int disabled;
+       int n_added;
+       int n_limited;
+       u8  pmcs_enabled;
+       struct perf_counter *counter[MAX_HWCOUNTERS];
+       u64 events[MAX_HWCOUNTERS];
+       unsigned int flags[MAX_HWCOUNTERS];
+       u64 mmcr[3];
+       struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
+       u8  limited_hwidx[MAX_LIMITED_HWCOUNTERS];
+};
+DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+
+struct power_pmu *ppmu;
+
+/*
+ * Normally, to ignore kernel events we set the FCS (freeze counters
+ * in supervisor mode) bit in MMCR0, but if the kernel runs with the
+ * hypervisor bit set in the MSR, or if we are running on a processor
+ * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
+ * then we need to use the FCHV bit to ignore kernel events.
+ */
+static unsigned int freeze_counters_kernel = MMCR0_FCS;
+
+static void perf_counter_interrupt(struct pt_regs *regs);
+
+void perf_counter_print_debug(void)
+{
+}
+
+/*
+ * Read one performance monitor counter (PMC).
+ */
+static unsigned long read_pmc(int idx)
+{
+       unsigned long val;
+
+       switch (idx) {
+       case 1:
+               val = mfspr(SPRN_PMC1);
+               break;
+       case 2:
+               val = mfspr(SPRN_PMC2);
+               break;
+       case 3:
+               val = mfspr(SPRN_PMC3);
+               break;
+       case 4:
+               val = mfspr(SPRN_PMC4);
+               break;
+       case 5:
+               val = mfspr(SPRN_PMC5);
+               break;
+       case 6:
+               val = mfspr(SPRN_PMC6);
+               break;
+       case 7:
+               val = mfspr(SPRN_PMC7);
+               break;
+       case 8:
+               val = mfspr(SPRN_PMC8);
+               break;
+       default:
+               printk(KERN_ERR "oops trying to read PMC%d\n", idx);
+               val = 0;
+       }
+       return val;
+}
+
+/*
+ * Write one PMC.
+ */
+static void write_pmc(int idx, unsigned long val)
+{
+       switch (idx) {
+       case 1:
+               mtspr(SPRN_PMC1, val);
+               break;
+       case 2:
+               mtspr(SPRN_PMC2, val);
+               break;
+       case 3:
+               mtspr(SPRN_PMC3, val);
+               break;
+       case 4:
+               mtspr(SPRN_PMC4, val);
+               break;
+       case 5:
+               mtspr(SPRN_PMC5, val);
+               break;
+       case 6:
+               mtspr(SPRN_PMC6, val);
+               break;
+       case 7:
+               mtspr(SPRN_PMC7, val);
+               break;
+       case 8:
+               mtspr(SPRN_PMC8, val);
+               break;
+       default:
+               printk(KERN_ERR "oops trying to write PMC%d\n", idx);
+       }
+}
+
+/*
+ * Check if a set of events can all go on the PMU at once.
+ * If they can't, this will look at alternative codes for the events
+ * and see if any combination of alternative codes is feasible.
+ * The feasible set is returned in event[].
+ */
+static int power_check_constraints(u64 event[], unsigned int cflags[],
+                                  int n_ev)
+{
+       u64 mask, value, nv;
+       u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+       u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+       u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+       u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+       int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+       int i, j;
+       u64 addf = ppmu->add_fields;
+       u64 tadd = ppmu->test_adder;
+
+       if (n_ev > ppmu->n_counter)
+               return -1;
+
+       /* First see if the events will go on as-is */
+       for (i = 0; i < n_ev; ++i) {
+               if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
+                   && !ppmu->limited_pmc_event(event[i])) {
+                       ppmu->get_alternatives(event[i], cflags[i],
+                                              alternatives[i]);
+                       event[i] = alternatives[i][0];
+               }
+               if (ppmu->get_constraint(event[i], &amasks[i][0],
+                                        &avalues[i][0]))
+                       return -1;
+       }
+       value = mask = 0;
+       for (i = 0; i < n_ev; ++i) {
+               nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
+               if ((((nv + tadd) ^ value) & mask) != 0 ||
+                   (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
+                       break;
+               value = nv;
+               mask |= amasks[i][0];
+       }
+       if (i == n_ev)
+               return 0;       /* all OK */
+
+       /* doesn't work, gather alternatives... */
+       if (!ppmu->get_alternatives)
+               return -1;
+       for (i = 0; i < n_ev; ++i) {
+               choice[i] = 0;
+               n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+                                                 alternatives[i]);
+               for (j = 1; j < n_alt[i]; ++j)
+                       ppmu->get_constraint(alternatives[i][j],
+                                            &amasks[i][j], &avalues[i][j]);
+       }
+
+       /* enumerate all possibilities and see if any will work */
+       i = 0;
+       j = -1;
+       value = mask = nv = 0;
+       while (i < n_ev) {
+               if (j >= 0) {
+                       /* we're backtracking, restore context */
+                       value = svalues[i];
+                       mask = smasks[i];
+                       j = choice[i];
+               }
+               /*
+                * See if any alternative k for event i,
+                * where k > j, will satisfy the constraints.
+                */
+               while (++j < n_alt[i]) {
+                       nv = (value | avalues[i][j]) +
+                               (value & avalues[i][j] & addf);
+                       if ((((nv + tadd) ^ value) & mask) == 0 &&
+                           (((nv + tadd) ^ avalues[i][j])
+                            & amasks[i][j]) == 0)
+                               break;
+               }
+               if (j >= n_alt[i]) {
+                       /*
+                        * No feasible alternative, backtrack
+                        * to event i-1 and continue enumerating its
+                        * alternatives from where we got up to.
+                        */
+                       if (--i < 0)
+                               return -1;
+               } else {
+                       /*
+                        * Found a feasible alternative for event i,
+                        * remember where we got up to with this event,
+                        * go on to the next event, and start with
+                        * the first alternative for it.
+                        */
+                       choice[i] = j;
+                       svalues[i] = value;
+                       smasks[i] = mask;
+                       value = nv;
+                       mask |= amasks[i][j];
+                       ++i;
+                       j = -1;
+               }
+       }
+
+       /* OK, we have a feasible combination, tell the caller the solution */
+       for (i = 0; i < n_ev; ++i)
+               event[i] = alternatives[i][choice[i]];
+       return 0;
+}
+
+/*
+ * Check if newly-added counters have consistent settings for
+ * exclude_{user,kernel,hv} with each other and any previously
+ * added counters.
+ */
+static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
+                         int n_prev, int n_new)
+{
+       int eu = 0, ek = 0, eh = 0;
+       int i, n, first;
+       struct perf_counter *counter;
+
+       n = n_prev + n_new;
+       if (n <= 1)
+               return 0;
+
+       first = 1;
+       for (i = 0; i < n; ++i) {
+               if (cflags[i] & PPMU_LIMITED_PMC_OK) {
+                       cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
+                       continue;
+               }
+               counter = ctrs[i];
+               if (first) {
+                       eu = counter->attr.exclude_user;
+                       ek = counter->attr.exclude_kernel;
+                       eh = counter->attr.exclude_hv;
+                       first = 0;
+               } else if (counter->attr.exclude_user != eu ||
+                          counter->attr.exclude_kernel != ek ||
+                          counter->attr.exclude_hv != eh) {
+                       return -EAGAIN;
+               }
+       }
+
+       if (eu || ek || eh)
+               for (i = 0; i < n; ++i)
+                       if (cflags[i] & PPMU_LIMITED_PMC_OK)
+                               cflags[i] |= PPMU_LIMITED_PMC_REQD;
+
+       return 0;
+}
+
+static void power_pmu_read(struct perf_counter *counter)
+{
+       long val, delta, prev;
+
+       if (!counter->hw.idx)
+               return;
+       /*
+        * Performance monitor interrupts come even when interrupts
+        * are soft-disabled, as long as interrupts are hard-enabled.
+        * Therefore we treat them like NMIs.
+        */
+       do {
+               prev = atomic64_read(&counter->hw.prev_count);
+               barrier();
+               val = read_pmc(counter->hw.idx);
+       } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);
+
+       /* The counters are only 32 bits wide */
+       delta = (val - prev) & 0xfffffffful;
+       atomic64_add(delta, &counter->count);
+       atomic64_sub(delta, &counter->hw.period_left);
+}
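
The masking in power_pmu_read() is what keeps the delta correct across 32-bit counter wraparound; a small sketch of the arithmetic (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long prev = 0xfffffff0ul;      /* sampled just before wrap */
            unsigned long val  = 0x00000010ul;      /* sampled just after wrap */

            /* same expression as power_pmu_read() */
            unsigned long delta = (val - prev) & 0xfffffffful;

            printf("delta = %lu\n", delta);         /* prints 32, not a huge value */
            return 0;
    }
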
+
+/*
+ * On some machines, PMC5 and PMC6 can't be written, don't respect
+ * the freeze conditions, and don't generate interrupts.  This tells
+ * us if `counter' is using such a PMC.
+ */
+static int is_limited_pmc(int pmcnum)
+{
+       return (ppmu->flags & PPMU_LIMITED_PMC5_6)
+               && (pmcnum == 5 || pmcnum == 6);
+}
+
+static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
+                                   unsigned long pmc5, unsigned long pmc6)
+{
+       struct perf_counter *counter;
+       u64 val, prev, delta;
+       int i;
+
+       for (i = 0; i < cpuhw->n_limited; ++i) {
+               counter = cpuhw->limited_counter[i];
+               if (!counter->hw.idx)
+                       continue;
+               val = (counter->hw.idx == 5) ? pmc5 : pmc6;
+               prev = atomic64_read(&counter->hw.prev_count);
+               counter->hw.idx = 0;
+               delta = (val - prev) & 0xfffffffful;
+               atomic64_add(delta, &counter->count);
+       }
+}
+
+static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
+                                 unsigned long pmc5, unsigned long pmc6)
+{
+       struct perf_counter *counter;
+       u64 val;
+       int i;
+
+       for (i = 0; i < cpuhw->n_limited; ++i) {
+               counter = cpuhw->limited_counter[i];
+               counter->hw.idx = cpuhw->limited_hwidx[i];
+               val = (counter->hw.idx == 5) ? pmc5 : pmc6;
+               atomic64_set(&counter->hw.prev_count, val);
+               perf_counter_update_userpage(counter);
+       }
+}
+
+/*
+ * Since limited counters don't respect the freeze conditions, we
+ * have to read them immediately after freezing or unfreezing the
+ * other counters.  We try to keep the values from the limited
+ * counters as consistent as possible by keeping the delay (in
+ * cycles and instructions) between freezing/unfreezing and reading
+ * the limited counters as small and consistent as possible.
+ * Therefore, if any limited counters are in use, we read them
+ * both, and always in the same order, to minimize variability,
+ * and do it inside the same asm that writes MMCR0.
+ */
+static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
+{
+       unsigned long pmc5, pmc6;
+
+       if (!cpuhw->n_limited) {
+               mtspr(SPRN_MMCR0, mmcr0);
+               return;
+       }
+
+       /*
+        * Write MMCR0, then read PMC5 and PMC6 immediately.
+        * To ensure we don't get a performance monitor interrupt
+        * between writing MMCR0 and freezing/thawing the limited
+        * counters, we first write MMCR0 with the counter overflow
+        * interrupt enable bits turned off.
+        */
+       asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
+                    : "=&r" (pmc5), "=&r" (pmc6)
+                    : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
+                      "i" (SPRN_MMCR0),
+                      "i" (SPRN_PMC5), "i" (SPRN_PMC6));
+
+       if (mmcr0 & MMCR0_FC)
+               freeze_limited_counters(cpuhw, pmc5, pmc6);
+       else
+               thaw_limited_counters(cpuhw, pmc5, pmc6);
+
+       /*
+        * Write the full MMCR0 including the counter overflow interrupt
+        * enable bits, if necessary.
+        */
+       if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
+               mtspr(SPRN_MMCR0, mmcr0);
+}
+
+/*
+ * Disable all counters to prevent PMU interrupts and to allow
+ * counters to be added or removed.
+ */
+void hw_perf_disable(void)
+{
+       struct cpu_hw_counters *cpuhw;
+       unsigned long ret;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       cpuhw = &__get_cpu_var(cpu_hw_counters);
+
+       ret = cpuhw->disabled;
+       if (!ret) {
+               cpuhw->disabled = 1;
+               cpuhw->n_added = 0;
+
+               /*
+                * Check if we ever enabled the PMU on this cpu.
+                */
+               if (!cpuhw->pmcs_enabled) {
+                       if (ppc_md.enable_pmcs)
+                               ppc_md.enable_pmcs();
+                       cpuhw->pmcs_enabled = 1;
+               }
+
+               /*
+                * Disable instruction sampling if it was enabled
+                */
+               if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
+                       mtspr(SPRN_MMCRA,
+                             cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+                       mb();
+               }
+
+               /*
+                * Set the 'freeze counters' bit.
+                * The barrier is to make sure the mtspr has been
+                * executed and the PMU has frozen the counters
+                * before we return.
+                */
+               write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
+               mb();
+       }
+       local_irq_restore(flags);
+}
+
+/*
+ * Re-enable all counters.
+ * If counters were added while the PMU was disabled, compute
+ * and write the new configuration to the hardware first.
+ */
+void hw_perf_enable(void)
+{
+       struct perf_counter *counter;
+       struct cpu_hw_counters *cpuhw;
+       unsigned long flags;
+       long i;
+       unsigned long val;
+       s64 left;
+       unsigned int hwc_index[MAX_HWCOUNTERS];
+       int n_lim;
+       int idx;
+
+       local_irq_save(flags);
+       cpuhw = &__get_cpu_var(cpu_hw_counters);
+       if (!cpuhw->disabled) {
+               local_irq_restore(flags);
+               return;
+       }
+       cpuhw->disabled = 0;
+
+       /*
+        * If we didn't change anything, or only removed counters,
+        * no need to recalculate MMCR* settings and reset the PMCs.
+        * Just reenable the PMU with the current MMCR* settings
+        * (possibly updated for removal of counters).
+        */
+       if (!cpuhw->n_added) {
+               mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+               mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+               if (cpuhw->n_counters == 0)
+                       get_lppaca()->pmcregs_in_use = 0;
+               goto out_enable;
+       }
+
+       /*
+        * Compute MMCR* values for the new set of counters
+        */
+       if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
+                              cpuhw->mmcr)) {
+               /* shouldn't ever get here */
+               printk(KERN_ERR "oops compute_mmcr failed\n");
+               goto out;
+       }
+
+       /*
+        * Add in MMCR0 freeze bits corresponding to the
+        * attr.exclude_* bits for the first counter.
+        * We have already checked that all counters have the
+        * same values for these bits as the first counter.
+        */
+       counter = cpuhw->counter[0];
+       if (counter->attr.exclude_user)
+               cpuhw->mmcr[0] |= MMCR0_FCP;
+       if (counter->attr.exclude_kernel)
+               cpuhw->mmcr[0] |= freeze_counters_kernel;
+       if (counter->attr.exclude_hv)
+               cpuhw->mmcr[0] |= MMCR0_FCHV;
+
+       /*
+        * Write the new configuration to MMCR* with the freeze
+        * bit set and set the hardware counters to their initial values.
+        * Then unfreeze the counters.
+        */
+       get_lppaca()->pmcregs_in_use = 1;
+       mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+       mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+       mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
+                               | MMCR0_FC);
+
+       /*
+        * Read off any pre-existing counters that need to move
+        * to another PMC.
+        */
+       for (i = 0; i < cpuhw->n_counters; ++i) {
+               counter = cpuhw->counter[i];
+               if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
+                       power_pmu_read(counter);
+                       write_pmc(counter->hw.idx, 0);
+                       counter->hw.idx = 0;
+               }
+       }
+
+       /*
+        * Initialize the PMCs for all the new and moved counters.
+        */
+       cpuhw->n_limited = n_lim = 0;
+       for (i = 0; i < cpuhw->n_counters; ++i) {
+               counter = cpuhw->counter[i];
+               if (counter->hw.idx)
+                       continue;
+               idx = hwc_index[i] + 1;
+               if (is_limited_pmc(idx)) {
+                       cpuhw->limited_counter[n_lim] = counter;
+                       cpuhw->limited_hwidx[n_lim] = idx;
+                       ++n_lim;
+                       continue;
+               }
+               val = 0;
+               if (counter->hw.sample_period) {
+                       left = atomic64_read(&counter->hw.period_left);
+                       if (left < 0x80000000L)
+                               val = 0x80000000L - left;
+               }
+               atomic64_set(&counter->hw.prev_count, val);
+               counter->hw.idx = idx;
+               write_pmc(idx, val);
+               perf_counter_update_userpage(counter);
+       }
+       cpuhw->n_limited = n_lim;
+       cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
+
+ out_enable:
+       mb();
+       write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+
+       /*
+        * Enable instruction sampling if necessary
+        */
+       if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
+               mb();
+               mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
+       }
+
+ out:
+       local_irq_restore(flags);
+}
+
+static int collect_events(struct perf_counter *group, int max_count,
+                         struct perf_counter *ctrs[], u64 *events,
+                         unsigned int *flags)
+{
+       int n = 0;
+       struct perf_counter *counter;
+
+       if (!is_software_counter(group)) {
+               if (n >= max_count)
+                       return -1;
+               ctrs[n] = group;
+               flags[n] = group->hw.counter_base;
+               events[n++] = group->hw.config;
+       }
+       list_for_each_entry(counter, &group->sibling_list, list_entry) {
+               if (!is_software_counter(counter) &&
+                   counter->state != PERF_COUNTER_STATE_OFF) {
+                       if (n >= max_count)
+                               return -1;
+                       ctrs[n] = counter;
+                       flags[n] = counter->hw.counter_base;
+                       events[n++] = counter->hw.config;
+               }
+       }
+       return n;
+}
+
+static void counter_sched_in(struct perf_counter *counter, int cpu)
+{
+       counter->state = PERF_COUNTER_STATE_ACTIVE;
+       counter->oncpu = cpu;
+       counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
+       if (is_software_counter(counter))
+               counter->pmu->enable(counter);
+}
+
+/*
+ * Called to enable a whole group of counters.
+ * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
+ * Assumes the caller has disabled interrupts and has
+ * frozen the PMU with hw_perf_disable().
+ */
+int hw_perf_group_sched_in(struct perf_counter *group_leader,
+              struct perf_cpu_context *cpuctx,
+              struct perf_counter_context *ctx, int cpu)
+{
+       struct cpu_hw_counters *cpuhw;
+       long i, n, n0;
+       struct perf_counter *sub;
+
+       cpuhw = &__get_cpu_var(cpu_hw_counters);
+       n0 = cpuhw->n_counters;
+       n = collect_events(group_leader, ppmu->n_counter - n0,
+                          &cpuhw->counter[n0], &cpuhw->events[n0],
+                          &cpuhw->flags[n0]);
+       if (n < 0)
+               return -EAGAIN;
+       if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
+               return -EAGAIN;
+       i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
+       if (i < 0)
+               return -EAGAIN;
+       cpuhw->n_counters = n0 + n;
+       cpuhw->n_added += n;
+
+       /*
+        * OK, this group can go on; update counter states etc.,
+        * and enable any software counters
+        */
+       for (i = n0; i < n0 + n; ++i)
+               cpuhw->counter[i]->hw.config = cpuhw->events[i];
+       cpuctx->active_oncpu += n;
+       n = 1;
+       counter_sched_in(group_leader, cpu);
+       list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
+               if (sub->state != PERF_COUNTER_STATE_OFF) {
+                       counter_sched_in(sub, cpu);
+                       ++n;
+               }
+       }
+       ctx->nr_active += n;
+
+       return 1;
+}
+
+/*
+ * Add a counter to the PMU.
+ * If all counters are not already frozen, then we disable and
+ * re-enable the PMU in order to get hw_perf_enable to do the
+ * actual work of reconfiguring the PMU.
+ */
+static int power_pmu_enable(struct perf_counter *counter)
+{
+       struct cpu_hw_counters *cpuhw;
+       unsigned long flags;
+       int n0;
+       int ret = -EAGAIN;
+
+       local_irq_save(flags);
+       perf_disable();
+
+       /*
+        * Add the counter to the list (if there is room)
+        * and check whether the total set is still feasible.
+        */
+       cpuhw = &__get_cpu_var(cpu_hw_counters);
+       n0 = cpuhw->n_counters;
+       if (n0 >= ppmu->n_counter)
+               goto out;
+       cpuhw->counter[n0] = counter;
+       cpuhw->events[n0] = counter->hw.config;
+       cpuhw->flags[n0] = counter->hw.counter_base;
+       if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
+               goto out;
+       if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
+               goto out;
+
+       counter->hw.config = cpuhw->events[n0];
+       ++cpuhw->n_counters;
+       ++cpuhw->n_added;
+
+       ret = 0;
+ out:
+       perf_enable();
+       local_irq_restore(flags);
+       return ret;
+}
+
+/*
+ * Remove a counter from the PMU.
+ */
+static void power_pmu_disable(struct perf_counter *counter)
+{
+       struct cpu_hw_counters *cpuhw;
+       long i;
+       unsigned long flags;
+
+       local_irq_save(flags);
+       perf_disable();
+
+       power_pmu_read(counter);
+
+       cpuhw = &__get_cpu_var(cpu_hw_counters);
+       for (i = 0; i < cpuhw->n_counters; ++i) {
+               if (counter == cpuhw->counter[i]) {
+                       while (++i < cpuhw->n_counters)
+                               cpuhw->counter[i-1] = cpuhw->counter[i];
+                       --cpuhw->n_counters;
+                       ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
+                       if (counter->hw.idx) {
+                               write_pmc(counter->hw.idx, 0);
+                               counter->hw.idx = 0;
+                       }
+                       perf_counter_update_userpage(counter);
+                       break;
+               }
+       }
+       for (i = 0; i < cpuhw->n_limited; ++i)
+               if (counter == cpuhw->limited_counter[i])
+                       break;
+       if (i < cpuhw->n_limited) {
+               while (++i < cpuhw->n_limited) {
+                       cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
+                       cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
+               }
+               --cpuhw->n_limited;
+       }
+       if (cpuhw->n_counters == 0) {
+               /* disable exceptions if no counters are running */
+               cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
+       }
+
+       perf_enable();
+       local_irq_restore(flags);
+}
+
+/*
+ * Re-enable interrupts on a counter after they were throttled
+ * because they were coming too fast.
+ */
+static void power_pmu_unthrottle(struct perf_counter *counter)
+{
+       s64 val, left;
+       unsigned long flags;
+
+       if (!counter->hw.idx || !counter->hw.sample_period)
+               return;
+       local_irq_save(flags);
+       perf_disable();
+       power_pmu_read(counter);
+       left = counter->hw.sample_period;
+       counter->hw.last_period = left;
+       val = 0;
+       if (left < 0x80000000L)
+               val = 0x80000000L - left;
+       write_pmc(counter->hw.idx, val);
+       atomic64_set(&counter->hw.prev_count, val);
+       atomic64_set(&counter->hw.period_left, left);
+       perf_counter_update_userpage(counter);
+       perf_enable();
+       local_irq_restore(flags);
+}
+
+struct pmu power_pmu = {
+       .enable         = power_pmu_enable,
+       .disable        = power_pmu_disable,
+       .read           = power_pmu_read,
+       .unthrottle     = power_pmu_unthrottle,
+};
+
+/*
+ * Return 1 if we might be able to put counter on a limited PMC,
+ * or 0 if not.
+ * A counter can only go on a limited PMC if it counts something
+ * that a limited PMC can count, doesn't require interrupts, and
+ * doesn't exclude any processor mode.
+ */
+static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
+                                unsigned int flags)
+{
+       int n;
+       u64 alt[MAX_EVENT_ALTERNATIVES];
+
+       if (counter->attr.exclude_user
+           || counter->attr.exclude_kernel
+           || counter->attr.exclude_hv
+           || counter->attr.sample_period)
+               return 0;
+
+       if (ppmu->limited_pmc_event(ev))
+               return 1;
+
+       /*
+        * The requested event isn't on a limited PMC already;
+        * see if any alternative code goes on a limited PMC.
+        */
+       if (!ppmu->get_alternatives)
+               return 0;
+
+       flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
+       n = ppmu->get_alternatives(ev, flags, alt);
+
+       return n > 0;
+}
+
+/*
+ * Find an alternative event that goes on a normal PMC, if possible,
+ * and return the event code, or 0 if there is no such alternative.
+ * (Note: event code 0 is "don't count" on all machines.)
+ */
+static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
+{
+       u64 alt[MAX_EVENT_ALTERNATIVES];
+       int n;
+
+       flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
+       n = ppmu->get_alternatives(ev, flags, alt);
+       if (!n)
+               return 0;
+       return alt[0];
+}
+
+/* Number of perf_counters counting hardware events */
+static atomic_t num_counters;
+/* Used to avoid races in calling reserve/release_pmc_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * Release the PMU if this is the last perf_counter.
+ */
+static void hw_perf_counter_destroy(struct perf_counter *counter)
+{
+       if (!atomic_add_unless(&num_counters, -1, 1)) {
+               mutex_lock(&pmc_reserve_mutex);
+               if (atomic_dec_return(&num_counters) == 0)
+                       release_pmc_hardware();
+               mutex_unlock(&pmc_reserve_mutex);
+       }
+}
+
+/*
+ * Translate a generic cache event config to a raw event code.
+ */
+static int hw_perf_cache_event(u64 config, u64 *eventp)
+{
+       unsigned long type, op, result;
+       int ev;
+
+       if (!ppmu->cache_events)
+               return -EINVAL;
+
+       /* unpack config */
+       type = config & 0xff;
+       op = (config >> 8) & 0xff;
+       result = (config >> 16) & 0xff;
+
+       if (type >= PERF_COUNT_HW_CACHE_MAX ||
+           op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+           result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+               return -EINVAL;
+
+       ev = (*ppmu->cache_events)[type][op][result];
+       if (ev == 0)
+               return -EOPNOTSUPP;
+       if (ev == -1)
+               return -EINVAL;
+       *eventp = ev;
+       return 0;
+}
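
hw_perf_cache_event() expects the layout that userspace packs into attr.config: cache type in bits 0-7, operation in bits 8-15, result in bits 16-23. A sketch (not part of the patch) building an L1-data read-miss config, assuming the usual PERF_COUNT_HW_CACHE_* numbering (L1D = 0, OP_READ = 0, RESULT_MISS = 1):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t type   = 0;    /* PERF_COUNT_HW_CACHE_L1D */
            uint64_t op     = 0;    /* PERF_COUNT_HW_CACHE_OP_READ */
            uint64_t result = 1;    /* PERF_COUNT_HW_CACHE_RESULT_MISS */

            /* inverse of the unpacking in hw_perf_cache_event() */
            uint64_t config = type | (op << 8) | (result << 16);

            printf("attr.config = 0x%llx\n", (unsigned long long)config);
            return 0;
    }
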
+
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+{
+       u64 ev;
+       unsigned long flags;
+       struct perf_counter *ctrs[MAX_HWCOUNTERS];
+       u64 events[MAX_HWCOUNTERS];
+       unsigned int cflags[MAX_HWCOUNTERS];
+       int n;
+       int err;
+
+       if (!ppmu)
+               return ERR_PTR(-ENXIO);
+       switch (counter->attr.type) {
+       case PERF_TYPE_HARDWARE:
+               ev = counter->attr.config;
+               if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
+                       return ERR_PTR(-EOPNOTSUPP);
+               ev = ppmu->generic_events[ev];
+               break;
+       case PERF_TYPE_HW_CACHE:
+               err = hw_perf_cache_event(counter->attr.config, &ev);
+               if (err)
+                       return ERR_PTR(err);
+               break;
+       case PERF_TYPE_RAW:
+               ev = counter->attr.config;
+               break;
+       default:
+               return ERR_PTR(-EINVAL);
+       }
+       counter->hw.config_base = ev;
+       counter->hw.idx = 0;
+
+       /*
+        * If we are not running on a hypervisor, force the
+        * exclude_hv bit to 0 so that we don't care what
+        * the user set it to.
+        */
+       if (!firmware_has_feature(FW_FEATURE_LPAR))
+               counter->attr.exclude_hv = 0;
+
+       /*
+        * If this is a per-task counter, then we can use
+        * PM_RUN_* events interchangeably with their non RUN_*
+        * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
+        * XXX we should check if the task is an idle task.
+        */
+       flags = 0;
+       if (counter->ctx->task)
+               flags |= PPMU_ONLY_COUNT_RUN;
+
+       /*
+        * If this machine has limited counters, check whether this
+        * event could go on a limited counter.
+        */
+       if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
+               if (can_go_on_limited_pmc(counter, ev, flags)) {
+                       flags |= PPMU_LIMITED_PMC_OK;
+               } else if (ppmu->limited_pmc_event(ev)) {
+                       /*
+                        * The requested event is on a limited PMC,
+                        * but we can't use a limited PMC; see if any
+                        * alternative goes on a normal PMC.
+                        */
+                       ev = normal_pmc_alternative(ev, flags);
+                       if (!ev)
+                               return ERR_PTR(-EINVAL);
+               }
+       }
+
+       /*
+        * If this is in a group, check if it can go on with all the
+        * other hardware counters in the group.  We assume the counter
+        * hasn't been linked into its leader's sibling list at this point.
+        */
+       n = 0;
+       if (counter->group_leader != counter) {
+               n = collect_events(counter->group_leader, ppmu->n_counter - 1,
+                                  ctrs, events, cflags);
+               if (n < 0)
+                       return ERR_PTR(-EINVAL);
+       }
+       events[n] = ev;
+       ctrs[n] = counter;
+       cflags[n] = flags;
+       if (check_excludes(ctrs, cflags, n, 1))
+               return ERR_PTR(-EINVAL);
+       if (power_check_constraints(events, cflags, n + 1))
+               return ERR_PTR(-EINVAL);
+
+       counter->hw.config = events[n];
+       counter->hw.counter_base = cflags[n];
+       counter->hw.last_period = counter->hw.sample_period;
+       atomic64_set(&counter->hw.period_left, counter->hw.last_period);
+
+       /*
+        * See if we need to reserve the PMU.
+        * If no counters are currently in use, then we have to take a
+        * mutex to ensure that we don't race with another task doing
+        * reserve_pmc_hardware or release_pmc_hardware.
+        */
+       err = 0;
+       if (!atomic_inc_not_zero(&num_counters)) {
+               mutex_lock(&pmc_reserve_mutex);
+               if (atomic_read(&num_counters) == 0 &&
+                   reserve_pmc_hardware(perf_counter_interrupt))
+                       err = -EBUSY;
+               else
+                       atomic_inc(&num_counters);
+               mutex_unlock(&pmc_reserve_mutex);
+       }
+       counter->destroy = hw_perf_counter_destroy;
+
+       if (err)
+               return ERR_PTR(err);
+       return &power_pmu;
+}
+
+/*
+ * A counter has overflowed; update its count and record
+ * things if requested.  Note that interrupts are hard-disabled
+ * here so there is no possibility of being interrupted.
+ */
+static void record_and_restart(struct perf_counter *counter, long val,
+                              struct pt_regs *regs, int nmi)
+{
+       u64 period = counter->hw.sample_period;
+       s64 prev, delta, left;
+       int record = 0;
+       u64 addr, mmcra, sdsync;
+
+       /* we don't have to worry about interrupts here */
+       prev = atomic64_read(&counter->hw.prev_count);
+       delta = (val - prev) & 0xfffffffful;
+       atomic64_add(delta, &counter->count);
+
+       /*
+        * See if the total period for this counter has expired,
+        * and update for the next period.
+        */
+       val = 0;
+       left = atomic64_read(&counter->hw.period_left) - delta;
+       if (period) {
+               if (left <= 0) {
+                       left += period;
+                       if (left <= 0)
+                               left = period;
+                       record = 1;
+               }
+               if (left < 0x80000000L)
+                       val = 0x80000000L - left;
+       }
+
+       /*
+        * Finally record data if requested.
+        */
+       if (record) {
+               struct perf_sample_data data = {
+                       .regs   = regs,
+                       .addr   = 0,
+                       .period = counter->hw.last_period,
+               };
+
+               if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
+                       /*
+                        * The user wants a data address recorded.
+                        * If we're not doing instruction sampling,
+                        * give them the SDAR (sampled data address).
+                        * If we are doing instruction sampling, then only
+                        * give them the SDAR if it corresponds to the
+                        * instruction pointed to by SIAR; this is indicated
+                        * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
+                        */
+                       mmcra = regs->dsisr;
+                       sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+                               POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+                       if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+                               data.addr = mfspr(SPRN_SDAR);
+               }
+               if (perf_counter_overflow(counter, nmi, &data)) {
+                       /*
+                        * Interrupts are coming too fast - throttle them
+                        * by setting the counter to 0, so it will be
+                        * at least 2^30 cycles until the next interrupt
+                        * (assuming each counter counts at most 2 counts
+                        * per cycle).
+                        */
+                       val = 0;
+                       left = ~0ULL >> 1;
+               }
+       }
+
+       write_pmc(counter->hw.idx, val);
+       atomic64_set(&counter->hw.prev_count, val);
+       atomic64_set(&counter->hw.period_left, left);
+       perf_counter_update_userpage(counter);
+}
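
The 0x80000000 - left computation relies on the PMCs raising a performance monitor exception when a counter turns negative (bit 31 set), so starting the counter that far below the overflow point yields an interrupt after exactly 'left' more events. A sketch of the arithmetic (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            long left = 100000;     /* events until the next sample is due */
            unsigned long val = 0;

            if (left < 0x80000000L)
                    val = 0x80000000L - left;  /* as in record_and_restart() */

            printf("PMC programmed to 0x%lx; overflows after %lu events\n",
                   val, 0x80000000UL - val);
            return 0;
    }
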
+
+/*
+ * Called from generic code to get the misc flags (i.e. processor mode)
+ * for an event.
+ */
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+       unsigned long mmcra;
+
+       if (TRAP(regs) != 0xf00) {
+               /* not a PMU interrupt */
+               return user_mode(regs) ? PERF_EVENT_MISC_USER :
+                       PERF_EVENT_MISC_KERNEL;
+       }
+
+       mmcra = regs->dsisr;
+       if (ppmu->flags & PPMU_ALT_SIPR) {
+               if (mmcra & POWER6_MMCRA_SIHV)
+                       return PERF_EVENT_MISC_HYPERVISOR;
+               return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+                       PERF_EVENT_MISC_KERNEL;
+       }
+       if (mmcra & MMCRA_SIHV)
+               return PERF_EVENT_MISC_HYPERVISOR;
+       return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+                       PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Called from generic code to get the instruction pointer
+ * for an event.
+ */
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+       unsigned long mmcra;
+       unsigned long ip;
+       unsigned long slot;
+
+       if (TRAP(regs) != 0xf00)
+               return regs->nip;       /* not a PMU interrupt */
+
+       ip = mfspr(SPRN_SIAR);
+       mmcra = regs->dsisr;
+       if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+               slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+               if (slot > 1)
+                       ip += 4 * (slot - 1);
+       }
+       return ip;
+}
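
SIAR records the address of the first instruction of the sampled dispatch group; the MMCRA SLOT field identifies which instruction within the group caused the sample, so the code above advances the address by 4 bytes per slot. A sketch of that adjustment (not part of the patch; the SIAR value is hypothetical, the masks are from asm/reg.h above):

    #include <stdio.h>

    #define MMCRA_SLOT       0x07000000UL
    #define MMCRA_SLOT_SHIFT 24

    int main(void)
    {
            unsigned long ip = 0x10001000ul;                /* hypothetical SIAR */
            unsigned long mmcra = 3ul << MMCRA_SLOT_SHIFT;  /* sample in slot 3 */
            unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;

            if (slot > 1)
                    ip += 4 * (slot - 1);   /* third instruction of the group */

            printf("sampled ip = 0x%lx\n", ip);     /* prints 0x10001008 */
            return 0;
    }
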
+
+/*
+ * Performance monitor interrupt stuff
+ */
+static void perf_counter_interrupt(struct pt_regs *regs)
+{
+       int i;
+       struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
+       struct perf_counter *counter;
+       long val;
+       int found = 0;
+       int nmi;
+
+       if (cpuhw->n_limited)
+               freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
+                                       mfspr(SPRN_PMC6));
+
+       /*
+        * Overload regs->dsisr to store MMCRA so we only need to read it once.
+        */
+       regs->dsisr = mfspr(SPRN_MMCRA);
+
+       /*
+        * If interrupts were soft-disabled when this PMU interrupt
+        * occurred, treat it as an NMI.
+        */
+       nmi = !regs->softe;
+       if (nmi)
+               nmi_enter();
+       else
+               irq_enter();
+
+       for (i = 0; i < cpuhw->n_counters; ++i) {
+               counter = cpuhw->counter[i];
+               if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
+                       continue;
+               val = read_pmc(counter->hw.idx);
+               if ((int)val < 0) {
+                       /* counter has overflowed */
+                       found = 1;
+                       record_and_restart(counter, val, regs, nmi);
+               }
+       }
+
+       /*
+        * In case we didn't find and reset the counter that caused
+        * the interrupt, scan all counters and reset any that are
+        * negative, to avoid getting continual interrupts.
+        * Any that we processed in the previous loop will not be negative.
+        */
+       if (!found) {
+               for (i = 0; i < ppmu->n_counter; ++i) {
+                       if (is_limited_pmc(i + 1))
+                               continue;
+                       val = read_pmc(i + 1);
+                       if ((int)val < 0)
+                               write_pmc(i + 1, 0);
+               }
+       }
+
+       /*
+        * Reset MMCR0 to its normal value.  This will set PMXE and
+        * clear FC (freeze counters) and PMAO (perf mon alert occurred)
+        * and thus allow interrupts to occur again.
+        * XXX might want to use MSR.PM to keep the counters frozen until
+        * we get back out of this interrupt.
+        */
+       write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+
+       if (nmi)
+               nmi_exit();
+       else
+               irq_exit();
+}
+
+void hw_perf_counter_setup(int cpu)
+{
+       struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+
+       memset(cpuhw, 0, sizeof(*cpuhw));
+       cpuhw->mmcr[0] = MMCR0_FC;
+}
+
+extern struct power_pmu power4_pmu;
+extern struct power_pmu ppc970_pmu;
+extern struct power_pmu power5_pmu;
+extern struct power_pmu power5p_pmu;
+extern struct power_pmu power6_pmu;
+extern struct power_pmu power7_pmu;
+
+static int init_perf_counters(void)
+{
+       unsigned long pvr;
+
+       /* XXX should get this from cputable */
+       pvr = mfspr(SPRN_PVR);
+       switch (PVR_VER(pvr)) {
+       case PV_POWER4:
+       case PV_POWER4p:
+               ppmu = &power4_pmu;
+               break;
+       case PV_970:
+       case PV_970FX:
+       case PV_970MP:
+               ppmu = &ppc970_pmu;
+               break;
+       case PV_POWER5:
+               ppmu = &power5_pmu;
+               break;
+       case PV_POWER5p:
+               ppmu = &power5p_pmu;
+               break;
+       case 0x3e:
+               ppmu = &power6_pmu;
+               break;
+       case 0x3f:
+               ppmu = &power7_pmu;
+               break;
+       }
+
+       /*
+        * Use FCHV to ignore kernel events if MSR.HV is set.
+        */
+       if (mfmsr() & MSR_HV)
+               freeze_counters_kernel = MMCR0_FCHV;
+
+       return 0;
+}
+
+arch_initcall(init_perf_counters);
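+
+/*
+ * Note (not in the original patch): if no PVR case matches, ppmu is
+ * left NULL; the generic powerpc perf_counter code checks for that
+ * and rejects hardware counters on such machines, while the initcall
+ * itself still returns 0.
+ */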
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
new file mode 100644 (file)
index 0000000..07bd308
--- /dev/null
@@ -0,0 +1,598 @@
+/*
+ * Performance counter support for POWER4 (GP) and POWER4+ (GQ) processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER4
+ */
+#define PM_PMC_SH      12      /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK     0xf
+#define PM_UNIT_SH     8       /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK    0xf
+#define PM_LOWER_SH    6
+#define PM_LOWER_MSK   1
+#define PM_LOWER_MSKS  0x40
+#define PM_BYTE_SH     4       /* Byte number of event bus to use */
+#define PM_BYTE_MSK    3
+#define PM_PMCSEL_MSK  7
+
+/*
+ * Unit code values
+ */
+#define PM_FPU         1
+#define PM_ISU1                2
+#define PM_IFU         3
+#define PM_IDU0                4
+#define PM_ISU1_ALT    6
+#define PM_ISU2                7
+#define PM_IFU_ALT     8
+#define PM_LSU0                9
+#define PM_LSU1                0xc
+#define PM_GPS         0xf
+
+/*
+ * Bits in MMCR0 for POWER4
+ */
+#define MMCR0_PMC1SEL_SH       8
+#define MMCR0_PMC2SEL_SH       1
+#define MMCR_PMCSEL_MSK                0x1f
+
+/*
+ * Bits in MMCR1 for POWER4
+ */
+#define MMCR1_TTM0SEL_SH       62
+#define MMCR1_TTC0SEL_SH       61
+#define MMCR1_TTM1SEL_SH       59
+#define MMCR1_TTC1SEL_SH       58
+#define MMCR1_TTM2SEL_SH       56
+#define MMCR1_TTC2SEL_SH       55
+#define MMCR1_TTM3SEL_SH       53
+#define MMCR1_TTC3SEL_SH       52
+#define MMCR1_TTMSEL_MSK       3
+#define MMCR1_TD_CP_DBG0SEL_SH 50
+#define MMCR1_TD_CP_DBG1SEL_SH 48
+#define MMCR1_TD_CP_DBG2SEL_SH 46
+#define MMCR1_TD_CP_DBG3SEL_SH 44
+#define MMCR1_DEBUG0SEL_SH     43
+#define MMCR1_DEBUG1SEL_SH     42
+#define MMCR1_DEBUG2SEL_SH     41
+#define MMCR1_DEBUG3SEL_SH     40
+#define MMCR1_PMC1_ADDER_SEL_SH        39
+#define MMCR1_PMC2_ADDER_SEL_SH        38
+#define MMCR1_PMC6_ADDER_SEL_SH        37
+#define MMCR1_PMC5_ADDER_SEL_SH        36
+#define MMCR1_PMC8_ADDER_SEL_SH        35
+#define MMCR1_PMC7_ADDER_SEL_SH        34
+#define MMCR1_PMC3_ADDER_SEL_SH        33
+#define MMCR1_PMC4_ADDER_SEL_SH        32
+#define MMCR1_PMC3SEL_SH       27
+#define MMCR1_PMC4SEL_SH       22
+#define MMCR1_PMC5SEL_SH       17
+#define MMCR1_PMC6SEL_SH       12
+#define MMCR1_PMC7SEL_SH       7
+#define MMCR1_PMC8SEL_SH       2       /* note bit 0 is in MMCRA for GP */
+
+static short mmcr1_adder_bits[8] = {
+       MMCR1_PMC1_ADDER_SEL_SH,
+       MMCR1_PMC2_ADDER_SEL_SH,
+       MMCR1_PMC3_ADDER_SEL_SH,
+       MMCR1_PMC4_ADDER_SEL_SH,
+       MMCR1_PMC5_ADDER_SEL_SH,
+       MMCR1_PMC6_ADDER_SEL_SH,
+       MMCR1_PMC7_ADDER_SEL_SH,
+       MMCR1_PMC8_ADDER_SEL_SH
+};
+
+/*
+ * Bits in MMCRA
+ */
+#define MMCRA_PMC8SEL0_SH      17      /* PMC8SEL bit 0 for GP */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ *        |[  >[  >[   >|||[  >[  ><  ><  ><  ><  ><><><><><><><><>
+ *        | UC1 UC2 UC3 ||| PS1 PS2 B0  B1  B2  B3 P1P2P3P4P5P6P7P8
+ *       \SMPL         ||\TTC3SEL
+ *                     |\TTC_IFU_SEL
+ *                     \TTM2SEL0
+ *
+ * SMPL - SAMPLE_ENABLE constraint
+ *     56: SAMPLE_ENABLE value 0x0100_0000_0000_0000
+ *
+ * UC1 - unit constraint 1: can't have all three of FPU/ISU1/IDU0|ISU2
+ *     55: UC1 error 0x0080_0000_0000_0000
+ *     54: FPU events needed 0x0040_0000_0000_0000
+ *     53: ISU1 events needed 0x0020_0000_0000_0000
+ *     52: IDU0|ISU2 events needed 0x0010_0000_0000_0000
+ *
+ * UC2 - unit constraint 2: can't have all three of FPU/IFU/LSU0
+ *     51: UC2 error 0x0008_0000_0000_0000
+ *     50: FPU events needed 0x0004_0000_0000_0000
+ *     49: IFU events needed 0x0002_0000_0000_0000
+ *     48: LSU0 events needed 0x0001_0000_0000_0000
+ *
+ * UC3 - unit constraint 3: can't have all four of LSU0/IFU/IDU0|ISU2/ISU1
+ *     47: UC3 error 0x8000_0000_0000
+ *     46: LSU0 events needed 0x4000_0000_0000
+ *     45: IFU events needed 0x2000_0000_0000
+ *     44: IDU0|ISU2 events needed 0x1000_0000_0000
+ *     43: ISU1 events needed 0x0800_0000_0000
+ *
+ * TTM2SEL0
+ *     42: 0 = IDU0 events needed
+ *                1 = ISU2 events needed 0x0400_0000_0000
+ *
+ * TTC_IFU_SEL
+ *     41: 0 = IFU.U events needed
+ *                1 = IFU.L events needed 0x0200_0000_0000
+ *
+ * TTC3SEL
+ *     40: 0 = LSU1.U events needed
+ *                1 = LSU1.L events needed 0x0100_0000_0000
+ *
+ * PS1
+ *     39: PS1 error 0x0080_0000_0000
+ *     36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
+ *
+ * PS2
+ *     35: PS2 error 0x0008_0000_0000
+ *     32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
+ *
+ * B0
+ *     28-31: Byte 0 event source 0xf000_0000
+ *            1 = FPU
+ *            2 = ISU1
+ *            3 = IFU
+ *            4 = IDU0
+ *            7 = ISU2
+ *            9 = LSU0
+ *            c = LSU1
+ *            f = GPS
+ *
+ * B1, B2, B3
+ *     24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
+ *
+ * P8
+ *     15: P8 error 0x8000
+ *     14-15: Count of events needing PMC8
+ *
+ * P1..P7
+ *     0-13: Count of events needing PMC1..PMC7
+ *
+ * Note: this doesn't allow events using IFU.U to be combined with events
+ * using IFU.L, though that is feasible (using TTM0 and TTM2).  However
+ * there are no listed events for IFU.L (they are debug events not
+ * verified for performance monitoring) so this shouldn't cause a
+ * problem.
+ */
+
+static struct unitinfo {
+       u64     value, mask;
+       int     unit;
+       int     lowerbit;
+} p4_unitinfo[16] = {
+       [PM_FPU]  = { 0x44000000000000ull, 0x88000000000000ull, PM_FPU, 0 },
+       [PM_ISU1] = { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
+       [PM_ISU1_ALT] =
+                   { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
+       [PM_IFU]  = { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
+       [PM_IFU_ALT] =
+                   { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
+       [PM_IDU0] = { 0x10100000000000ull, 0x80840000000000ull, PM_IDU0, 1 },
+       [PM_ISU2] = { 0x10140000000000ull, 0x80840000000000ull, PM_ISU2, 0 },
+       [PM_LSU0] = { 0x01400000000000ull, 0x08800000000000ull, PM_LSU0, 0 },
+       [PM_LSU1] = { 0x00000000000000ull, 0x00010000000000ull, PM_LSU1, 40 },
+       [PM_GPS]  = { 0x00000000000000ull, 0x00000000000000ull, PM_GPS, 0 }
+};
+
+static unsigned char direct_marked_event[8] = {
+       (1<<2) | (1<<3),        /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */
+       (1<<3) | (1<<5),        /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */
+       (1<<3),                 /* PMC3: PM_MRK_ST_CMPL_INT */
+       (1<<4) | (1<<5),        /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */
+       (1<<4) | (1<<5),        /* PMC5: PM_MRK_GRP_TIMEO */
+       (1<<3) | (1<<4) | (1<<5),
+               /* PMC6: PM_MRK_ST_GPS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */
+       (1<<4) | (1<<5),        /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */
+       (1<<4),                 /* PMC8: PM_MRK_LSU_FIN */
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int p4_marked_instr_event(u64 event)
+{
+       int pmc, psel, unit, byte, bit;
+       unsigned int mask;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       psel = event & PM_PMCSEL_MSK;
+       if (pmc) {
+               if (direct_marked_event[pmc - 1] & (1 << psel))
+                       return 1;
+               if (psel == 0)          /* add events */
+                       bit = (pmc <= 4)? pmc - 1: 8 - pmc;
+               else if (psel == 6)     /* decode events */
+                       bit = 4;
+               else
+                       return 0;
+       } else
+               bit = psel;
+
+       byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+       unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+       mask = 0;
+       switch (unit) {
+       case PM_LSU1:
+               if (event & PM_LOWER_MSKS)
+                       mask = 1 << 28;         /* byte 7 bit 4 */
+               else
+                       mask = 6 << 24;         /* byte 3 bits 1 and 2 */
+               break;
+       case PM_LSU0:
+               /* byte 3, bit 3; byte 2 bits 0,2,3,4,5; byte 1 */
+               mask = 0x083dff00;
+       }
+       return (mask >> (byte * 8 + bit)) & 1;
+}
+
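+/*
+ * Worked example (not in the original patch): event 0x1002 decodes to
+ * PMC1, PMCSEL 2 (PM_MRK_GRP_DISP); direct_marked_event[0] has bit 2
+ * set, so p4_marked_instr_event() returns 1 and the event gets
+ * MMCRA_SAMPLE_ENABLE.
+ */
+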
+static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+       int pmc, byte, unit, lower, sh;
+       u64 mask = 0, value = 0;
+       int grp = -1;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       if (pmc) {
+               if (pmc > 8)
+                       return -1;
+               sh = (pmc - 1) * 2;
+               mask |= 2 << sh;
+               value |= 1 << sh;
+               grp = ((pmc - 1) >> 1) & 1;
+       }
+       unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+       byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+       if (unit) {
+               lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK;
+
+               /*
+                * Bus events on bytes 0 and 2 can be counted
+                * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
+                */
+               if (!pmc)
+                       grp = byte & 1;
+
+               if (!p4_unitinfo[unit].unit)
+                       return -1;
+               mask  |= p4_unitinfo[unit].mask;
+               value |= p4_unitinfo[unit].value;
+               sh = p4_unitinfo[unit].lowerbit;
+               if (sh > 1)
+                       value |= (u64)lower << sh;
+               else if (lower != sh)
+                       return -1;
+               unit = p4_unitinfo[unit].unit;
+
+               /* Set byte lane select field */
+               mask  |= 0xfULL << (28 - 4 * byte);
+               value |= (u64)unit << (28 - 4 * byte);
+       }
+       if (grp == 0) {
+               /* increment PMC1/2/5/6 field */
+               mask  |= 0x8000000000ull;
+               value |= 0x1000000000ull;
+       } else {
+               /* increment PMC3/4/7/8 field */
+               mask  |= 0x800000000ull;
+               value |= 0x100000000ull;
+       }
+
+       /* Marked instruction events need sample_enable set */
+       if (p4_marked_instr_event(event)) {
+               mask  |= 1ull << 56;
+               value |= 1ull << 56;
+       }
+
+       /* PMCSEL=6 decode events on byte 2 need sample_enable clear */
+       if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2)
+               mask  |= 1ull << 56;
+
+       *maskp = mask;
+       *valp = value;
+       return 0;
+}
+
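+/*
+ * Illustrative sketch, not part of the original patch (the helper name
+ * is hypothetical and the helper is unused): the 2-bit per-PMC fields
+ * built above catch double-booking by simple addition.  Each event
+ * that needs PMCn contributes 1 << sh to its value and 2 << sh to its
+ * mask, so two events claiming the same PMC sum to 2 << sh and collide
+ * with the mask ("error") bit.  This is simplified; the generic
+ * scheduling code also handles the count and mux-select fields.
+ */
+static inline int p4_pmc_double_booked(u64 value_a, u64 value_b, u64 mask)
+{
+       /* nonzero iff the summed per-PMC counts carried into an error bit */
+       return ((value_a + value_b) & mask) != 0;
+}
+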
+static unsigned int ppc_inst_cmpl[] = {
+       0x1001, 0x4001, 0x6001, 0x7001, 0x8001
+};
+
+static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+       int i, j, na;
+
+       alt[0] = event;
+       na = 1;
+
+       /* 2 possibilities for PM_GRP_DISP_REJECT */
+       if (event == 0x8003 || event == 0x0224) {
+               alt[1] = event ^ (0x8003 ^ 0x0224);
+               return 2;
+       }
+
+       /* 2 possibilities for PM_ST_MISS_L1 */
+       if (event == 0x0c13 || event == 0x0c23) {
+               alt[1] = event ^ (0x0c13 ^ 0x0c23);
+               return 2;
+       }
+
+       /* several possibilities for PM_INST_CMPL */
+       for (i = 0; i < ARRAY_SIZE(ppc_inst_cmpl); ++i) {
+               if (event == ppc_inst_cmpl[i]) {
+                       for (j = 0; j < ARRAY_SIZE(ppc_inst_cmpl); ++j)
+                               if (j != i)
+                                       alt[na++] = ppc_inst_cmpl[j];
+                       break;
+               }
+       }
+
+       return na;
+}
+
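+/*
+ * Illustration (not in the original patch): the alternatives give the
+ * scheduler freedom to move an event to whichever PMC is free.  E.g.
+ * PM_INST_CMPL requested as 0x1001 (PMC1) can equally be programmed as
+ * 0x4001, 0x6001, 0x7001 or 0x8001, so all five encodings are returned.
+ */
+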
+static int p4_compute_mmcr(u64 event[], int n_ev,
+                          unsigned int hwc[], u64 mmcr[])
+{
+       u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+       unsigned int pmc, unit, byte, psel, lower;
+       unsigned int ttm, grp;
+       unsigned int pmc_inuse = 0;
+       unsigned int pmc_grp_use[2];
+       unsigned char busbyte[4];
+       unsigned char unituse[16];
+       unsigned int unitlower = 0;
+       int i;
+
+       if (n_ev > 8)
+               return -1;
+
+       /* First pass to count resource use */
+       pmc_grp_use[0] = pmc_grp_use[1] = 0;
+       memset(busbyte, 0, sizeof(busbyte));
+       memset(unituse, 0, sizeof(unituse));
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               if (pmc) {
+                       if (pmc_inuse & (1 << (pmc - 1)))
+                               return -1;
+                       pmc_inuse |= 1 << (pmc - 1);
+                       /* count 1/2/5/6 vs 3/4/7/8 use */
+                       ++pmc_grp_use[((pmc - 1) >> 1) & 1];
+               }
+               unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+               byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+               lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK;
+               if (unit) {
+                       if (!pmc)
+                               ++pmc_grp_use[byte & 1];
+                       if (unit == 6 || unit == 8)
+                               /* map alt ISU1/IFU codes: 6->2, 8->3 */
+                               unit = (unit >> 1) - 1;
+                       if (busbyte[byte] && busbyte[byte] != unit)
+                               return -1;
+                       busbyte[byte] = unit;
+                       lower <<= unit;
+                       if (unituse[unit] && lower != (unitlower & lower))
+                               return -1;
+                       unituse[unit] = 1;
+                       unitlower |= lower;
+               }
+       }
+       if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
+               return -1;
+
+       /*
+        * Assign resources and set multiplexer selects.
+        *
+        * Units 1,2,3 are on TTM0, 4,6,7 on TTM1, 8,9 on TTM2.
+        * Each TTMx can only select one unit, but since
+        * units 2 and 6 are both ISU1, and 3 and 8 are both IFU,
+        * we have some choices.
+        */
+       if (unituse[2] & (unituse[1] | (unituse[3] & unituse[9]))) {
+               unituse[6] = 1;         /* Move 2 to 6 */
+               unituse[2] = 0;
+       }
+       if (unituse[3] & (unituse[1] | unituse[2])) {
+               unituse[8] = 1;         /* Move 3 to 8 */
+               unituse[3] = 0;
+               unitlower = (unitlower & ~8) | ((unitlower & 8) << 5);
+       }
+       /* Check only one unit per TTMx */
+       if (unituse[1] + unituse[2] + unituse[3] > 1 ||
+           unituse[4] + unituse[6] + unituse[7] > 1 ||
+           unituse[8] + unituse[9] > 1 ||
+           (unituse[5] | unituse[10] | unituse[11] |
+            unituse[13] | unituse[14]))
+               return -1;
+
+       /* Set TTMxSEL fields.  Note, units 1-3 => TTM0SEL codes 0-2 */
+       mmcr1 |= (u64)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH;
+       mmcr1 |= (u64)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH;
+       mmcr1 |= (u64)unituse[9] << MMCR1_TTM2SEL_SH;
+
+       /* Set TTCxSEL fields. */
+       if (unitlower & 0xe)
+               mmcr1 |= 1ull << MMCR1_TTC0SEL_SH;
+       if (unitlower & 0xf0)
+               mmcr1 |= 1ull << MMCR1_TTC1SEL_SH;
+       if (unitlower & 0xf00)
+               mmcr1 |= 1ull << MMCR1_TTC2SEL_SH;
+       if (unitlower & 0x7000)
+               mmcr1 |= 1ull << MMCR1_TTC3SEL_SH;
+
+       /* Set byte lane select fields. */
+       for (byte = 0; byte < 4; ++byte) {
+               unit = busbyte[byte];
+               if (!unit)
+                       continue;
+               if (unit == 0xf) {
+                       /* special case for GPS */
+                       mmcr1 |= 1ull << (MMCR1_DEBUG0SEL_SH - byte);
+               } else {
+                       if (!unituse[unit])
+                               ttm = unit - 1;         /* 2->1, 3->2 */
+                       else
+                               ttm = unit >> 2;
+                       mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2*byte);
+               }
+       }
+
+       /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+               byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+               psel = event[i] & PM_PMCSEL_MSK;
+               if (!pmc) {
+                       /* Bus event or 00xxx direct event (off or cycles) */
+                       if (unit)
+                               psel |= 0x10 | ((byte & 2) << 2);
+                       for (pmc = 0; pmc < 8; ++pmc) {
+                               if (pmc_inuse & (1 << pmc))
+                                       continue;
+                               grp = (pmc >> 1) & 1;
+                               if (unit) {
+                                       if (grp == (byte & 1))
+                                               break;
+                               } else if (pmc_grp_use[grp] < 4) {
+                                       ++pmc_grp_use[grp];
+                                       break;
+                               }
+                       }
+                       pmc_inuse |= 1 << pmc;
+               } else {
+                       /* Direct event */
+                       --pmc;
+                       if (psel == 0 && (byte & 2))
+                               /* add events on higher-numbered bus */
+                               mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
+                       else if (psel == 6 && byte == 3)
+                               /* seem to need to set sample_enable here */
+                               mmcra |= MMCRA_SAMPLE_ENABLE;
+                       psel |= 8;
+               }
+               if (pmc <= 1)
+                       mmcr0 |= psel << (MMCR0_PMC1SEL_SH - 7 * pmc);
+               else
+                       mmcr1 |= psel << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
+               if (pmc == 7)   /* PMC8 */
+                       mmcra |= (psel & 1) << MMCRA_PMC8SEL0_SH;
+               hwc[i] = pmc;
+               if (p4_marked_instr_event(event[i]))
+                       mmcra |= MMCRA_SAMPLE_ENABLE;
+       }
+
+       if (pmc_inuse & 1)
+               mmcr0 |= MMCR0_PMC1CE;
+       if (pmc_inuse & 0xfe)
+               mmcr0 |= MMCR0_PMCjCE;
+
+       mmcra |= 0x2000;        /* mark only one IOP per PPC instruction */
+
+       /* Return MMCRx values */
+       mmcr[0] = mmcr0;
+       mmcr[1] = mmcr1;
+       mmcr[2] = mmcra;
+       return 0;
+}
+
+static void p4_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+       /*
+        * Setting the PMCxSEL field to 0 disables PMC x.
+        * (Note that pmc is 0-based here, not 1-based.)
+        */
+       if (pmc <= 1) {
+               mmcr[0] &= ~(0x1fUL << (MMCR0_PMC1SEL_SH - 7 * pmc));
+       } else {
+               mmcr[1] &= ~(0x1fUL << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)));
+               if (pmc == 7)
+                       mmcr[2] &= ~(1UL << MMCRA_PMC8SEL0_SH);
+       }
+}
+
+static int p4_generic_events[] = {
+       [PERF_COUNT_HW_CPU_CYCLES]              = 7,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x1001,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x8c10, /* PM_LD_REF_L1 */
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x3c10, /* PM_LD_MISS_L1 */
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x330,  /* PM_BR_ISSUED */
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x331,  /* PM_BR_MPRED_CR */
+};
+
+#define C(x)   PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x8c10,         0x3c10  },
+               [C(OP_WRITE)] = {       0x7c10,         0xc13   },
+               [C(OP_PREFETCH)] = {    0xc35,          0       },
+       },
+       [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0       },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    0,              0       },
+       },
+       [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0       },
+               [C(OP_WRITE)] = {       0,              0       },
+               [C(OP_PREFETCH)] = {    0xc34,          0       },
+       },
+       [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x904   },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+       [C(ITLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x900   },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+       [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x330,          0x331   },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+};
+
+struct power_pmu power4_pmu = {
+       .n_counter = 8,
+       .max_alternatives = 5,
+       .add_fields = 0x0000001100005555ull,
+       .test_adder = 0x0011083300000000ull,
+       .compute_mmcr = p4_compute_mmcr,
+       .get_constraint = p4_get_constraint,
+       .get_alternatives = p4_get_alternatives,
+       .disable_pmc = p4_disable_pmc,
+       .n_generic = ARRAY_SIZE(p4_generic_events),
+       .generic_events = p4_generic_events,
+       .cache_events = &power4_cache_events,
+};
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
new file mode 100644 (file)
index 0000000..41e5d2d
--- /dev/null
@@ -0,0 +1,671 @@
+/*
+ * Performance counter support for POWER5+/++ (not POWER5) processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
+ */
+#define PM_PMC_SH      20      /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK     0xf
+#define PM_PMC_MSKS    (PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH     16      /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK    0xf
+#define PM_BYTE_SH     12      /* Byte number of event bus to use */
+#define PM_BYTE_MSK    7
+#define PM_GRS_SH      8       /* Storage subsystem mux select */
+#define PM_GRS_MSK     7
+#define PM_BUSEVENT_MSK        0x80    /* Set if event uses event bus */
+#define PM_PMCSEL_MSK  0x7f
+
+/* Values in PM_UNIT field */
+#define PM_FPU         0
+#define PM_ISU0                1
+#define PM_IFU         2
+#define PM_ISU1                3
+#define PM_IDU         4
+#define PM_ISU0_ALT    6
+#define PM_GRS         7
+#define PM_LSU0                8
+#define PM_LSU1                0xc
+#define PM_LASTUNIT    0xc
+
+/*
+ * Bits in MMCR1 for POWER5+
+ */
+#define MMCR1_TTM0SEL_SH       62
+#define MMCR1_TTM1SEL_SH       60
+#define MMCR1_TTM2SEL_SH       58
+#define MMCR1_TTM3SEL_SH       56
+#define MMCR1_TTMSEL_MSK       3
+#define MMCR1_TD_CP_DBG0SEL_SH 54
+#define MMCR1_TD_CP_DBG1SEL_SH 52
+#define MMCR1_TD_CP_DBG2SEL_SH 50
+#define MMCR1_TD_CP_DBG3SEL_SH 48
+#define MMCR1_GRS_L2SEL_SH     46
+#define MMCR1_GRS_L2SEL_MSK    3
+#define MMCR1_GRS_L3SEL_SH     44
+#define MMCR1_GRS_L3SEL_MSK    3
+#define MMCR1_GRS_MCSEL_SH     41
+#define MMCR1_GRS_MCSEL_MSK    7
+#define MMCR1_GRS_FABSEL_SH    39
+#define MMCR1_GRS_FABSEL_MSK   3
+#define MMCR1_PMC1_ADDER_SEL_SH        35
+#define MMCR1_PMC2_ADDER_SEL_SH        34
+#define MMCR1_PMC3_ADDER_SEL_SH        33
+#define MMCR1_PMC4_ADDER_SEL_SH        32
+#define MMCR1_PMC1SEL_SH       25
+#define MMCR1_PMC2SEL_SH       17
+#define MMCR1_PMC3SEL_SH       9
+#define MMCR1_PMC4SEL_SH       1
+#define MMCR1_PMCSEL_SH(n)     (MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK       0x7f
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ *             [  ><><>< ><> <><>[  >  <  ><  ><  ><  ><><><><><><>
+ *             NC  G0G1G2 G3 T0T1 UC    B0  B1  B2  B3 P6P5P4P3P2P1
+ *
+ * NC - number of counters
+ *     51: NC error 0x0008_0000_0000_0000
+ *     48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
+ *
+ * G0..G3 - GRS mux constraints
+ *     46-47: GRS_L2SEL value
+ *     44-45: GRS_L3SEL value
+ *     41-43: GRS_MCSEL value
+ *     39-40: GRS_FABSEL value
+ *     Note that these match up with their bit positions in MMCR1
+ *
+ * T0 - TTM0 constraint
+ *     36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000
+ *
+ * T1 - TTM1 constraint
+ *     34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000
+ *
+ * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS
+ *     33: UC3 error 0x02_0000_0000
+ *     32: FPU|IFU|ISU1 events needed 0x01_0000_0000
+ *     31: ISU0 events needed 0x00_8000_0000
+ *     30: IDU|GRS events needed 0x00_4000_0000
+ *
+ * B0
+ *     24-27: Byte 0 event source 0x0f00_0000
+ *           Encoding as for the event code
+ *
+ * B1, B2, B3
+ *     20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
+ *
+ * P6
+ *     11: P6 error 0x800
+ *     10-11: Count of events needing PMC6
+ *
+ * P1..P5
+ *     0-9: Count of events needing PMC1..PMC5
+ */
+
+static const int grsel_shift[8] = {
+       MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH,
+       MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH,
+       MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH
+};
+
+/* Masks and values for using events from the various units */
+static u64 unit_cons[PM_LASTUNIT+1][2] = {
+       [PM_FPU] =   { 0x3200000000ull, 0x0100000000ull },
+       [PM_ISU0] =  { 0x0200000000ull, 0x0080000000ull },
+       [PM_ISU1] =  { 0x3200000000ull, 0x3100000000ull },
+       [PM_IFU] =   { 0x3200000000ull, 0x2100000000ull },
+       [PM_IDU] =   { 0x0e00000000ull, 0x0040000000ull },
+       [PM_GRS] =   { 0x0e00000000ull, 0x0c40000000ull },
+};
+
+static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+       int pmc, byte, unit, sh;
+       int bit, fmask;
+       u64 mask = 0, value = 0;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       if (pmc) {
+               if (pmc > 6)
+                       return -1;
+               sh = (pmc - 1) * 2;
+               mask |= 2 << sh;
+               value |= 1 << sh;
+               if (pmc >= 5 && !(event == 0x500009 || event == 0x600005))
+                       return -1;
+       }
+       if (event & PM_BUSEVENT_MSK) {
+               unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+               if (unit > PM_LASTUNIT)
+                       return -1;
+               if (unit == PM_ISU0_ALT)
+                       unit = PM_ISU0;
+               mask |= unit_cons[unit][0];
+               value |= unit_cons[unit][1];
+               byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+               if (byte >= 4) {
+                       if (unit != PM_LSU1)
+                               return -1;
+                       /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */
+                       ++unit;
+                       byte &= 3;
+               }
+               if (unit == PM_GRS) {
+                       bit = event & 7;
+                       fmask = (bit == 6)? 7: 3;
+                       sh = grsel_shift[bit];
+                       mask |= (u64)fmask << sh;
+                       value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+               }
+               /* Set byte lane select field */
+               mask  |= 0xfULL << (24 - 4 * byte);
+               value |= (u64)unit << (24 - 4 * byte);
+       }
+       if (pmc < 5) {
+               /* need a counter from PMC1-4 set */
+               mask  |= 0x8000000000000ull;
+               value |= 0x1000000000000ull;
+       }
+       *maskp = mask;
+       *valp = value;
+       return 0;
+}
+
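+/*
+ * Illustrative sketch, not part of the original patch (the helper name
+ * is hypothetical and the helper is unused): mux-select fields (byte
+ * lanes, GRS selects) are required-value fields rather than additive
+ * counts, so two events may share the event bus only if their select
+ * values agree wherever both masks overlap.  Simplified from the full
+ * constraint check in the generic code.
+ */
+static inline int mux_select_conflict(u64 mask_a, u64 value_a,
+                                     u64 mask_b, u64 value_b)
+{
+       /* differing select values under the overlapping masks conflict */
+       return ((value_a ^ value_b) & mask_a & mask_b) != 0;
+}
+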
+static int power5p_limited_pmc_event(u64 event)
+{
+       int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+
+       return pmc == 5 || pmc == 6;
+}
+
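+/*
+ * Note (not in the original patch): PMC5 and PMC6 on POWER5+ are
+ * "limited" counters -- they count only run instructions completed /
+ * run cycles and are not frozen by the usual MMCR0 freeze bits, which
+ * is why the interrupt handler snapshots them separately via
+ * freeze_limited_counters().
+ */
+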
+#define MAX_ALT        3       /* at most 3 alternatives for any event */
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+       { 0x100c0,  0x40001f },                 /* PM_GCT_FULL_CYC */
+       { 0x120e4,  0x400002 },                 /* PM_GRP_DISP_REJECT */
+       { 0x230e2,  0x323087 },                 /* PM_BR_PRED_CR */
+       { 0x230e3,  0x223087, 0x3230a0 },       /* PM_BR_PRED_TA */
+       { 0x410c7,  0x441084 },                 /* PM_THRD_L2MISS_BOTH_CYC */
+       { 0x800c4,  0xc20e0 },                  /* PM_DTLB_MISS */
+       { 0xc50c6,  0xc60e0 },                  /* PM_MRK_DTLB_MISS */
+       { 0x100005, 0x600005 },                 /* PM_RUN_CYC */
+       { 0x100009, 0x200009 },                 /* PM_INST_CMPL */
+       { 0x200015, 0x300015 },                 /* PM_LSU_LMQ_SRQ_EMPTY_CYC */
+       { 0x300009, 0x400009 },                 /* PM_INST_DISP */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(unsigned int event)
+{
+       int i, j;
+
+       for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+               if (event < event_alternatives[i][0])
+                       break;
+               for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+                       if (event == event_alternatives[i][j])
+                               return i;
+       }
+       return -1;
+}
+
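+/*
+ * Note (not in the original patch): the early break above relies on
+ * event_alternatives[] being sorted in ascending order of its first
+ * column; new entries must preserve that ordering.
+ */
+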
+static const unsigned char bytedecode_alternatives[4][4] = {
+       /* PMC 1 */     { 0x21, 0x23, 0x25, 0x27 },
+       /* PMC 2 */     { 0x07, 0x17, 0x0e, 0x1e },
+       /* PMC 3 */     { 0x20, 0x22, 0x24, 0x26 },
+       /* PMC 4 */     { 0x07, 0x17, 0x0e, 0x1e }
+};
+
+/*
+ * Some direct events for decodes of event bus byte 3 have alternative
+ * PMCSEL values on other counters.  This returns the alternative
+ * event code for those that do, or -1 otherwise.  This also handles
+ * alternative PMCSEL values for add events.
+ */
+static s64 find_alternative_bdecode(u64 event)
+{
+       int pmc, altpmc, pp, j;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       if (pmc == 0 || pmc > 4)
+               return -1;
+       altpmc = 5 - pmc;       /* 1 <-> 4, 2 <-> 3 */
+       pp = event & PM_PMCSEL_MSK;
+       for (j = 0; j < 4; ++j) {
+               if (bytedecode_alternatives[pmc - 1][j] == pp) {
+                       return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
+                               (altpmc << PM_PMC_SH) |
+                               bytedecode_alternatives[altpmc - 1][j];
+               }
+       }
+
+       /* new decode alternatives for power5+ */
+       if (pmc == 1 && (pp == 0x0d || pp == 0x0e))
+               return event + (2 << PM_PMC_SH) + (0x2e - 0x0d);
+       if (pmc == 3 && (pp == 0x2e || pp == 0x2f))
+               return event - (2 << PM_PMC_SH) - (0x2e - 0x0d);
+
+       /* alternative add event encodings */
+       if (pp == 0x10 || pp == 0x28)
+               return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) |
+                       (altpmc << PM_PMC_SH);
+
+       return -1;
+}
+
+static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+       int i, j, nalt = 1;
+       int nlim;
+       s64 ae;
+
+       alt[0] = event;
+       nlim = power5p_limited_pmc_event(event);
+       i = find_alternative(event);
+       if (i >= 0) {
+               for (j = 0; j < MAX_ALT; ++j) {
+                       ae = event_alternatives[i][j];
+                       if (ae && ae != event)
+                               alt[nalt++] = ae;
+                       nlim += power5p_limited_pmc_event(ae);
+               }
+       } else {
+               ae = find_alternative_bdecode(event);
+               if (ae > 0)
+                       alt[nalt++] = ae;
+       }
+
+       if (flags & PPMU_ONLY_COUNT_RUN) {
+               /*
+                * We're only counting in RUN state,
+                * so PM_CYC is equivalent to PM_RUN_CYC
+                * and PM_INST_CMPL === PM_RUN_INST_CMPL.
+                * This doesn't include alternatives that don't provide
+                * any extra flexibility in assigning PMCs (e.g.
+                * 0x100005 for PM_RUN_CYC vs. 0xf for PM_CYC).
+                * Note that even with these additional alternatives
+                * we never end up with more than 3 alternatives for any event.
+                */
+               j = nalt;
+               for (i = 0; i < nalt; ++i) {
+                       switch (alt[i]) {
+                       case 0xf:       /* PM_CYC */
+                               alt[j++] = 0x600005;    /* PM_RUN_CYC */
+                               ++nlim;
+                               break;
+                       case 0x600005:  /* PM_RUN_CYC */
+                               alt[j++] = 0xf;
+                               break;
+                       case 0x100009:  /* PM_INST_CMPL */
+                               alt[j++] = 0x500009;    /* PM_RUN_INST_CMPL */
+                               ++nlim;
+                               break;
+                       case 0x500009:  /* PM_RUN_INST_CMPL */
+                               alt[j++] = 0x100009;    /* PM_INST_CMPL */
+                               alt[j++] = 0x200009;
+                               break;
+                       }
+               }
+               nalt = j;
+       }
+
+       if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
+               /* remove the limited PMC events */
+               j = 0;
+               for (i = 0; i < nalt; ++i) {
+                       if (!power5p_limited_pmc_event(alt[i])) {
+                               alt[j] = alt[i];
+                               ++j;
+                       }
+               }
+               nalt = j;
+       } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
+               /* remove all but the limited PMC events */
+               j = 0;
+               for (i = 0; i < nalt; ++i) {
+                       if (power5p_limited_pmc_event(alt[i])) {
+                               alt[j] = alt[i];
+                               ++j;
+                       }
+               }
+               nalt = j;
+       }
+
+       return nalt;
+}
+
+/*
+ * Map of which direct events on which PMCs are marked instruction events.
+ * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
+ * Bit 0 is set if it is marked for all PMCs.
+ * The 0x80 bit indicates a byte decode PMCSEL value.
+ */
+static unsigned char direct_event_is_marked[0x28] = {
+       0,      /* 00 */
+       0x1f,   /* 01 PM_IOPS_CMPL */
+       0x2,    /* 02 PM_MRK_GRP_DISP */
+       0xe,    /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
+       0,      /* 04 */
+       0x1c,   /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */
+       0x80,   /* 06 */
+       0x80,   /* 07 */
+       0, 0, 0,/* 08 - 0a */
+       0x18,   /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */
+       0,      /* 0c */
+       0x80,   /* 0d */
+       0x80,   /* 0e */
+       0,      /* 0f */
+       0,      /* 10 */
+       0x14,   /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */
+       0,      /* 12 */
+       0x10,   /* 13 PM_MRK_GRP_CMPL */
+       0x1f,   /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */
+       0x2,    /* 15 PM_MRK_GRP_ISSUED */
+       0x80,   /* 16 */
+       0x80,   /* 17 */
+       0, 0, 0, 0, 0,
+       0x80,   /* 1d */
+       0x80,   /* 1e */
+       0,      /* 1f */
+       0x80,   /* 20 */
+       0x80,   /* 21 */
+       0x80,   /* 22 */
+       0x80,   /* 23 */
+       0x80,   /* 24 */
+       0x80,   /* 25 */
+       0x80,   /* 26 */
+       0x80,   /* 27 */
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int power5p_marked_instr_event(u64 event)
+{
+       int pmc, psel;
+       int bit, byte, unit;
+       u32 mask;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       psel = event & PM_PMCSEL_MSK;
+       if (pmc >= 5)
+               return 0;
+
+       bit = -1;
+       if (psel < sizeof(direct_event_is_marked)) {
+               if (direct_event_is_marked[psel] & (1 << pmc))
+                       return 1;
+               if (direct_event_is_marked[psel] & 0x80)
+                       bit = 4;
+               else if (psel == 0x08)
+                       bit = pmc - 1;
+               else if (psel == 0x10)
+                       bit = 4 - pmc;
+               else if (psel == 0x1b && (pmc == 1 || pmc == 3))
+                       bit = 4;
+       } else if ((psel & 0x48) == 0x40) {
+               bit = psel & 7;
+       } else if (psel == 0x28) {
+               bit = pmc - 1;
+       } else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) {
+               bit = 4;
+       }
+
+       if (!(event & PM_BUSEVENT_MSK) || bit == -1)
+               return 0;
+
+       byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+       unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+       if (unit == PM_LSU0) {
+               /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
+               mask = 0x5dff00;
+       } else if (unit == PM_LSU1 && byte >= 4) {
+               byte -= 4;
+               /* byte 5 bits 6-7, byte 6 bits 0,4, byte 7 bits 0-4,6 */
+               mask = 0x5f11c000;
+       } else
+               return 0;
+
+       return (mask >> (byte * 8 + bit)) & 1;
+}
+
+static int power5p_compute_mmcr(u64 event[], int n_ev,
+                               unsigned int hwc[], u64 mmcr[])
+{
+       u64 mmcr1 = 0;
+       u64 mmcra = 0;
+       unsigned int pmc, unit, byte, psel;
+       unsigned int ttm;
+       int i, isbus, bit, grsel;
+       unsigned int pmc_inuse = 0;
+       unsigned char busbyte[4];
+       unsigned char unituse[16];
+       int ttmuse;
+
+       if (n_ev > 6)
+               return -1;
+
+       /* First pass to count resource use */
+       memset(busbyte, 0, sizeof(busbyte));
+       memset(unituse, 0, sizeof(unituse));
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               if (pmc) {
+                       if (pmc > 6)
+                               return -1;
+                       if (pmc_inuse & (1 << (pmc - 1)))
+                               return -1;
+                       pmc_inuse |= 1 << (pmc - 1);
+               }
+               if (event[i] & PM_BUSEVENT_MSK) {
+                       unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+                       byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+                       if (unit > PM_LASTUNIT)
+                               return -1;
+                       if (unit == PM_ISU0_ALT)
+                               unit = PM_ISU0;
+                       if (byte >= 4) {
+                               if (unit != PM_LSU1)
+                                       return -1;
+                               ++unit;
+                               byte &= 3;
+                       }
+                       if (busbyte[byte] && busbyte[byte] != unit)
+                               return -1;
+                       busbyte[byte] = unit;
+                       unituse[unit] = 1;
+               }
+       }
+
+       /*
+        * Assign resources and set multiplexer selects.
+        *
+        * PM_ISU0 can go either on TTM0 or TTM1, but that's the only
+        * choice we have to deal with.
+        */
+       if (unituse[PM_ISU0] &
+           (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) {
+               unituse[PM_ISU0_ALT] = 1;       /* move ISU to TTM1 */
+               unituse[PM_ISU0] = 0;
+       }
+       /* Set TTM[01]SEL fields. */
+       ttmuse = 0;
+       for (i = PM_FPU; i <= PM_ISU1; ++i) {
+               if (!unituse[i])
+                       continue;
+               if (ttmuse++)
+                       return -1;
+               mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+       }
+       ttmuse = 0;
+       for (; i <= PM_GRS; ++i) {
+               if (!unituse[i])
+                       continue;
+               if (ttmuse++)
+                       return -1;
+               mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+       }
+       if (ttmuse > 1)
+               return -1;
+
+       /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */
+       for (byte = 0; byte < 4; ++byte) {
+               unit = busbyte[byte];
+               if (!unit)
+                       continue;
+               if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) {
+                       /* get ISU0 through TTM1 rather than TTM0 */
+                       unit = PM_ISU0_ALT;
+               } else if (unit == PM_LSU1 + 1) {
+                       /* select lower word of LSU1 for this byte */
+                       mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+               }
+               ttm = unit >> 2;
+               mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+       }
+
+       /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+               byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+               psel = event[i] & PM_PMCSEL_MSK;
+               isbus = event[i] & PM_BUSEVENT_MSK;
+               if (!pmc) {
+                       /* Bus event or any-PMC direct event */
+                       for (pmc = 0; pmc < 4; ++pmc) {
+                               if (!(pmc_inuse & (1 << pmc)))
+                                       break;
+                       }
+                       if (pmc >= 4)
+                               return -1;
+                       pmc_inuse |= 1 << pmc;
+               } else if (pmc <= 4) {
+                       /* Direct event */
+                       --pmc;
+                       if (isbus && (byte & 2) &&
+                           (psel == 8 || psel == 0x10 || psel == 0x28))
+                               /* add events on higher-numbered bus */
+                               mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+               } else {
+                       /* Instructions or run cycles on PMC5/6 */
+                       --pmc;
+               }
+               if (isbus && unit == PM_GRS) {
+                       bit = psel & 7;
+                       grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
+                       mmcr1 |= (u64)grsel << grsel_shift[bit];
+               }
+               if (power5p_marked_instr_event(event[i]))
+                       mmcra |= MMCRA_SAMPLE_ENABLE;
+               if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1))
+                       /* select alternate byte lane */
+                       psel |= 0x10;
+               if (pmc <= 3)
+                       mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
+               hwc[i] = pmc;
+       }
+
+       /* Return MMCRx values */
+       mmcr[0] = 0;
+       if (pmc_inuse & 1)
+               mmcr[0] = MMCR0_PMC1CE;
+       if (pmc_inuse & 0x3e)
+               mmcr[0] |= MMCR0_PMCjCE;
+       mmcr[1] = mmcr1;
+       mmcr[2] = mmcra;
+       return 0;
+}
+
+static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+       if (pmc <= 3)
+               mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power5p_generic_events[] = {
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0xf,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x100009,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x1c10a8, /* LD_REF_L1 */
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x3c1088, /* LD_MISS_L1 */
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x230e4,  /* BR_ISSUED */
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x230e5,  /* BR_MPRED_CR */
+};
+
+#define C(x)   PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x1c10a8,       0x3c1088        },
+               [C(OP_WRITE)] = {       0x2c10a8,       0xc10c3         },
+               [C(OP_PREFETCH)] = {    0xc70e7,        -1              },
+       },
+       [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0               },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    0,              0               },
+       },
+       [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0               },
+               [C(OP_WRITE)] = {       0,              0               },
+               [C(OP_PREFETCH)] = {    0xc50c3,        0               },
+       },
+       [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0xc20e4,        0x800c4         },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+       [C(ITLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x800c0         },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+       [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x230e4,        0x230e5         },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+};
+
+struct power_pmu power5p_pmu = {
+       .n_counter = 6,
+       .max_alternatives = MAX_ALT,
+       .add_fields = 0x7000000000055ull,
+       .test_adder = 0x3000040000000ull,
+       .compute_mmcr = power5p_compute_mmcr,
+       .get_constraint = power5p_get_constraint,
+       .get_alternatives = power5p_get_alternatives,
+       .disable_pmc = power5p_disable_pmc,
+       .limited_pmc_event = power5p_limited_pmc_event,
+       .flags = PPMU_LIMITED_PMC5_6,
+       .n_generic = ARRAY_SIZE(power5p_generic_events),
+       .generic_events = power5p_generic_events,
+       .cache_events = &power5p_cache_events,
+};
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
new file mode 100644 (file)
index 0000000..05600b6
--- /dev/null
@@ -0,0 +1,611 @@
+/*
+ * Performance counter support for POWER5 (not POWER5++) processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER5 (not POWER5++)
+ */
+#define PM_PMC_SH      20      /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK     0xf
+#define PM_PMC_MSKS    (PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH     16      /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK    0xf
+#define PM_BYTE_SH     12      /* Byte number of event bus to use */
+#define PM_BYTE_MSK    7
+#define PM_GRS_SH      8       /* Storage subsystem mux select */
+#define PM_GRS_MSK     7
+#define PM_BUSEVENT_MSK        0x80    /* Set if event uses event bus */
+#define PM_PMCSEL_MSK  0x7f
+
+/* Values in PM_UNIT field */
+#define PM_FPU         0
+#define PM_ISU0                1
+#define PM_IFU         2
+#define PM_ISU1                3
+#define PM_IDU         4
+#define PM_ISU0_ALT    6
+#define PM_GRS         7
+#define PM_LSU0                8
+#define PM_LSU1                0xc
+#define PM_LASTUNIT    0xc
+
+/*
+ * Bits in MMCR1 for POWER5
+ */
+#define MMCR1_TTM0SEL_SH       62
+#define MMCR1_TTM1SEL_SH       60
+#define MMCR1_TTM2SEL_SH       58
+#define MMCR1_TTM3SEL_SH       56
+#define MMCR1_TTMSEL_MSK       3
+#define MMCR1_TD_CP_DBG0SEL_SH 54
+#define MMCR1_TD_CP_DBG1SEL_SH 52
+#define MMCR1_TD_CP_DBG2SEL_SH 50
+#define MMCR1_TD_CP_DBG3SEL_SH 48
+#define MMCR1_GRS_L2SEL_SH     46
+#define MMCR1_GRS_L2SEL_MSK    3
+#define MMCR1_GRS_L3SEL_SH     44
+#define MMCR1_GRS_L3SEL_MSK    3
+#define MMCR1_GRS_MCSEL_SH     41
+#define MMCR1_GRS_MCSEL_MSK    7
+#define MMCR1_GRS_FABSEL_SH    39
+#define MMCR1_GRS_FABSEL_MSK   3
+#define MMCR1_PMC1_ADDER_SEL_SH        35
+#define MMCR1_PMC2_ADDER_SEL_SH        34
+#define MMCR1_PMC3_ADDER_SEL_SH        33
+#define MMCR1_PMC4_ADDER_SEL_SH        32
+#define MMCR1_PMC1SEL_SH       25
+#define MMCR1_PMC2SEL_SH       17
+#define MMCR1_PMC3SEL_SH       9
+#define MMCR1_PMC4SEL_SH       1
+#define MMCR1_PMCSEL_SH(n)     (MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK       0x7f
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ *         <><>[  ><><>< ><> [  >[ >[ ><  ><  ><  ><  ><><><><><><>
+ *         T0T1 NC G0G1G2 G3  UC PS1PS2 B0  B1  B2  B3 P6P5P4P3P2P1
+ *
+ * T0 - TTM0 constraint
+ *     54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000
+ *
+ * T1 - TTM1 constraint
+ *     52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000
+ *
+ * NC - number of counters
+ *     51: NC error 0x0008_0000_0000_0000
+ *     48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
+ *
+ * G0..G3 - GRS mux constraints
+ *     46-47: GRS_L2SEL value
+ *     44-45: GRS_L3SEL value
+ *     41-43: GRS_MCSEL value
+ *     39-40: GRS_FABSEL value
+ *     Note that these match up with their bit positions in MMCR1
+ *
+ * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS
+ *     37: UC3 error 0x20_0000_0000
+ *     36: FPU|IFU|ISU1 events needed 0x10_0000_0000
+ *     35: ISU0 events needed 0x08_0000_0000
+ *     34: IDU|GRS events needed 0x04_0000_0000
+ *
+ * PS1
+ *     33: PS1 error 0x2_0000_0000
+ *     31-32: count of events needing PMC1/2 0x1_8000_0000
+ *
+ * PS2
+ *     30: PS2 error 0x4000_0000
+ *     28-29: count of events needing PMC3/4 0x3000_0000
+ *
+ * B0
+ *     24-27: Byte 0 event source 0x0f00_0000
+ *           Encoding as for the event code
+ *
+ * B1, B2, B3
+ *     20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
+ *
+ * P1..P6
+ *     0-11: Count of events needing PMC1..PMC6
+ */
+
+static const int grsel_shift[8] = {
+       MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH,
+       MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH,
+       MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH
+};
+
+/* Masks and values for using events from the various units */
+static u64 unit_cons[PM_LASTUNIT+1][2] = {
+       [PM_FPU] =   { 0xc0002000000000ull, 0x00001000000000ull },
+       [PM_ISU0] =  { 0x00002000000000ull, 0x00000800000000ull },
+       [PM_ISU1] =  { 0xc0002000000000ull, 0xc0001000000000ull },
+       [PM_IFU] =   { 0xc0002000000000ull, 0x80001000000000ull },
+       [PM_IDU] =   { 0x30002000000000ull, 0x00000400000000ull },
+       [PM_GRS] =   { 0x30002000000000ull, 0x30000400000000ull },
+};
+
+static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+       int pmc, byte, unit, sh;
+       int bit, fmask;
+       u64 mask = 0, value = 0;
+       int grp = -1;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       if (pmc) {
+               if (pmc > 6)
+                       return -1;
+               sh = (pmc - 1) * 2;
+               mask |= 2 << sh;
+               value |= 1 << sh;
+               if (pmc <= 4)
+                       grp = (pmc - 1) >> 1;
+               else if (event != 0x500009 && event != 0x600005)
+                       return -1;
+       }
+       if (event & PM_BUSEVENT_MSK) {
+               unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+               if (unit > PM_LASTUNIT)
+                       return -1;
+               if (unit == PM_ISU0_ALT)
+                       unit = PM_ISU0;
+               mask |= unit_cons[unit][0];
+               value |= unit_cons[unit][1];
+               byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+               if (byte >= 4) {
+                       if (unit != PM_LSU1)
+                               return -1;
+                       /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */
+                       ++unit;
+                       byte &= 3;
+               }
+               if (unit == PM_GRS) {
+                       bit = event & 7;
+                       fmask = (bit == 6)? 7: 3;
+                       sh = grsel_shift[bit];
+                       mask |= (u64)fmask << sh;
+                       value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+               }
+               /*
+                * Bus events on bytes 0 and 2 can be counted
+                * on PMC1/2; bytes 1 and 3 on PMC3/4.
+                */
+               if (!pmc)
+                       grp = byte & 1;
+               /* Set byte lane select field */
+               mask  |= 0xfULL << (24 - 4 * byte);
+               value |= (u64)unit << (24 - 4 * byte);
+       }
+       if (grp == 0) {
+               /* increment PMC1/2 field */
+               mask  |= 0x200000000ull;
+               value |= 0x080000000ull;
+       } else if (grp == 1) {
+               /* increment PMC3/4 field */
+               mask  |= 0x40000000ull;
+               value |= 0x10000000ull;
+       }
+       if (pmc < 5) {
+               /* need a counter from PMC1-4 set */
+               mask  |= 0x8000000000000ull;
+               value |= 0x1000000000000ull;
+       }
+       *maskp = mask;
+       *valp = value;
+       return 0;
+}
+
+#define MAX_ALT        3       /* at most 3 alternatives for any event */
+
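+/*
+ * Rows must be sorted by their first (lowest) event code;
+ * find_alternative() below relies on that ordering to stop scanning
+ * early.
+ */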
+static const unsigned int event_alternatives[][MAX_ALT] = {
+       { 0x120e4,  0x400002 },                 /* PM_GRP_DISP_REJECT */
+       { 0x410c7,  0x441084 },                 /* PM_THRD_L2MISS_BOTH_CYC */
+       { 0x100005, 0x600005 },                 /* PM_RUN_CYC */
+       { 0x100009, 0x200009, 0x500009 },       /* PM_INST_CMPL */
+       { 0x300009, 0x400009 },                 /* PM_INST_DISP */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u64 event)
+{
+       int i, j;
+
+       for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+               if (event < event_alternatives[i][0])
+                       break;
+               for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+                       if (event == event_alternatives[i][j])
+                               return i;
+       }
+       return -1;
+}
+
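+/*
+ * Byte-decode PMCSEL values, indexed by [PMC - 1][slot]: an event using
+ * slot j on PMC n has an equivalent encoding using slot j on PMC 5 - n,
+ * which is the swap find_alternative_bdecode() performs below.
+ */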
+static const unsigned char bytedecode_alternatives[4][4] = {
+       /* PMC 1 */     { 0x21, 0x23, 0x25, 0x27 },
+       /* PMC 2 */     { 0x07, 0x17, 0x0e, 0x1e },
+       /* PMC 3 */     { 0x20, 0x22, 0x24, 0x26 },
+       /* PMC 4 */     { 0x07, 0x17, 0x0e, 0x1e }
+};
+
+/*
+ * Some direct events for decodes of event bus byte 3 have alternative
+ * PMCSEL values on other counters.  This returns the alternative
+ * event code for those that do, or -1 otherwise.
+ */
+static s64 find_alternative_bdecode(u64 event)
+{
+       int pmc, altpmc, pp, j;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       if (pmc == 0 || pmc > 4)
+               return -1;
+       altpmc = 5 - pmc;       /* 1 <-> 4, 2 <-> 3 */
+       pp = event & PM_PMCSEL_MSK;
+       for (j = 0; j < 4; ++j) {
+               if (bytedecode_alternatives[pmc - 1][j] == pp) {
+                       return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
+                               (altpmc << PM_PMC_SH) |
+                               bytedecode_alternatives[altpmc - 1][j];
+               }
+       }
+       return -1;
+}
+
+static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+       int i, j, nalt = 1;
+       s64 ae;
+
+       alt[0] = event;
+       nalt = 1;
+       i = find_alternative(event);
+       if (i >= 0) {
+               for (j = 0; j < MAX_ALT; ++j) {
+                       ae = event_alternatives[i][j];
+                       if (ae && ae != event)
+                               alt[nalt++] = ae;
+               }
+       } else {
+               ae = find_alternative_bdecode(event);
+               if (ae > 0)
+                       alt[nalt++] = ae;
+       }
+       return nalt;
+}
+
+/*
+ * Map of which direct events on which PMCs are marked instruction events.
+ * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
+ * Bit 0 is set if it is marked for all PMCs.
+ * The 0x80 bit indicates a byte decode PMCSEL value.
+ */
+static unsigned char direct_event_is_marked[0x28] = {
+       0,      /* 00 */
+       0x1f,   /* 01 PM_IOPS_CMPL */
+       0x2,    /* 02 PM_MRK_GRP_DISP */
+       0xe,    /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
+       0,      /* 04 */
+       0x1c,   /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */
+       0x80,   /* 06 */
+       0x80,   /* 07 */
+       0, 0, 0,/* 08 - 0a */
+       0x18,   /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */
+       0,      /* 0c */
+       0x80,   /* 0d */
+       0x80,   /* 0e */
+       0,      /* 0f */
+       0,      /* 10 */
+       0x14,   /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */
+       0,      /* 12 */
+       0x10,   /* 13 PM_MRK_GRP_CMPL */
+       0x1f,   /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */
+       0x2,    /* 15 PM_MRK_GRP_ISSUED */
+       0x80,   /* 16 */
+       0x80,   /* 17 */
+       0, 0, 0, 0, 0,
+       0x80,   /* 1d */
+       0x80,   /* 1e */
+       0,      /* 1f */
+       0x80,   /* 20 */
+       0x80,   /* 21 */
+       0x80,   /* 22 */
+       0x80,   /* 23 */
+       0x80,   /* 24 */
+       0x80,   /* 25 */
+       0x80,   /* 26 */
+       0x80,   /* 27 */
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int power5_marked_instr_event(u64 event)
+{
+       int pmc, psel;
+       int bit, byte, unit;
+       u32 mask;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       psel = event & PM_PMCSEL_MSK;
+       if (pmc >= 5)
+               return 0;
+
+       bit = -1;
+       if (psel < sizeof(direct_event_is_marked)) {
+               if (direct_event_is_marked[psel] & (1 << pmc))
+                       return 1;
+               if (direct_event_is_marked[psel] & 0x80)
+                       bit = 4;
+               else if (psel == 0x08)
+                       bit = pmc - 1;
+               else if (psel == 0x10)
+                       bit = 4 - pmc;
+               else if (psel == 0x1b && (pmc == 1 || pmc == 3))
+                       bit = 4;
+       } else if ((psel & 0x58) == 0x40)
+               bit = psel & 7;
+
+       if (!(event & PM_BUSEVENT_MSK) || bit == -1)
+               return 0;
+
+       byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+       unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+       if (unit == PM_LSU0) {
+               /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
+               mask = 0x5dff00;
+       } else if (unit == PM_LSU1 && byte >= 4) {
+               byte -= 4;
+               /* byte 4 bits 1,3,5,7, byte 5 bits 6-7, byte 7 bits 0-4,6 */
+               mask = 0x5f00c0aa;
+       } else
+               return 0;
+
+       return (mask >> (byte * 8 + bit)) & 1;
+}
+
+static int power5_compute_mmcr(u64 event[], int n_ev,
+                              unsigned int hwc[], u64 mmcr[])
+{
+       u64 mmcr1 = 0;
+       u64 mmcra = 0;
+       unsigned int pmc, unit, byte, psel;
+       unsigned int ttm, grp;
+       int i, isbus, bit, grsel;
+       unsigned int pmc_inuse = 0;
+       unsigned int pmc_grp_use[2];
+       unsigned char busbyte[4];
+       unsigned char unituse[16];
+       int ttmuse;
+
+       if (n_ev > 6)
+               return -1;
+
+       /* First pass to count resource use */
+       pmc_grp_use[0] = pmc_grp_use[1] = 0;
+       memset(busbyte, 0, sizeof(busbyte));
+       memset(unituse, 0, sizeof(unituse));
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               if (pmc) {
+                       if (pmc > 6)
+                               return -1;
+                       if (pmc_inuse & (1 << (pmc - 1)))
+                               return -1;
+                       pmc_inuse |= 1 << (pmc - 1);
+                       /* count 1/2 vs 3/4 use */
+                       if (pmc <= 4)
+                               ++pmc_grp_use[(pmc - 1) >> 1];
+               }
+               if (event[i] & PM_BUSEVENT_MSK) {
+                       unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+                       byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+                       if (unit > PM_LASTUNIT)
+                               return -1;
+                       if (unit == PM_ISU0_ALT)
+                               unit = PM_ISU0;
+                       if (byte >= 4) {
+                               if (unit != PM_LSU1)
+                                       return -1;
+                               ++unit;
+                               byte &= 3;
+                       }
+                       if (!pmc)
+                               ++pmc_grp_use[byte & 1];
+                       if (busbyte[byte] && busbyte[byte] != unit)
+                               return -1;
+                       busbyte[byte] = unit;
+                       unituse[unit] = 1;
+               }
+       }
+       if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2)
+               return -1;
+
+       /*
+        * Assign resources and set multiplexer selects.
+        *
+        * PM_ISU0 can go either on TTM0 or TTM1, but that's the only
+        * choice we have to deal with.
+        */
+       if (unituse[PM_ISU0] &
+           (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) {
+               unituse[PM_ISU0_ALT] = 1;       /* move ISU to TTM1 */
+               unituse[PM_ISU0] = 0;
+       }
+       /* Set TTM[01]SEL fields. */
+       ttmuse = 0;
+       for (i = PM_FPU; i <= PM_ISU1; ++i) {
+               if (!unituse[i])
+                       continue;
+               if (ttmuse++)
+                       return -1;
+               mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+       }
+       ttmuse = 0;
+       for (; i <= PM_GRS; ++i) {
+               if (!unituse[i])
+                       continue;
+               if (ttmuse++)
+                       return -1;
+               mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+       }
+       if (ttmuse > 1)
+               return -1;
+
+       /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */
+       for (byte = 0; byte < 4; ++byte) {
+               unit = busbyte[byte];
+               if (!unit)
+                       continue;
+               if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) {
+                       /* get ISU0 through TTM1 rather than TTM0 */
+                       unit = PM_ISU0_ALT;
+               } else if (unit == PM_LSU1 + 1) {
+                       /* select lower word of LSU1 for this byte */
+                       mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+               }
+               ttm = unit >> 2;
+               mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+       }
+
+       /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+               byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+               psel = event[i] & PM_PMCSEL_MSK;
+               isbus = event[i] & PM_BUSEVENT_MSK;
+               if (!pmc) {
+                       /* Bus event or any-PMC direct event */
+                       for (pmc = 0; pmc < 4; ++pmc) {
+                               if (pmc_inuse & (1 << pmc))
+                                       continue;
+                               grp = (pmc >> 1) & 1;
+                               if (isbus) {
+                                       if (grp == (byte & 1))
+                                               break;
+                               } else if (pmc_grp_use[grp] < 2) {
+                                       ++pmc_grp_use[grp];
+                                       break;
+                               }
+                       }
+                       pmc_inuse |= 1 << pmc;
+               } else if (pmc <= 4) {
+                       /* Direct event */
+                       --pmc;
+                       if ((psel == 8 || psel == 0x10) && isbus && (byte & 2))
+                               /* add events on higher-numbered bus */
+                               mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+               } else {
+                       /* Instructions or run cycles on PMC5/6 */
+                       --pmc;
+               }
+               if (isbus && unit == PM_GRS) {
+                       bit = psel & 7;
+                       grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
+                       mmcr1 |= (u64)grsel << grsel_shift[bit];
+               }
+               if (power5_marked_instr_event(event[i]))
+                       mmcra |= MMCRA_SAMPLE_ENABLE;
+               if (pmc <= 3)
+                       mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
+               hwc[i] = pmc;
+       }
+
+       /* Return MMCRx values */
+       mmcr[0] = 0;
+       if (pmc_inuse & 1)
+               mmcr[0] = MMCR0_PMC1CE;
+       if (pmc_inuse & 0x3e)
+               mmcr[0] |= MMCR0_PMCjCE;
+       mmcr[1] = mmcr1;
+       mmcr[2] = mmcra;
+       return 0;
+}
+
+static void power5_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+       if (pmc <= 3)
+               mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power5_generic_events[] = {
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0xf,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 0x100009,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x4c1090, /* LD_REF_L1 */
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x3c1088, /* LD_MISS_L1 */
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x230e4,  /* BR_ISSUED */
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x230e5,  /* BR_MPRED_CR */
+};
+
+#define C(x)   PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x4c1090,       0x3c1088        },
+               [C(OP_WRITE)] = {       0x3c1090,       0xc10c3         },
+               [C(OP_PREFETCH)] = {    0xc70e7,        0               },
+       },
+       [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0               },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    0,              0               },
+       },
+       [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x3c309b        },
+               [C(OP_WRITE)] = {       0,              0               },
+               [C(OP_PREFETCH)] = {    0xc50c3,        0               },
+       },
+       [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x2c4090,       0x800c4         },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+       [C(ITLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x800c0         },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+       [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x230e4,        0x230e5         },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+};
+
+struct power_pmu power5_pmu = {
+       .n_counter = 6,
+       .max_alternatives = MAX_ALT,
+       .add_fields = 0x7000090000555ull,
+       .test_adder = 0x3000490000000ull,
+       .compute_mmcr = power5_compute_mmcr,
+       .get_constraint = power5_get_constraint,
+       .get_alternatives = power5_get_alternatives,
+       .disable_pmc = power5_disable_pmc,
+       .n_generic = ARRAY_SIZE(power5_generic_events),
+       .generic_events = power5_generic_events,
+       .cache_events = &power5_cache_events,
+};
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
new file mode 100644 (file)
index 0000000..46f74be
--- /dev/null
@@ -0,0 +1,532 @@
+/*
+ * Performance counter support for POWER6 processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER6
+ */
+#define PM_PMC_SH      20      /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK     0x7
+#define PM_PMC_MSKS    (PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH     16      /* Unit the event comes from (TTMxSEL encoding) */
+#define PM_UNIT_MSK    0xf
+#define PM_UNIT_MSKS   (PM_UNIT_MSK << PM_UNIT_SH)
+#define PM_LLAV                0x8000  /* Load lookahead match value */
+#define PM_LLA         0x4000  /* Load lookahead match enable */
+#define PM_BYTE_SH     12      /* Byte of event bus to use */
+#define PM_BYTE_MSK    3
+#define PM_SUBUNIT_SH  8       /* Subunit the event comes from (NEST_SEL enc.) */
+#define PM_SUBUNIT_MSK 7
+#define PM_SUBUNIT_MSKS        (PM_SUBUNIT_MSK << PM_SUBUNIT_SH)
+#define PM_PMCSEL_MSK  0xff    /* PMCxSEL value */
+#define PM_BUSEVENT_MSK        0xf3700
+
+/*
+ * Bits in MMCR1 for POWER6
+ */
+#define MMCR1_TTM0SEL_SH       60
+#define MMCR1_TTMSEL_SH(n)     (MMCR1_TTM0SEL_SH - (n) * 4)
+#define MMCR1_TTMSEL_MSK       0xf
+#define MMCR1_TTMSEL(m, n)     (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK)
+#define MMCR1_NESTSEL_SH       45
+#define MMCR1_NESTSEL_MSK      0x7
+#define MMCR1_NESTSEL(m)       (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
+#define MMCR1_PMC1_LLA         ((u64)1 << 44)
+#define MMCR1_PMC1_LLA_VALUE   ((u64)1 << 39)
+#define MMCR1_PMC1_ADDR_SEL    ((u64)1 << 35)
+#define MMCR1_PMC1SEL_SH       24
+#define MMCR1_PMCSEL_SH(n)     (MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK       0xff
+
+/*
+ * Map of which direct events on which PMCs are marked instruction events.
+ * Indexed by PMCSEL value >> 1.
+ * Bottom 4 bits are a map of which PMCs are interesting,
+ * top 4 bits say what sort of event:
+ *   0 = direct marked event,
+ *   1 = byte decode event,
+ *   4 = add/and event (PMC1 -> bits 0 & 4),
+ *   5 = add/and event (PMC1 -> bits 1 & 5),
+ *   6 = add/and event (PMC1 -> bits 2 & 6),
+ *   7 = add/and event (PMC1 -> bits 3 & 7).
+ */
+static unsigned char direct_event_is_marked[0x60 >> 1] = {
+       0,      /* 00 */
+       0,      /* 02 */
+       0,      /* 04 */
+       0x07,   /* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
+       0x04,   /* 08 PM_MRK_DFU_FIN */
+       0x06,   /* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */
+       0,      /* 0c */
+       0,      /* 0e */
+       0x02,   /* 10 PM_MRK_INST_DISP */
+       0x08,   /* 12 PM_MRK_LSU_DERAT_MISS */
+       0,      /* 14 */
+       0,      /* 16 */
+       0x0c,   /* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */
+       0x0f,   /* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */
+       0x01,   /* 1c PM_MRK_INST_ISSUED */
+       0,      /* 1e */
+       0,      /* 20 */
+       0,      /* 22 */
+       0,      /* 24 */
+       0,      /* 26 */
+       0x15,   /* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */
+       0,      /* 2a */
+       0,      /* 2c */
+       0,      /* 2e */
+       0x4f,   /* 30 */
+       0x7f,   /* 32 */
+       0x4f,   /* 34 */
+       0x5f,   /* 36 */
+       0x6f,   /* 38 */
+       0x4f,   /* 3a */
+       0,      /* 3c */
+       0x08,   /* 3e PM_MRK_INST_TIMEO */
+       0x1f,   /* 40 */
+       0x1f,   /* 42 */
+       0x1f,   /* 44 */
+       0x1f,   /* 46 */
+       0x1f,   /* 48 */
+       0x1f,   /* 4a */
+       0x1f,   /* 4c */
+       0x1f,   /* 4e */
+       0,      /* 50 */
+       0x05,   /* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */
+       0x1c,   /* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */
+       0x02,   /* 56 PM_MRK_LD_MISS_L1 */
+       0,      /* 58 */
+       0,      /* 5a */
+       0,      /* 5c */
+       0,      /* 5e */
+};
+
+/*
+ * Masks showing for each unit which bits are marked events.
+ * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0.
+ */
+static u32 marked_bus_events[16] = {
+       0x01000000,     /* direct events set 1: byte 3 bit 0 */
+       0x00010000,     /* direct events set 2: byte 2 bit 0 */
+       0, 0, 0, 0,     /* IDU, IFU, nest: nothing */
+       0x00000088,     /* VMX set 1: byte 0 bits 3, 7 */
+       0x000000c0,     /* VMX set 2: byte 0 bits 4-7 */
+       0x04010000,     /* LSU set 1: byte 2 bit 0, byte 3 bit 2 */
+       0xff010000u,    /* LSU set 2: byte 2 bit 0, all of byte 3 */
+       0,              /* LSU set 3 */
+       0x00000010,     /* VMX set 3: byte 0 bit 4 */
+       0,              /* BFP set 1 */
+       0x00000022,     /* BFP set 2: byte 0 bits 1, 5 */
+       0, 0
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int power6_marked_instr_event(u64 event)
+{
+       int pmc, psel, ptype;
+       int bit, byte, unit;
+       u32 mask;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       psel = (event & PM_PMCSEL_MSK) >> 1;    /* drop edge/level bit */
+       if (pmc >= 5)
+               return 0;
+
+       bit = -1;
+       if (psel < sizeof(direct_event_is_marked)) {
+               ptype = direct_event_is_marked[psel];
+               if (pmc == 0 || !(ptype & (1 << (pmc - 1))))
+                       return 0;
+               ptype >>= 4;
+               if (ptype == 0)
+                       return 1;
+               if (ptype == 1)
+                       bit = 0;
+               else
+                       bit = ptype ^ (pmc - 1);
+       } else if ((psel & 0x48) == 0x40)
+               bit = psel & 7;
+
+       if (!(event & PM_BUSEVENT_MSK) || bit == -1)
+               return 0;
+
+       byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+       unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+       mask = marked_bus_events[unit];
+       return (mask >> (byte * 8 + bit)) & 1;
+}
+
+/*
+ * Assign PMC numbers and compute MMCR1 value for a set of events
+ */
+static int p6_compute_mmcr(u64 event[], int n_ev,
+                          unsigned int hwc[], u64 mmcr[])
+{
+       u64 mmcr1 = 0;
+       u64 mmcra = 0;
+       int i;
+       unsigned int pmc, ev, b, u, s, psel;
+       unsigned int ttmset = 0;
+       unsigned int pmc_inuse = 0;
+
+       if (n_ev > 6)
+               return -1;
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               if (pmc) {
+                       if (pmc_inuse & (1 << (pmc - 1)))
+                               return -1;      /* collision! */
+                       pmc_inuse |= 1 << (pmc - 1);
+               }
+       }
+       for (i = 0; i < n_ev; ++i) {
+               ev = event[i];
+               pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
+               if (pmc) {
+                       --pmc;
+               } else {
+                       /* can go on any PMC; find a free one */
+                       for (pmc = 0; pmc < 4; ++pmc)
+                               if (!(pmc_inuse & (1 << pmc)))
+                                       break;
+                       if (pmc >= 4)
+                               return -1;
+                       pmc_inuse |= 1 << pmc;
+               }
+               hwc[i] = pmc;
+               psel = ev & PM_PMCSEL_MSK;
+               if (ev & PM_BUSEVENT_MSK) {
+                       /* this event uses the event bus */
+                       b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK;
+                       u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK;
+                       /* check for conflict on this byte of event bus */
+                       if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
+                               return -1;
+                       mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b);
+                       ttmset |= 1 << b;
+                       if (u == 5) {
+                               /* Nest events have a further mux */
+                               s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
+                               if ((ttmset & 0x10) &&
+                                   MMCR1_NESTSEL(mmcr1) != s)
+                                       return -1;
+                               ttmset |= 0x10;
+                               mmcr1 |= (u64)s << MMCR1_NESTSEL_SH;
+                       }
+                       if (0x30 <= psel && psel <= 0x3d) {
+                               /* these need the PMCx_ADDR_SEL bits */
+                               if (b >= 2)
+                                       mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc;
+                       }
+                       /* bus select values are different for PMC3/4 */
+                       if (pmc >= 2 && (psel & 0x90) == 0x80)
+                               psel ^= 0x20;
+               }
+               if (ev & PM_LLA) {
+                       mmcr1 |= MMCR1_PMC1_LLA >> pmc;
+                       if (ev & PM_LLAV)
+                               mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc;
+               }
+               if (power6_marked_instr_event(event[i]))
+                       mmcra |= MMCRA_SAMPLE_ENABLE;
+               if (pmc < 4)
+                       mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc);
+       }
+       mmcr[0] = 0;
+       if (pmc_inuse & 1)
+               mmcr[0] = MMCR0_PMC1CE;
+       if (pmc_inuse & 0xe)
+               mmcr[0] |= MMCR0_PMCjCE;
+       mmcr[1] = mmcr1;
+       mmcr[2] = mmcra;
+       return 0;
+}
+
+/*
+ * Layout of constraint bits:
+ *
+ *     0-1     add field: number of uses of PMC1 (max 1)
+ *     2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6
+ *     12-15   add field: number of uses of PMC1-4 (max 4)
+ *     16-19   select field: unit on byte 0 of event bus
+ *     20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
+ *     32-34   select field: nest (subunit) event selector
+ */
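+/*
+ * Illustrative reading of the power6_pmu constants at the bottom of this
+ * file: add_fields 0x1555 marks the low bit of each 2-bit PMC field
+ * (bits 0, 2, ..., 10) and of the PMC1-4 use count (bit 12), while
+ * test_adder 0x3000 adds 3 to that count, so a fifth PMC1-4 user makes
+ * 5 + 3 carry into bit 15 -- the error bit this function puts in the
+ * mask of every event that can use PMC1-4.
+ */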
+static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+       int pmc, byte, sh, subunit;
+       u64 mask = 0, value = 0;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       if (pmc) {
+               if (pmc > 4 && !(event == 0x500009 || event == 0x600005))
+                       return -1;
+               sh = (pmc - 1) * 2;
+               mask |= 2 << sh;
+               value |= 1 << sh;
+       }
+       if (event & PM_BUSEVENT_MSK) {
+               byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+               sh = byte * 4 + (16 - PM_UNIT_SH);
+               mask |= PM_UNIT_MSKS << sh;
+               value |= (u64)(event & PM_UNIT_MSKS) << sh;
+               if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
+                       subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
+                       mask  |= (u64)PM_SUBUNIT_MSK << 32;
+                       value |= (u64)subunit << 32;
+               }
+       }
+       if (pmc <= 4) {
+               mask  |= 0x8000;        /* add field for count of PMC1-4 uses */
+               value |= 0x1000;
+       }
+       *maskp = mask;
+       *valp = value;
+       return 0;
+}
+
+static int p6_limited_pmc_event(u64 event)
+{
+       int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+
+       return pmc == 5 || pmc == 6;
+}
+
+#define MAX_ALT        4       /* at most 4 alternatives for any event */
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+       { 0x0130e8, 0x2000f6, 0x3000fc },       /* PM_PTEG_RELOAD_VALID */
+       { 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */
+       { 0x080088, 0x200054, 0x3000f0 },       /* PM_ST_MISS_L1 */
+       { 0x10000a, 0x2000f4, 0x600005 },       /* PM_RUN_CYC */
+       { 0x10000b, 0x2000f5 },                 /* PM_RUN_COUNT */
+       { 0x10000e, 0x400010 },                 /* PM_PURR */
+       { 0x100010, 0x4000f8 },                 /* PM_FLUSH */
+       { 0x10001a, 0x200010 },                 /* PM_MRK_INST_DISP */
+       { 0x100026, 0x3000f8 },                 /* PM_TB_BIT_TRANS */
+       { 0x100054, 0x2000f0 },                 /* PM_ST_FIN */
+       { 0x100056, 0x2000fc },                 /* PM_L1_ICACHE_MISS */
+       { 0x1000f0, 0x40000a },                 /* PM_INST_IMC_MATCH_CMPL */
+       { 0x1000f8, 0x200008 },                 /* PM_GCT_EMPTY_CYC */
+       { 0x1000fc, 0x400006 },                 /* PM_LSU_DERAT_MISS_CYC */
+       { 0x20000e, 0x400007 },                 /* PM_LSU_DERAT_MISS */
+       { 0x200012, 0x300012 },                 /* PM_INST_DISP */
+       { 0x2000f2, 0x3000f2 },                 /* PM_INST_DISP */
+       { 0x2000f8, 0x300010 },                 /* PM_EXT_INT */
+       { 0x2000fe, 0x300056 },                 /* PM_DATA_FROM_L2MISS */
+       { 0x2d0030, 0x30001a },                 /* PM_MRK_FPU_FIN */
+       { 0x30000a, 0x400018 },                 /* PM_MRK_INST_FIN */
+       { 0x3000f6, 0x40000e },                 /* PM_L1_DCACHE_RELOAD_VALID */
+       { 0x3000fe, 0x400056 },                 /* PM_DATA_FROM_L3MISS */
+};
+
+/*
+ * This could be made more efficient with a binary search on
+ * a presorted list, if necessary
+ */
+static int find_alternatives_list(u64 event)
+{
+       int i, j;
+       unsigned int alt;
+
+       for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+               if (event < event_alternatives[i][0])
+                       return -1;
+               for (j = 0; j < MAX_ALT; ++j) {
+                       alt = event_alternatives[i][j];
+                       if (!alt || event < alt)
+                               break;
+                       if (event == alt)
+                               return i;
+               }
+       }
+       return -1;
+}
+
+static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+       int i, j, nlim;
+       unsigned int psel, pmc;
+       unsigned int nalt = 1;
+       u64 aevent;
+
+       alt[0] = event;
+       nlim = p6_limited_pmc_event(event);
+
+       /* check the alternatives table */
+       i = find_alternatives_list(event);
+       if (i >= 0) {
+               /* copy out alternatives from list */
+               for (j = 0; j < MAX_ALT; ++j) {
+                       aevent = event_alternatives[i][j];
+                       if (!aevent)
+                               break;
+                       if (aevent != event)
+                               alt[nalt++] = aevent;
+                       nlim += p6_limited_pmc_event(aevent);
+               }
+
+       } else {
+               /* Check for alternative ways of computing sum events */
+               /* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */
+               psel = event & (PM_PMCSEL_MSK & ~1);    /* ignore edge bit */
+               pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+               if (pmc && (psel == 0x32 || psel == 0x34))
+                       alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) |
+                               ((5 - pmc) << PM_PMC_SH);
+
+               /* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */
+               if (pmc && (psel == 0x38 || psel == 0x3a))
+                       alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) |
+                               ((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH);
+       }
+
+       if (flags & PPMU_ONLY_COUNT_RUN) {
+               /*
+                * We're only counting in RUN state,
+                * so PM_CYC is equivalent to PM_RUN_CYC,
+                * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR.
+                * This doesn't include alternatives that don't provide
+                * any extra flexibility in assigning PMCs (e.g.
+                * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC).
+                * Note that even with these additional alternatives
+                * we never end up with more than 4 alternatives for any event.
+                */
+               j = nalt;
+               for (i = 0; i < nalt; ++i) {
+                       switch (alt[i]) {
+                       case 0x1e:      /* PM_CYC */
+                               alt[j++] = 0x600005;    /* PM_RUN_CYC */
+                               ++nlim;
+                               break;
+                       case 0x10000a:  /* PM_RUN_CYC */
+                               alt[j++] = 0x1e;        /* PM_CYC */
+                               break;
+                       case 2:         /* PM_INST_CMPL */
+                               alt[j++] = 0x500009;    /* PM_RUN_INST_CMPL */
+                               ++nlim;
+                               break;
+                       case 0x500009:  /* PM_RUN_INST_CMPL */
+                               alt[j++] = 2;           /* PM_INST_CMPL */
+                               break;
+                       case 0x10000e:  /* PM_PURR */
+                               alt[j++] = 0x4000f4;    /* PM_RUN_PURR */
+                               break;
+                       case 0x4000f4:  /* PM_RUN_PURR */
+                               alt[j++] = 0x10000e;    /* PM_PURR */
+                               break;
+                       }
+               }
+               nalt = j;
+       }
+
+       if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
+               /* remove the limited PMC events */
+               j = 0;
+               for (i = 0; i < nalt; ++i) {
+                       if (!p6_limited_pmc_event(alt[i])) {
+                               alt[j] = alt[i];
+                               ++j;
+                       }
+               }
+               nalt = j;
+       } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
+               /* remove all but the limited PMC events */
+               j = 0;
+               for (i = 0; i < nalt; ++i) {
+                       if (p6_limited_pmc_event(alt[i])) {
+                               alt[j] = alt[i];
+                               ++j;
+                       }
+               }
+               nalt = j;
+       }
+
+       return nalt;
+}
+
+static void p6_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+       /* Set PMCxSEL to 0 to disable PMCx */
+       if (pmc <= 3)
+               mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power6_generic_events[] = {
+       [PERF_COUNT_HW_CPU_CYCLES]              = 0x1e,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 2,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x280030, /* LD_REF_L1 */
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x30000c, /* LD_MISS_L1 */
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x410a0,  /* BR_PRED */
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x400052, /* BR_MPRED */
+};
+
+#define C(x)   PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ * The "DTLB" and "ITLB" events relate to the DERAT and IERAT.
+ */
+static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x80082,        0x80080         },
+               [C(OP_WRITE)] = {       0x80086,        0x80088         },
+               [C(OP_PREFETCH)] = {    0x810a4,        0               },
+       },
+       [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x100056        },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    0x4008c,        0               },
+       },
+       [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x150730,       0x250532        },
+               [C(OP_WRITE)] = {       0x250432,       0x150432        },
+               [C(OP_PREFETCH)] = {    0x810a6,        0               },
+       },
+       [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x20000e        },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+       [C(ITLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x420ce         },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+       [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x430e6,        0x400052        },
+               [C(OP_WRITE)] = {       -1,             -1              },
+               [C(OP_PREFETCH)] = {    -1,             -1              },
+       },
+};
+
+struct power_pmu power6_pmu = {
+       .n_counter = 6,
+       .max_alternatives = MAX_ALT,
+       .add_fields = 0x1555,
+       .test_adder = 0x3000,
+       .compute_mmcr = p6_compute_mmcr,
+       .get_constraint = p6_get_constraint,
+       .get_alternatives = p6_get_alternatives,
+       .disable_pmc = p6_disable_pmc,
+       .limited_pmc_event = p6_limited_pmc_event,
+       .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
+       .n_generic = ARRAY_SIZE(power6_generic_events),
+       .generic_events = power6_generic_events,
+       .cache_events = &power6_cache_events,
+};
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
new file mode 100644 (file)
index 0000000..b3f7d12
--- /dev/null
@@ -0,0 +1,357 @@
+/*
+ * Performance counter support for POWER7 processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER7
+ */
+#define PM_PMC_SH      16      /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK     0xf
+#define PM_PMC_MSKS    (PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH     12      /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK    0xf
+#define PM_COMBINE_SH  11      /* Combined event bit */
+#define PM_COMBINE_MSK 1
+#define PM_COMBINE_MSKS        0x800
+#define PM_L2SEL_SH    8       /* L2 event select */
+#define PM_L2SEL_MSK   7
+#define PM_PMCSEL_MSK  0xff
+
+/*
+ * Bits in MMCR1 for POWER7
+ */
+#define MMCR1_TTM0SEL_SH       60
+#define MMCR1_TTM1SEL_SH       56
+#define MMCR1_TTM2SEL_SH       52
+#define MMCR1_TTM3SEL_SH       48
+#define MMCR1_TTMSEL_MSK       0xf
+#define MMCR1_L2SEL_SH         45
+#define MMCR1_L2SEL_MSK                7
+#define MMCR1_PMC1_COMBINE_SH  35
+#define MMCR1_PMC2_COMBINE_SH  34
+#define MMCR1_PMC3_COMBINE_SH  33
+#define MMCR1_PMC4_COMBINE_SH  32
+#define MMCR1_PMC1SEL_SH       24
+#define MMCR1_PMC2SEL_SH       16
+#define MMCR1_PMC3SEL_SH       8
+#define MMCR1_PMC4SEL_SH       0
+#define MMCR1_PMCSEL_SH(n)     (MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK       0xff
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ *                                                 [  ><><><><><><>
+ *                                                  NC P6P5P4P3P2P1
+ *
+ * NC - number of counters
+ *     15: NC error 0x8000
+ *     12-14: number of events needing PMC1-4 0x7000
+ *
+ * P6
+ *     11: P6 error 0x800
+ *     10: Count of events needing PMC6
+ *
+ * P1..P5
+ *     0-9: Count of events needing PMC1..PMC5
+ */
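+
+/*
+ * Note that POWER7 gives each of PMC1-4 its own unit-select field in
+ * MMCR1 (see power7_compute_mmcr() below), so there are no event-bus
+ * constraints to encode here; only PMC usage is tracked, with the same
+ * carry-into-error-bit scheme via add_fields/test_adder.
+ */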
+
+static int power7_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+       int pmc, sh;
+       u64 mask = 0, value = 0;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       if (pmc) {
+               if (pmc > 6)
+                       return -1;
+               sh = (pmc - 1) * 2;
+               mask |= 2 << sh;
+               value |= 1 << sh;
+               if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4))
+                       return -1;
+       }
+       if (pmc < 5) {
+               /* need a counter from PMC1-4 set */
+               mask  |= 0x8000;
+               value |= 0x1000;
+       }
+       *maskp = mask;
+       *valp = value;
+       return 0;
+}
+
+#define MAX_ALT        2       /* at most 2 alternatives for any event */
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+       { 0x200f2, 0x300f2 },           /* PM_INST_DISP */
+       { 0x200f4, 0x600f4 },           /* PM_RUN_CYC */
+       { 0x400fa, 0x500fa },           /* PM_RUN_INST_CMPL */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u64 event)
+{
+       int i, j;
+
+       for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+               if (event < event_alternatives[i][0])
+                       break;
+               for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+                       if (event == event_alternatives[i][j])
+                               return i;
+       }
+       return -1;
+}
+
+static s64 find_alternative_decode(u64 event)
+{
+       int pmc, psel;
+
+       /* this only handles the 4x decode events */
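+       /*
+        * For instance (illustrative encoding, not a named event): PMC2
+        * with PMCSEL 0x42 (event 0x20042) pairs with PMC1 and PMCSEL
+        * 0x4a (event 0x1004a); the PMC number drops by one while PMCSEL
+        * gains 8, and the second branch performs the inverse mapping.
+        */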
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       psel = event & PM_PMCSEL_MSK;
+       if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40)
+               return event - (1 << PM_PMC_SH) + 8;
+       if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48)
+               return event + (1 << PM_PMC_SH) - 8;
+       return -1;
+}
+
+static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+       int i, j, nalt = 1;
+       s64 ae;
+
+       alt[0] = event;
+       nalt = 1;
+       i = find_alternative(event);
+       if (i >= 0) {
+               for (j = 0; j < MAX_ALT; ++j) {
+                       ae = event_alternatives[i][j];
+                       if (ae && ae != event)
+                               alt[nalt++] = ae;
+               }
+       } else {
+               ae = find_alternative_decode(event);
+               if (ae > 0)
+                       alt[nalt++] = ae;
+       }
+
+       if (flags & PPMU_ONLY_COUNT_RUN) {
+               /*
+                * We're only counting in RUN state,
+                * so PM_CYC is equivalent to PM_RUN_CYC
+                * and PM_INST_CMPL === PM_RUN_INST_CMPL.
+                * This doesn't include alternatives that don't provide
+                * any extra flexibility in assigning PMCs.
+                */
+               j = nalt;
+               for (i = 0; i < nalt; ++i) {
+                       switch (alt[i]) {
+                       case 0x1e:      /* PM_CYC */
+                               alt[j++] = 0x600f4;     /* PM_RUN_CYC */
+                               break;
+                       case 0x600f4:   /* PM_RUN_CYC */
+                               alt[j++] = 0x1e;
+                               break;
+                       case 0x2:       /* PM_PPC_CMPL */
+                               alt[j++] = 0x500fa;     /* PM_RUN_INST_CMPL */
+                               break;
+                       case 0x500fa:   /* PM_RUN_INST_CMPL */
+                               alt[j++] = 0x2; /* PM_PPC_CMPL */
+                               break;
+                       }
+               }
+               nalt = j;
+       }
+
+       return nalt;
+}
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int power7_marked_instr_event(u64 event)
+{
+       int pmc, psel;
+       int unit;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+       psel = event & PM_PMCSEL_MSK & ~1;      /* trim off edge/level bit */
+       if (pmc >= 5)
+               return 0;
+
+       switch (psel >> 4) {
+       case 2:
+               return pmc == 2 || pmc == 4;
+       case 3:
+               if (psel == 0x3c)
+                       return pmc == 1;
+               if (psel == 0x3e)
+                       return pmc != 2;
+               return 1;
+       case 4:
+       case 5:
+               return unit == 0xd;
+       case 6:
+               if (psel == 0x64)
+                       return pmc >= 3;
+               break;
+       case 8:
+               return unit == 0xd;
+       }
+       return 0;
+}
+
+static int power7_compute_mmcr(u64 event[], int n_ev,
+                              unsigned int hwc[], u64 mmcr[])
+{
+       u64 mmcr1 = 0;
+       u64 mmcra = 0;
+       unsigned int pmc, unit, combine, l2sel, psel;
+       unsigned int pmc_inuse = 0;
+       int i;
+
+       /* First pass to count resource use */
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               if (pmc) {
+                       if (pmc > 6)
+                               return -1;
+                       if (pmc_inuse & (1 << (pmc - 1)))
+                               return -1;
+                       pmc_inuse |= 1 << (pmc - 1);
+               }
+       }
+
+       /* Second pass: assign PMCs, set all MMCR1 fields */
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+               combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK;
+               l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK;
+               psel = event[i] & PM_PMCSEL_MSK;
+               if (!pmc) {
+                       /* Bus event or any-PMC direct event */
+                       for (pmc = 0; pmc < 4; ++pmc) {
+                               if (!(pmc_inuse & (1 << pmc)))
+                                       break;
+                       }
+                       if (pmc >= 4)
+                               return -1;
+                       pmc_inuse |= 1 << pmc;
+               } else {
+                       /* Direct or decoded event */
+                       --pmc;
+               }
+               if (pmc <= 3) {
+                       mmcr1 |= (u64) unit << (MMCR1_TTM0SEL_SH - 4 * pmc);
+                       mmcr1 |= (u64) combine << (MMCR1_PMC1_COMBINE_SH - pmc);
+                       mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
+                       if (unit == 6)  /* L2 events */
+                               mmcr1 |= (u64) l2sel << MMCR1_L2SEL_SH;
+               }
+               if (power7_marked_instr_event(event[i]))
+                       mmcra |= MMCRA_SAMPLE_ENABLE;
+               hwc[i] = pmc;
+       }
+
+       /* Return MMCRx values */
+       mmcr[0] = 0;
+       if (pmc_inuse & 1)
+               mmcr[0] = MMCR0_PMC1CE;
+       if (pmc_inuse & 0x3e)
+               mmcr[0] |= MMCR0_PMCjCE;
+       mmcr[1] = mmcr1;
+       mmcr[2] = mmcra;
+       return 0;
+}
+
+static void power7_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+       if (pmc <= 3)
+               mmcr[1] &= ~(0xffULL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power7_generic_events[] = {
+       [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
+       [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+       [PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880,      /* LD_REF_L1_LSU */
+       [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0,         /* LD_MISS_L1 */
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068,  /* BRU_FIN */
+       [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6,        /* BR_MPRED */
+};
+
+#define C(x)   PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x400f0,        0xc880  },
+               [C(OP_WRITE)] = {       0,              0x300f0 },
+               [C(OP_PREFETCH)] = {    0xd8b8,         0       },
+       },
+       [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x200fc },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    0x408a,         0       },
+       },
+       [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x6080,         0x6084  },
+               [C(OP_WRITE)] = {       0x6082,         0x6086  },
+               [C(OP_PREFETCH)] = {    0,              0       },
+       },
+       [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x300fc },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+       [C(ITLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x400fc },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+       [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x10068,        0x400f6 },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+};
+
+struct power_pmu power7_pmu = {
+       .n_counter = 6,
+       .max_alternatives = MAX_ALT + 1,
+       .add_fields = 0x1555ull,
+       .test_adder = 0x3000ull,
+       .compute_mmcr = power7_compute_mmcr,
+       .get_constraint = power7_get_constraint,
+       .get_alternatives = power7_get_alternatives,
+       .disable_pmc = power7_disable_pmc,
+       .n_generic = ARRAY_SIZE(power7_generic_events),
+       .generic_events = power7_generic_events,
+       .cache_events = &power7_cache_events,
+};
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
new file mode 100644 (file)
index 0000000..ba0a357
--- /dev/null
@@ -0,0 +1,482 @@
+/*
+ * Performance counter support for PPC970-family processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for PPC970
+ */
+#define PM_PMC_SH      12      /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK     0xf
+#define PM_UNIT_SH     8       /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK    0xf
+#define PM_SPCSEL_SH   6
+#define PM_SPCSEL_MSK  3
+#define PM_BYTE_SH     4       /* Byte number of event bus to use */
+#define PM_BYTE_MSK    3
+#define PM_PMCSEL_MSK  0xf
+
+/* Values in PM_UNIT field */
+#define PM_NONE                0
+#define PM_FPU         1
+#define PM_VPU         2
+#define PM_ISU         3
+#define PM_IFU         4
+#define PM_IDU         5
+#define PM_STS         6
+#define PM_LSU0                7
+#define PM_LSU1U       8
+#define PM_LSU1L       9
+#define PM_LASTUNIT    9
+
+/*
+ * Bits in MMCR0 for PPC970
+ */
+#define MMCR0_PMC1SEL_SH       8
+#define MMCR0_PMC2SEL_SH       1
+#define MMCR_PMCSEL_MSK                0x1f
+
+/*
+ * Bits in MMCR1 for PPC970
+ */
+#define MMCR1_TTM0SEL_SH       62
+#define MMCR1_TTM1SEL_SH       59
+#define MMCR1_TTM3SEL_SH       53
+#define MMCR1_TTMSEL_MSK       3
+#define MMCR1_TD_CP_DBG0SEL_SH 50
+#define MMCR1_TD_CP_DBG1SEL_SH 48
+#define MMCR1_TD_CP_DBG2SEL_SH 46
+#define MMCR1_TD_CP_DBG3SEL_SH 44
+#define MMCR1_PMC1_ADDER_SEL_SH        39
+#define MMCR1_PMC2_ADDER_SEL_SH        38
+#define MMCR1_PMC6_ADDER_SEL_SH        37
+#define MMCR1_PMC5_ADDER_SEL_SH        36
+#define MMCR1_PMC8_ADDER_SEL_SH        35
+#define MMCR1_PMC7_ADDER_SEL_SH        34
+#define MMCR1_PMC3_ADDER_SEL_SH        33
+#define MMCR1_PMC4_ADDER_SEL_SH        32
+#define MMCR1_PMC3SEL_SH       27
+#define MMCR1_PMC4SEL_SH       22
+#define MMCR1_PMC5SEL_SH       17
+#define MMCR1_PMC6SEL_SH       12
+#define MMCR1_PMC7SEL_SH       7
+#define MMCR1_PMC8SEL_SH       2
+
+static short mmcr1_adder_bits[8] = {
+       MMCR1_PMC1_ADDER_SEL_SH,
+       MMCR1_PMC2_ADDER_SEL_SH,
+       MMCR1_PMC3_ADDER_SEL_SH,
+       MMCR1_PMC4_ADDER_SEL_SH,
+       MMCR1_PMC5_ADDER_SEL_SH,
+       MMCR1_PMC6_ADDER_SEL_SH,
+       MMCR1_PMC7_ADDER_SEL_SH,
+       MMCR1_PMC8_ADDER_SEL_SH
+};
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ *               <><><>[  >[  >[  ><  ><  ><  ><  ><><><><><><><><>
+ *               SPT0T1 UC  PS1 PS2 B0  B1  B2  B3 P8P7P6P5P4P3P2P1
+ *
+ * SP - SPCSEL constraint
+ *     48-49: SPCSEL value 0x3_0000_0000_0000
+ *
+ * T0 - TTM0 constraint
+ *     46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000
+ *
+ * T1 - TTM1 constraint
+ *     44-45: TTM1SEL value (0=IDU, 3=STS) 0x3000_0000_0000
+ *
+ * UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS
+ *     43: UC3 error 0x0800_0000_0000
+ *     42: FPU|IFU|VPU events needed 0x0400_0000_0000
+ *     41: ISU events needed 0x0200_0000_0000
+ *     40: IDU|STS events needed 0x0100_0000_0000
+ *
+ * PS1
+ *     39: PS1 error 0x0080_0000_0000
+ *     36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
+ *
+ * PS2
+ *     35: PS2 error 0x0008_0000_0000
+ *     32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
+ *
+ * B0
+ *     28-31: Byte 0 event source 0xf000_0000
+ *           Encoding as for the event code
+ *
+ * B1, B2, B3
+ *     24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
+ *
+ * P8
+ *     15: P8 error 0x8000
+ *     14: Count of events needing PMC8
+ *
+ * P1..P7
+ *     0-13: Count of events needing PMC1..PMC7
+ */
+
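+/*
+ * Indexed by PMC - 1; bit n is set when PMCSEL value n on that PMC
+ * counts a marked-instruction event (see p970_marked_instr_event()
+ * below).
+ */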
+static unsigned char direct_marked_event[8] = {
+       (1<<2) | (1<<3),        /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */
+       (1<<3) | (1<<5),        /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */
+       (1<<3) | (1<<5),        /* PMC3: PM_MRK_ST_CMPL_INT, PM_MRK_VMX_FIN */
+       (1<<4) | (1<<5),        /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */
+       (1<<4) | (1<<5),        /* PMC5: PM_GRP_MRK, PM_MRK_GRP_TIMEO */
+       (1<<3) | (1<<4) | (1<<5),
+               /* PMC6: PM_MRK_ST_STS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */
+       (1<<4) | (1<<5),        /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */
+       (1<<4)                  /* PMC8: PM_MRK_LSU_FIN */
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int p970_marked_instr_event(u64 event)
+{
+       int pmc, psel, unit, byte, bit;
+       unsigned int mask;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       psel = event & PM_PMCSEL_MSK;
+       if (pmc) {
+               if (direct_marked_event[pmc - 1] & (1 << psel))
+                       return 1;
+               if (psel == 0)          /* add events */
+                       bit = (pmc <= 4)? pmc - 1: 8 - pmc;
+               else if (psel == 7 || psel == 13)       /* decode events */
+                       bit = 4;
+               else
+                       return 0;
+       } else
+               bit = psel;
+
+       byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+       unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+       mask = 0;
+       switch (unit) {
+       case PM_VPU:
+               mask = 0x4c;            /* byte 0 bits 2,3,6 */
+               break;
+       case PM_LSU0:
+               /* byte 2 bits 0,2,3,4,6; all of byte 1 */
+               mask = 0x085dff00;
+               break;
+       case PM_LSU1L:
+       case PM_LSU1U:
+               mask = 0x50 << 24;      /* byte 3 bits 4,6 */
+               break;
+       }
+       return (mask >> (byte * 8 + bit)) & 1;
+}
+
+/* Masks and values for using events from the various units */
+static u64 unit_cons[PM_LASTUNIT+1][2] = {
+       [PM_FPU] =   { 0xc80000000000ull, 0x040000000000ull },
+       [PM_VPU] =   { 0xc80000000000ull, 0xc40000000000ull },
+       [PM_ISU] =   { 0x080000000000ull, 0x020000000000ull },
+       [PM_IFU] =   { 0xc80000000000ull, 0x840000000000ull },
+       [PM_IDU] =   { 0x380000000000ull, 0x010000000000ull },
+       [PM_STS] =   { 0x380000000000ull, 0x310000000000ull },
+};
+
+static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+       int pmc, byte, unit, sh, spcsel;
+       u64 mask = 0, value = 0;
+       int grp = -1;
+
+       pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+       if (pmc) {
+               if (pmc > 8)
+                       return -1;
+               sh = (pmc - 1) * 2;
+               mask |= 2 << sh;
+               value |= 1 << sh;
+               grp = ((pmc - 1) >> 1) & 1;
+       }
+       unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+       if (unit) {
+               if (unit > PM_LASTUNIT)
+                       return -1;
+               mask |= unit_cons[unit][0];
+               value |= unit_cons[unit][1];
+               byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+               /*
+                * Bus events on bytes 0 and 2 can be counted
+                * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
+                */
+               if (!pmc)
+                       grp = byte & 1;
+               /* Set byte lane select field */
+               mask  |= 0xfULL << (28 - 4 * byte);
+               value |= (u64)unit << (28 - 4 * byte);
+       }
+       if (grp == 0) {
+               /* increment PMC1/2/5/6 field */
+               mask  |= 0x8000000000ull;
+               value |= 0x1000000000ull;
+       } else if (grp == 1) {
+               /* increment PMC3/4/7/8 field */
+               mask  |= 0x800000000ull;
+               value |= 0x100000000ull;
+       }
+       spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
+       if (spcsel) {
+               mask  |= 3ull << 48;
+               value |= (u64)spcsel << 48;
+       }
+       *maskp = mask;
+       *valp = value;
+       return 0;
+}
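The mask/value pairs built above compose across events: wherever two
events' masks overlap, select-type fields (such as the byte-lane unit
selects) must carry the same value.  A minimal standalone sketch of that
idea follows; it is not the kernel's actual check (that lives in the
powerpc perf_counter core and also folds in the additive add_fields/
test_adder machinery), and the unit codes 1 and 6 are arbitrary examples.

/*
 * Simplified compatibility rule for two (mask, value) constraint pairs
 * as produced by p970_get_constraint(): where both events constrain
 * the same bits, the values must match.
 */
#include <stdio.h>
#include <stdint.h>

static int select_fields_agree(uint64_t m1, uint64_t v1,
                               uint64_t m2, uint64_t v2)
{
        uint64_t both = m1 & m2;

        return (v1 & both) == (v2 & both);
}

int main(void)
{
        /* two bus events on byte 0: the unit select field is bits 28-31 */
        uint64_t m1 = 0xfULL << 28, v1 = 1ULL << 28;    /* unit code 1 */
        uint64_t m2 = 0xfULL << 28, v2 = 6ULL << 28;    /* unit code 6 */

        printf("same byte lane, different units -> %s\n",
               select_fields_agree(m1, v1, m2, v2) ? "ok" : "conflict");
        return 0;
}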
+
+static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+       alt[0] = event;
+
+       /* 2 alternatives for LSU empty */
+       if (event == 0x2002 || event == 0x3002) {
+               alt[1] = event ^ 0x1000;
+               return 2;
+       }
+
+       return 1;
+}
+
+static int p970_compute_mmcr(u64 event[], int n_ev,
+                            unsigned int hwc[], u64 mmcr[])
+{
+       u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+       unsigned int pmc, unit, byte, psel;
+       unsigned int ttm, grp;
+       unsigned int pmc_inuse = 0;
+       unsigned int pmc_grp_use[2];
+       unsigned char busbyte[4];
+       unsigned char unituse[16];
+       unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 };
+       unsigned char ttmuse[2];
+       unsigned char pmcsel[8];
+       int i;
+       int spcsel;
+
+       if (n_ev > 8)
+               return -1;
+
+       /* First pass to count resource use */
+       pmc_grp_use[0] = pmc_grp_use[1] = 0;
+       memset(busbyte, 0, sizeof(busbyte));
+       memset(unituse, 0, sizeof(unituse));
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               if (pmc) {
+                       if (pmc_inuse & (1 << (pmc - 1)))
+                               return -1;
+                       pmc_inuse |= 1 << (pmc - 1);
+                       /* count 1/2/5/6 vs 3/4/7/8 use */
+                       ++pmc_grp_use[((pmc - 1) >> 1) & 1];
+               }
+               unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+               byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+               if (unit) {
+                       if (unit > PM_LASTUNIT)
+                               return -1;
+                       if (!pmc)
+                               ++pmc_grp_use[byte & 1];
+                       if (busbyte[byte] && busbyte[byte] != unit)
+                               return -1;
+                       busbyte[byte] = unit;
+                       unituse[unit] = 1;
+               }
+       }
+       if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
+               return -1;
+
+       /*
+        * Assign resources and set multiplexer selects.
+        *
+        * PM_ISU can go either on TTM0 or TTM1, but that's the only
+        * choice we have to deal with.
+        */
+       if (unituse[PM_ISU] &
+           (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU]))
+               unitmap[PM_ISU] = 2 | 4;        /* move ISU to TTM1 */
+       /* Set TTM[01]SEL fields. */
+       ttmuse[0] = ttmuse[1] = 0;
+       for (i = PM_FPU; i <= PM_STS; ++i) {
+               if (!unituse[i])
+                       continue;
+               ttm = unitmap[i];
+               ++ttmuse[(ttm >> 2) & 1];
+               mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH;
+       }
+       /* Check only one unit per TTMx */
+       if (ttmuse[0] > 1 || ttmuse[1] > 1)
+               return -1;
+
+       /* Set byte lane select fields and TTM3SEL. */
+       for (byte = 0; byte < 4; ++byte) {
+               unit = busbyte[byte];
+               if (!unit)
+                       continue;
+               if (unit <= PM_STS)
+                       ttm = (unitmap[unit] >> 2) & 1;
+               else if (unit == PM_LSU0)
+                       ttm = 2;
+               else {
+                       ttm = 3;
+                       if (unit == PM_LSU1L && byte >= 2)
+                               mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+               }
+               mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+       }
+
+       /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+       memset(pmcsel, 0x8, sizeof(pmcsel));    /* 8 means don't count */
+       for (i = 0; i < n_ev; ++i) {
+               pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+               unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+               byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+               psel = event[i] & PM_PMCSEL_MSK;
+               if (!pmc) {
+                       /* Bus event or any-PMC direct event */
+                       if (unit)
+                               psel |= 0x10 | ((byte & 2) << 2);
+                       else
+                               psel |= 8;
+                       for (pmc = 0; pmc < 8; ++pmc) {
+                               if (pmc_inuse & (1 << pmc))
+                                       continue;
+                               grp = (pmc >> 1) & 1;
+                               if (unit) {
+                                       if (grp == (byte & 1))
+                                               break;
+                               } else if (pmc_grp_use[grp] < 4) {
+                                       ++pmc_grp_use[grp];
+                                       break;
+                               }
+                       }
+                       pmc_inuse |= 1 << pmc;
+               } else {
+                       /* Direct event */
+                       --pmc;
+                       if (psel == 0 && (byte & 2))
+                               /* add events on higher-numbered bus */
+                               mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
+               }
+               pmcsel[pmc] = psel;
+               hwc[i] = pmc;
+               spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
+               mmcr1 |= spcsel;
+               if (p970_marked_instr_event(event[i]))
+                       mmcra |= MMCRA_SAMPLE_ENABLE;
+       }
+       for (pmc = 0; pmc < 2; ++pmc)
+               mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
+       for (; pmc < 8; ++pmc)
+               mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
+       if (pmc_inuse & 1)
+               mmcr0 |= MMCR0_PMC1CE;
+       if (pmc_inuse & 0xfe)
+               mmcr0 |= MMCR0_PMCjCE;
+
+       mmcra |= 0x2000;        /* mark only one IOP per PPC instruction */
+
+       /* Return MMCRx values */
+       mmcr[0] = mmcr0;
+       mmcr[1] = mmcr1;
+       mmcr[2] = mmcra;
+       return 0;
+}
+
+static void p970_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+       int shift, i;
+
+       if (pmc <= 1) {
+               shift = MMCR0_PMC1SEL_SH - 7 * pmc;
+               i = 0;
+       } else {
+               shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2);
+               i = 1;
+       }
+       /*
+        * Setting the PMCxSEL field to 0x08 disables PMC x.
+        */
+       mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift);
+}
+
+static int ppc970_generic_events[] = {
+       [PERF_COUNT_HW_CPU_CYCLES]              = 7,
+       [PERF_COUNT_HW_INSTRUCTIONS]            = 1,
+       [PERF_COUNT_HW_CACHE_REFERENCES]        = 0x8810, /* PM_LD_REF_L1 */
+       [PERF_COUNT_HW_CACHE_MISSES]            = 0x3810, /* PM_LD_MISS_L1 */
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]     = 0x431,  /* PM_BR_ISSUED */
+       [PERF_COUNT_HW_BRANCH_MISSES]           = 0x327,  /* PM_GRP_BR_MPRED */
+};
+
+#define C(x)   PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+       [C(L1D)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x8810,         0x3810  },
+               [C(OP_WRITE)] = {       0x7810,         0x813   },
+               [C(OP_PREFETCH)] = {    0x731,          0       },
+       },
+       [C(L1I)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0       },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    0,              0       },
+       },
+       [C(LL)] = {             /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0       },
+               [C(OP_WRITE)] = {       0,              0       },
+               [C(OP_PREFETCH)] = {    0x733,          0       },
+       },
+       [C(DTLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x704   },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+       [C(ITLB)] = {           /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0,              0x700   },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+       [C(BPU)] = {            /*      RESULT_ACCESS   RESULT_MISS */
+               [C(OP_READ)] = {        0x431,          0x327   },
+               [C(OP_WRITE)] = {       -1,             -1      },
+               [C(OP_PREFETCH)] = {    -1,             -1      },
+       },
+};
+
+struct power_pmu ppc970_pmu = {
+       .n_counter = 8,
+       .max_alternatives = 2,
+       .add_fields = 0x001100005555ull,
+       .test_adder = 0x013300000000ull,
+       .compute_mmcr = p970_compute_mmcr,
+       .get_constraint = p970_get_constraint,
+       .get_alternatives = p970_get_alternatives,
+       .disable_pmc = p970_disable_pmc,
+       .n_generic = ARRAY_SIZE(ppc970_generic_events),
+       .generic_events = ppc970_generic_events,
+       .cache_events = &ppc970_cache_events,
+};
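The add_fields/test_adder pair above drives the carry trick behind the
count fields in the constraint layout: add_fields contributes 1 per
event to each group's counter, and test_adder pre-biases each field so
that an over-committed group carries into its error bit.  A worked
standalone example for the PS1 field (count in bits 36-38, error bit 39;
at most 4 events fit in the PMC1/2/5/6 group), under the reading that
0x3 << 36 is the PS1 portion of test_adder:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t test_adder = 0x3ULL << 36;     /* PS1 part of 0x013300000000 */
        int n;

        for (n = 3; n <= 5; ++n) {
                uint64_t sum = (uint64_t)n << 36;  /* n events in the group */

                /* 4 + 3 = 7 still fits in bits 36-38; 5 + 3 = 8 carries */
                printf("%d events -> PS1 error %s\n", n,
                       ((sum + test_adder) >> 39) & 1 ? "set" : "clear");
        }
        return 0;
}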
index 7699394..5beffc8 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
+#include <linux/perf_counter.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
@@ -170,6 +171,8 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
                die("Weird page fault", regs, SIGSEGV);
        }
 
+       perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -309,6 +312,8 @@ good_area:
        }
        if (ret & VM_FAULT_MAJOR) {
                current->maj_flt++;
+               perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0,
+                                    regs, address);
 #ifdef CONFIG_PPC_SMLPAR
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        preempt_disable();
@@ -316,8 +321,11 @@ good_area:
                        preempt_enable();
                }
 #endif
-       } else
+       } else {
                current->min_flt++;
+               perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0,
+                                    regs, address);
+       }
        up_read(&mm->mmap_sem);
        return 0;
 
index 9da795e..732ee93 100644 (file)
@@ -1,6 +1,7 @@
 config PPC64
        bool "64-bit kernel"
        default n
+       select HAVE_PERF_COUNTERS
        help
          This option selects whether a 32-bit or a 64-bit kernel
          will be built.
index aafae3b..68f5578 100644 (file)
@@ -739,6 +739,7 @@ config X86_UP_IOAPIC
 config X86_LOCAL_APIC
        def_bool y
        depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
+       select HAVE_PERF_COUNTERS if (!M386 && !M486)
 
 config X86_IO_APIC
        def_bool y
index dcef387..e590261 100644 (file)
@@ -825,10 +825,11 @@ ia32_sys_call_table:
        .quad compat_sys_signalfd4
        .quad sys_eventfd2
        .quad sys_epoll_create1
-       .quad sys_dup3                  /* 330 */
+       .quad sys_dup3                          /* 330 */
        .quad sys_pipe2
        .quad sys_inotify_init1
        .quad compat_sys_preadv
        .quad compat_sys_pwritev
        .quad compat_sys_rt_tgsigqueueinfo      /* 335 */
+       .quad sys_perf_counter_open
 ia32_syscall_end:
index 85b46fb..aff9f1f 100644 (file)
@@ -247,5 +247,241 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+/* A 64-bit atomic type */
+
+typedef struct {
+       unsigned long long counter;
+} atomic64_t;
+
+#define ATOMIC64_INIT(val)     { (val) }
+
+/**
+ * __atomic64_read - raw read of atomic64 variable
+ * @ptr: pointer of type atomic64_t
+ *
+ * Reads the value of @ptr; the load itself is not guaranteed atomic.
+ * Doesn't imply a read memory barrier.
+ */
+#define __atomic64_read(ptr)           ((ptr)->counter)
+
+static inline unsigned long long
+cmpxchg8b(unsigned long long *ptr, unsigned long long old, unsigned long long new)
+{
+       asm volatile(
+
+               LOCK_PREFIX "cmpxchg8b (%[ptr])\n"
+
+                    :          "=A" (old)
+
+                    : [ptr]    "D" (ptr),
+                               "A" (old),
+                               "b" (ll_low(new)),
+                               "c" (ll_high(new))
+
+                    : "memory");
+
+       return old;
+}
+
+static inline unsigned long long
+atomic64_cmpxchg(atomic64_t *ptr, unsigned long long old_val,
+                unsigned long long new_val)
+{
+       return cmpxchg8b(&ptr->counter, old_val, new_val);
+}
+
+/**
+ * atomic64_xchg - xchg atomic64 variable
+ * @ptr:      pointer to type atomic64_t
+ * @new_val:  value to assign
+ *
+ * Atomically xchgs the value of @ptr to @new_val and returns
+ * the old value.
+ */
+static inline unsigned long long
+atomic64_xchg(atomic64_t *ptr, unsigned long long new_val)
+{
+       unsigned long long old_val;
+
+       do {
+               old_val = __atomic64_read(ptr);
+       } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
+
+       return old_val;
+}
+
+/**
+ * atomic64_set - set atomic64 variable
+ * @ptr:      pointer to type atomic64_t
+ * @new_val:  value to assign
+ *
+ * Atomically sets the value of @ptr to @new_val.
+ */
+static inline void atomic64_set(atomic64_t *ptr, unsigned long long new_val)
+{
+       atomic64_xchg(ptr, new_val);
+}
+
+/**
+ * atomic64_read - read atomic64 variable
+ * @ptr:      pointer to type atomic64_t
+ *
+ * Atomically reads the value of @ptr and returns it.
+ */
+static inline unsigned long long atomic64_read(atomic64_t *ptr)
+{
+       unsigned long long curr_val;
+
+       do {
+               curr_val = __atomic64_read(ptr);
+       } while (atomic64_cmpxchg(ptr, curr_val, curr_val) != curr_val);
+
+       return curr_val;
+}
+
+/**
+ * atomic64_add_return - add and return
+ * @delta: integer value to add
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
+ */
+static inline unsigned long long
+atomic64_add_return(unsigned long long delta, atomic64_t *ptr)
+{
+       unsigned long long old_val, new_val;
+
+       do {
+               old_val = __atomic64_read(ptr);
+               new_val = old_val + delta;
+
+       } while (atomic64_cmpxchg(ptr, old_val, new_val) != old_val);
+
+       return new_val;
+}
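The read/compute/cmpxchg retry loop used by atomic64_xchg() and
atomic64_add_return() above can be sketched as ordinary user-space C;
this version substitutes GCC's __sync builtin for cmpxchg8b and is
illustrative only:

#include <stdio.h>

static unsigned long long
demo_add_return(unsigned long long delta, unsigned long long *ptr)
{
        unsigned long long old_val, new_val;

        do {
                old_val = *ptr;         /* possibly racy read */
                new_val = old_val + delta;
                /* retry if *ptr changed between the read and the swap */
        } while (__sync_val_compare_and_swap(ptr, old_val, new_val) != old_val);

        return new_val;
}

int main(void)
{
        unsigned long long v = 40;

        printf("%llu\n", demo_add_return(2, &v));       /* prints 42 */
        return 0;
}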
+
+static inline unsigned long long
+atomic64_sub_return(unsigned long long delta, atomic64_t *ptr)
+{
+       return atomic64_add_return(-delta, ptr);
+}
+
+static inline unsigned long long atomic64_inc_return(atomic64_t *ptr)
+{
+       return atomic64_add_return(1, ptr);
+}
+
+static inline unsigned long long atomic64_dec_return(atomic64_t *ptr)
+{
+       return atomic64_sub_return(1, ptr);
+}
+
+/**
+ * atomic64_add - add integer to atomic64 variable
+ * @delta: integer value to add
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr.
+ */
+static inline void atomic64_add(unsigned long long delta, atomic64_t *ptr)
+{
+       atomic64_add_return(delta, ptr);
+}
+
+/**
+ * atomic64_sub - subtract integer from atomic64 variable
+ * @delta: integer value to subtract
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically subtracts @delta from @ptr.
+ */
+static inline void atomic64_sub(unsigned long long delta, atomic64_t *ptr)
+{
+       atomic64_add(-delta, ptr);
+}
+
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @delta: integer value to subtract
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically subtracts @delta from @ptr and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int
+atomic64_sub_and_test(unsigned long long delta, atomic64_t *ptr)
+{
+       unsigned long long old_val = atomic64_sub_return(delta, ptr);
+
+       return old_val == 0;
+}
+
+/**
+ * atomic64_inc - increment atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically increments @ptr by 1.
+ */
+static inline void atomic64_inc(atomic64_t *ptr)
+{
+       atomic64_add(1, ptr);
+}
+
+/**
+ * atomic64_dec - decrement atomic64 variable
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically decrements @ptr by 1.
+ */
+static inline void atomic64_dec(atomic64_t *ptr)
+{
+       atomic64_sub(1, ptr);
+}
+
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically decrements @ptr by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic64_dec_and_test(atomic64_t *ptr)
+{
+       return atomic64_sub_and_test(1, ptr);
+}
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @ptr: pointer to type atomic64_t
+ *
+ * Atomically increments @ptr by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+static inline int atomic64_inc_and_test(atomic64_t *ptr)
+{
+       return atomic64_sub_and_test(-1, ptr);
+}
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @delta: integer value to add
+ * @ptr:   pointer to type atomic64_t
+ *
+ * Atomically adds @delta to @ptr and returns true
+ * if the result is negative, or false when
+ * result is greater than or equal to zero.
+ */
+static inline int
+atomic64_add_negative(unsigned long long delta, atomic64_t *ptr)
+{
+       long long old_val = atomic64_add_return(delta, ptr);
+
+       return old_val < 0;
+}
+
 #include <asm-generic/atomic.h>
 #endif /* _ASM_X86_ATOMIC_32_H */
index c2e6bed..d750a10 100644 (file)
@@ -49,7 +49,7 @@ BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
 
 #ifdef CONFIG_PERF_COUNTERS
-BUILD_INTERRUPT(perf_counter_interrupt, LOCAL_PERF_VECTOR)
+BUILD_INTERRUPT(perf_pending_interrupt, LOCAL_PENDING_VECTOR)
 #endif
 
 #ifdef CONFIG_X86_MCE_P4THERMAL
index 37555e5..9ebc5c2 100644 (file)
@@ -13,6 +13,8 @@ typedef struct {
        unsigned int irq_spurious_count;
 #endif
        unsigned int generic_irqs;      /* arch dependent */
+       unsigned int apic_perf_irqs;
+       unsigned int apic_pending_irqs;
 #ifdef CONFIG_SMP
        unsigned int irq_resched_count;
        unsigned int irq_call_count;
index 3bd1777..6df45f6 100644 (file)
@@ -29,6 +29,8 @@
 extern void apic_timer_interrupt(void);
 extern void generic_interrupt(void);
 extern void error_interrupt(void);
+extern void perf_pending_interrupt(void);
+
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
 extern void reschedule_interrupt(void);
diff --git a/arch/x86/include/asm/intel_arch_perfmon.h b/arch/x86/include/asm/intel_arch_perfmon.h
deleted file mode 100644 (file)
index fa0fd06..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-#ifndef _ASM_X86_INTEL_ARCH_PERFMON_H
-#define _ASM_X86_INTEL_ARCH_PERFMON_H
-
-#define MSR_ARCH_PERFMON_PERFCTR0              0xc1
-#define MSR_ARCH_PERFMON_PERFCTR1              0xc2
-
-#define MSR_ARCH_PERFMON_EVENTSEL0             0x186
-#define MSR_ARCH_PERFMON_EVENTSEL1             0x187
-
-#define ARCH_PERFMON_EVENTSEL0_ENABLE  (1 << 22)
-#define ARCH_PERFMON_EVENTSEL_INT      (1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS       (1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR      (1 << 16)
-
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL  (0x3c)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK        (0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX (0)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
-       (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
-
-union cpuid10_eax {
-       struct {
-               unsigned int version_id:8;
-               unsigned int num_counters:8;
-               unsigned int bit_width:8;
-               unsigned int mask_length:8;
-       } split;
-       unsigned int full;
-};
-
-#endif /* _ASM_X86_INTEL_ARCH_PERFMON_H */
index 910b5a3..e997be9 100644 (file)
 #define LOCAL_TIMER_VECTOR             0xef
 
 /*
- * Performance monitoring interrupt vector:
+ * Generic system vector for platform specific use
  */
-#define LOCAL_PERF_VECTOR              0xee
+#define GENERIC_INTERRUPT_VECTOR       0xed
 
 /*
- * Generic system vector for platform specific use
+ * Performance monitoring pending work vector:
  */
-#define GENERIC_INTERRUPT_VECTOR       0xed
+#define LOCAL_PENDING_VECTOR           0xec
 
 /*
  * First APIC vector available to drivers: (vectors 0x30-0xee) we
diff --git a/arch/x86/include/asm/perf_counter.h b/arch/x86/include/asm/perf_counter.h
new file mode 100644 (file)
index 0000000..876ed97
--- /dev/null
@@ -0,0 +1,100 @@
+#ifndef _ASM_X86_PERF_COUNTER_H
+#define _ASM_X86_PERF_COUNTER_H
+
+/*
+ * Performance counter hw details:
+ */
+
+#define X86_PMC_MAX_GENERIC                                    8
+#define X86_PMC_MAX_FIXED                                      3
+
+#define X86_PMC_IDX_GENERIC                                    0
+#define X86_PMC_IDX_FIXED                                     32
+#define X86_PMC_IDX_MAX                                               64
+
+#define MSR_ARCH_PERFMON_PERFCTR0                            0xc1
+#define MSR_ARCH_PERFMON_PERFCTR1                            0xc2
+
+#define MSR_ARCH_PERFMON_EVENTSEL0                          0x186
+#define MSR_ARCH_PERFMON_EVENTSEL1                          0x187
+
+#define ARCH_PERFMON_EVENTSEL0_ENABLE                    (1 << 22)
+#define ARCH_PERFMON_EVENTSEL_INT                        (1 << 20)
+#define ARCH_PERFMON_EVENTSEL_OS                         (1 << 17)
+#define ARCH_PERFMON_EVENTSEL_USR                        (1 << 16)
+
+/*
+ * Includes eventsel and unit mask as well:
+ */
+#define ARCH_PERFMON_EVENT_MASK                                    0xffff
+
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL                0x3c
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK                (0x00 << 8)
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX                 0
+#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
+               (1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))
+
+#define ARCH_PERFMON_BRANCH_MISSES_RETIRED                      6
+
+/*
+ * Intel "Architectural Performance Monitoring" CPUID
+ * detection/enumeration details:
+ */
+union cpuid10_eax {
+       struct {
+               unsigned int version_id:8;
+               unsigned int num_counters:8;
+               unsigned int bit_width:8;
+               unsigned int mask_length:8;
+       } split;
+       unsigned int full;
+};
+
+union cpuid10_edx {
+       struct {
+               unsigned int num_counters_fixed:4;
+               unsigned int reserved:28;
+       } split;
+       unsigned int full;
+};
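A user-space sketch of the enumeration these unions describe, reusing
the two union definitions just above together with GCC's <cpuid.h>
helper; illustrative only, and it assumes a CPU that reports Intel
architectural perfmon in CPUID leaf 0xa:

#include <stdio.h>
#include <cpuid.h>

/* union cpuid10_eax and union cpuid10_edx as defined above */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;
        union cpuid10_eax ax;
        union cpuid10_edx dx;

        if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
                return 1;       /* leaf not available */

        ax.full = eax;
        dx.full = edx;
        printf("version %u, %u generic counters of %u bits, %u fixed\n",
               ax.split.version_id, ax.split.num_counters,
               ax.split.bit_width, dx.split.num_counters_fixed);
        return 0;
}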
+
+
+/*
+ * Fixed-purpose performance counters:
+ */
+
+/*
+ * All 3 fixed-mode PMCs are configured via this single MSR:
+ */
+#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL                        0x38d
+
+/*
+ * The counts are available in three separate MSRs:
+ */
+
+/* Instr_Retired.Any: */
+#define MSR_ARCH_PERFMON_FIXED_CTR0                    0x309
+#define X86_PMC_IDX_FIXED_INSTRUCTIONS                 (X86_PMC_IDX_FIXED + 0)
+
+/* CPU_CLK_Unhalted.Core: */
+#define MSR_ARCH_PERFMON_FIXED_CTR1                    0x30a
+#define X86_PMC_IDX_FIXED_CPU_CYCLES                   (X86_PMC_IDX_FIXED + 1)
+
+/* CPU_CLK_Unhalted.Ref: */
+#define MSR_ARCH_PERFMON_FIXED_CTR2                    0x30b
+#define X86_PMC_IDX_FIXED_BUS_CYCLES                   (X86_PMC_IDX_FIXED + 2)
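A sketch of the 4-bits-per-counter layout this single control MSR
implies: counter idx owns the nibble at (idx * 4), and the bit encoding
(0x1 ring 0, 0x2 ring 3, 0x8 PMI on overflow) matches what
intel_pmu_enable_fixed() programs later in this patch.

#include <stdio.h>

static unsigned long long
fixed_ctrl_bits(int idx, int user, int os, int pmi)
{
        unsigned long long bits = 0;

        if (os)
                bits |= 0x1;    /* count in ring 0 */
        if (user)
                bits |= 0x2;    /* count in ring 3 */
        if (pmi)
                bits |= 0x8;    /* raise PMI on overflow */
        return bits << (idx * 4);
}

int main(void)
{
        /* FIXED_CTR1 (cycles), user + kernel, with PMI: prints 0xb0 */
        printf("%#llx\n", fixed_ctrl_bits(1, 1, 1, 1));
        return 0;
}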
+
+extern void set_perf_counter_pending(void);
+
+#define clear_perf_counter_pending()   do { } while (0)
+#define test_perf_counter_pending()    (0)
+
+#ifdef CONFIG_PERF_COUNTERS
+extern void init_hw_perf_counters(void);
+extern void perf_counters_lapic_init(void);
+#else
+static inline void init_hw_perf_counters(void)         { }
+static inline void perf_counters_lapic_init(void)      { }
+#endif
+
+#endif /* _ASM_X86_PERF_COUNTER_H */
index 708dae6..732a307 100644 (file)
 #define __NR_preadv            333
 #define __NR_pwritev           334
 #define __NR_rt_tgsigqueueinfo 335
+#define __NR_perf_counter_open 336
 
 #ifdef __KERNEL__
 
index 4e2b054..900e161 100644 (file)
@@ -659,7 +659,8 @@ __SYSCALL(__NR_preadv, sys_preadv)
 __SYSCALL(__NR_pwritev, sys_pwritev)
 #define __NR_rt_tgsigqueueinfo                 297
 __SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-
+#define __NR_perf_counter_open                 298
+__SYSCALL(__NR_perf_counter_open, sys_perf_counter_open)
 
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
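A hypothetical user-space caller of the syscall wired up above.  It
assumes the include/linux/perf_counter.h added elsewhere in this series
is installed; the five-argument signature (attr, pid, cpu, group_fd,
flags) and the attr fields come from that header:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_counter.h>

int main(void)
{
        struct perf_counter_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;

        /* count CPU cycles for the calling task, on any CPU */
        fd = syscall(__NR_perf_counter_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_counter_open");
                return 1;
        }
        close(fd);
        return 0;
}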
index a4c9cf0..076d388 100644 (file)
@@ -14,6 +14,7 @@
  *     Mikael Pettersson       :       PM converted to driver model.
  */
 
+#include <linux/perf_counter.h>
 #include <linux/kernel_stat.h>
 #include <linux/mc146818rtc.h>
 #include <linux/acpi_pmtmr.h>
@@ -34,6 +35,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 
+#include <asm/perf_counter.h>
 #include <asm/pgalloc.h>
 #include <asm/atomic.h>
 #include <asm/mpspec.h>
@@ -1187,6 +1189,7 @@ void __cpuinit setup_local_APIC(void)
                apic_write(APIC_ESR, 0);
        }
 #endif
+       perf_counters_lapic_init();
 
        preempt_disable();
 
index 4e242f9..3efcb2b 100644 (file)
@@ -1,5 +1,5 @@
 #
-# Makefile for x86-compatible CPU details and quirks
+# Makefile for x86-compatible CPU details, features and quirks
 #
 
 # Don't trace early stages of a secondary CPU boot
@@ -23,11 +23,13 @@ obj-$(CONFIG_CPU_SUP_CENTAUR)               += centaur.o
 obj-$(CONFIG_CPU_SUP_TRANSMETA_32)     += transmeta.o
 obj-$(CONFIG_CPU_SUP_UMC_32)           += umc.o
 
-obj-$(CONFIG_X86_MCE)  += mcheck/
-obj-$(CONFIG_MTRR)     += mtrr/
-obj-$(CONFIG_CPU_FREQ) += cpufreq/
+obj-$(CONFIG_PERF_COUNTERS)            += perf_counter.o
 
-obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o
+obj-$(CONFIG_X86_MCE)                  += mcheck/
+obj-$(CONFIG_MTRR)                     += mtrr/
+obj-$(CONFIG_CPU_FREQ)                 += cpufreq/
+
+obj-$(CONFIG_X86_LOCAL_APIC)           += perfctr-watchdog.o
 
 quiet_cmd_mkcapflags = MKCAP   $@
       cmd_mkcapflags = $(PERL) $(srctree)/$(src)/mkcapflags.pl $< $@
index b0517aa..3ffdcfa 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/io.h>
 
 #include <asm/stackprotector.h>
+#include <asm/perf_counter.h>
 #include <asm/mmu_context.h>
 #include <asm/hypervisor.h>
 #include <asm/processor.h>
@@ -874,6 +875,7 @@ void __init identify_boot_cpu(void)
 #else
        vgetcpu_set_mode();
 #endif
+       init_hw_perf_counters();
 }
 
 void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
new file mode 100644 (file)
index 0000000..895c82e
--- /dev/null
@@ -0,0 +1,1704 @@
+/*
+ * Performance counter x86 architecture code
+ *
+ *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ *  Copyright (C) 2009 Jaswinder Singh Rajput
+ *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ *
+ *  For licencing details see kernel-base/COPYING
+ */
+
+#include <linux/perf_counter.h>
+#include <linux/capability.h>
+#include <linux/notifier.h>
+#include <linux/hardirq.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+
+#include <asm/apic.h>
+#include <asm/stacktrace.h>
+#include <asm/nmi.h>
+
+static u64 perf_counter_mask __read_mostly;
+
+struct cpu_hw_counters {
+       struct perf_counter     *counters[X86_PMC_IDX_MAX];
+       unsigned long           used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       unsigned long           interrupts;
+       int                     enabled;
+};
+
+/*
+ * struct x86_pmu - generic x86 pmu
+ */
+struct x86_pmu {
+       const char      *name;
+       int             version;
+       int             (*handle_irq)(struct pt_regs *);
+       void            (*disable_all)(void);
+       void            (*enable_all)(void);
+       void            (*enable)(struct hw_perf_counter *, int);
+       void            (*disable)(struct hw_perf_counter *, int);
+       unsigned        eventsel;
+       unsigned        perfctr;
+       u64             (*event_map)(int);
+       u64             (*raw_event)(u64);
+       int             max_events;
+       int             num_counters;
+       int             num_counters_fixed;
+       int             counter_bits;
+       u64             counter_mask;
+       u64             max_period;
+       u64             intel_ctrl;
+};
+
+static struct x86_pmu x86_pmu __read_mostly;
+
+static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
+       .enabled = 1,
+};
+
+/*
+ * Intel PerfMon v3. Used on Core2 and later.
+ */
+static const u64 intel_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]           = 0x003c,
+  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x4f2e,
+  [PERF_COUNT_HW_CACHE_MISSES]         = 0x412e,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]           = 0x013c,
+};
+
+static u64 intel_pmu_event_map(int event)
+{
+       return intel_perfmon_event_map[event];
+}
+
+/*
+ * Generalized hw caching related event table, filled
+ * in on a per model basis. A value of 0 means
+ * 'not supported', -1 means 'event makes no sense on
+ * this CPU', any other value means the raw event
+ * ID.
+ */
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+static u64 __read_mostly hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX];
+
+static const u64 nehalem_hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
+               [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
+               [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
+               [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
+       },
+ },
+ [ C(L1I ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
+               [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0,
+               [ C(RESULT_MISS)   ] = 0x0,
+       },
+ },
+ [ C(LL  ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
+               [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
+               [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
+               [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
+       },
+ },
+ [ C(DTLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
+               [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
+               [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0,
+               [ C(RESULT_MISS)   ] = 0x0,
+       },
+ },
+ [ C(ITLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
+               [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+ [ C(BPU ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
+               [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+};
+
+static const u64 core2_hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
+               [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
+               [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(L1I ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
+               [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(LL  ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
+               [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
+               [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(DTLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
+               [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
+               [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(ITLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
+               [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+ [ C(BPU ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
+               [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+};
+
+static const u64 atom_hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(L1I ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
+               [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(LL  ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
+               [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
+               [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(DTLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
+               [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
+               [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(ITLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
+               [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+ [ C(BPU ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
+               [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+};
+
+static u64 intel_pmu_raw_event(u64 event)
+{
+#define CORE_EVNTSEL_EVENT_MASK                0x000000FFULL
+#define CORE_EVNTSEL_UNIT_MASK         0x0000FF00ULL
+#define CORE_EVNTSEL_EDGE_MASK         0x00040000ULL
+#define CORE_EVNTSEL_INV_MASK          0x00800000ULL
+#define CORE_EVNTSEL_COUNTER_MASK      0xFF000000ULL
+
+#define CORE_EVNTSEL_MASK              \
+       (CORE_EVNTSEL_EVENT_MASK |      \
+        CORE_EVNTSEL_UNIT_MASK  |      \
+        CORE_EVNTSEL_EDGE_MASK  |      \
+        CORE_EVNTSEL_INV_MASK  |       \
+        CORE_EVNTSEL_COUNTER_MASK)
+
+       return event & CORE_EVNTSEL_MASK;
+}
+
+static const u64 amd_0f_hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(L1I ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
+               [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(LL  ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(DTLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(ITLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
+               [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+ [ C(BPU ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
+               [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+};
+
+/*
+ * AMD Performance Monitor K7 and later.
+ */
+static const u64 amd_perfmon_event_map[] =
+{
+  [PERF_COUNT_HW_CPU_CYCLES]           = 0x0076,
+  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x0080,
+  [PERF_COUNT_HW_CACHE_MISSES]         = 0x0081,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,
+};
+
+static u64 amd_pmu_event_map(int event)
+{
+       return amd_perfmon_event_map[event];
+}
+
+static u64 amd_pmu_raw_event(u64 event)
+{
+#define K7_EVNTSEL_EVENT_MASK  0x7000000FFULL
+#define K7_EVNTSEL_UNIT_MASK   0x00000FF00ULL
+#define K7_EVNTSEL_EDGE_MASK   0x000040000ULL
+#define K7_EVNTSEL_INV_MASK    0x000800000ULL
+#define K7_EVNTSEL_COUNTER_MASK        0x0FF000000ULL
+
+#define K7_EVNTSEL_MASK                        \
+       (K7_EVNTSEL_EVENT_MASK |        \
+        K7_EVNTSEL_UNIT_MASK  |        \
+        K7_EVNTSEL_EDGE_MASK  |        \
+        K7_EVNTSEL_INV_MASK   |        \
+        K7_EVNTSEL_COUNTER_MASK)
+
+       return event & K7_EVNTSEL_MASK;
+}
+
+/*
+ * Propagate counter elapsed time into the generic counter.
+ * Can only be executed on the CPU where the counter is active.
+ * Returns the delta events processed.
+ */
+static u64
+x86_perf_counter_update(struct perf_counter *counter,
+                       struct hw_perf_counter *hwc, int idx)
+{
+       int shift = 64 - x86_pmu.counter_bits;
+       u64 prev_raw_count, new_raw_count;
+       s64 delta;
+
+       /*
+        * Careful: an NMI might modify the previous counter value.
+        *
+        * Our tactic to handle this is to first atomically read and
+        * exchange a new raw count - then add that new-prev delta
+        * count to the generic counter atomically:
+        */
+again:
+       prev_raw_count = atomic64_read(&hwc->prev_count);
+       rdmsrl(hwc->counter_base + idx, new_raw_count);
+
+       if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+                                       new_raw_count) != prev_raw_count)
+               goto again;
+
+       /*
+        * Now we have the new raw value and have updated the prev
+        * timestamp already. We can now calculate the elapsed delta
+        * (counter-)time and add that to the generic counter.
+        *
+        * Careful, not all hw sign-extends above the physical width
+        * of the count.
+        */
+       delta = (new_raw_count << shift) - (prev_raw_count << shift);
+       delta >>= shift;
+
+       atomic64_add(delta, &counter->count);
+       atomic64_sub(delta, &hwc->period_left);
+
+       return new_raw_count;
+}
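A worked example of the shift-up/shift-down sign-extension trick above,
for a hypothetical CPU with 40-bit counters (shift = 24): a raw wrap
from the top of the range to a small value must come out as a small
positive delta rather than a huge negative one.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int shift = 64 - 40;
        uint64_t prev = 0xffffffffffULL;        /* counter just before wrapping */
        uint64_t new  = 0x3;                    /* counter just after wrapping */
        int64_t delta;

        delta = ((int64_t)(new << shift) - (int64_t)(prev << shift)) >> shift;
        printf("delta = %lld\n", (long long)delta);     /* prints 4 */
        return 0;
}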
+
+static atomic_t active_counters;
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+static bool reserve_pmc_hardware(void)
+{
+       int i;
+
+       if (nmi_watchdog == NMI_LOCAL_APIC)
+               disable_lapic_nmi_watchdog();
+
+       for (i = 0; i < x86_pmu.num_counters; i++) {
+               if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+                       goto perfctr_fail;
+       }
+
+       for (i = 0; i < x86_pmu.num_counters; i++) {
+               if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+                       goto eventsel_fail;
+       }
+
+       return true;
+
+eventsel_fail:
+       for (i--; i >= 0; i--)
+               release_evntsel_nmi(x86_pmu.eventsel + i);
+
+       i = x86_pmu.num_counters;
+
+perfctr_fail:
+       for (i--; i >= 0; i--)
+               release_perfctr_nmi(x86_pmu.perfctr + i);
+
+       if (nmi_watchdog == NMI_LOCAL_APIC)
+               enable_lapic_nmi_watchdog();
+
+       return false;
+}
+
+static void release_pmc_hardware(void)
+{
+       int i;
+
+       for (i = 0; i < x86_pmu.num_counters; i++) {
+               release_perfctr_nmi(x86_pmu.perfctr + i);
+               release_evntsel_nmi(x86_pmu.eventsel + i);
+       }
+
+       if (nmi_watchdog == NMI_LOCAL_APIC)
+               enable_lapic_nmi_watchdog();
+}
+
+static void hw_perf_counter_destroy(struct perf_counter *counter)
+{
+       if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
+               release_pmc_hardware();
+               mutex_unlock(&pmc_reserve_mutex);
+       }
+}
+
+static inline int x86_pmu_initialized(void)
+{
+       return x86_pmu.handle_irq != NULL;
+}
+
+static inline int
+set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
+{
+       unsigned int cache_type, cache_op, cache_result;
+       u64 config, val;
+
+       config = attr->config;
+
+       cache_type = (config >>  0) & 0xff;
+       if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+               return -EINVAL;
+
+       cache_op = (config >>  8) & 0xff;
+       if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+               return -EINVAL;
+
+       cache_result = (config >> 16) & 0xff;
+       if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+               return -EINVAL;
+
+       val = hw_cache_event_ids[cache_type][cache_op][cache_result];
+
+       if (val == 0)
+               return -ENOENT;
+
+       if (val == -1)
+               return -EINVAL;
+
+       hwc->config |= val;
+
+       return 0;
+}
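The packing that set_ext_hw_attr() decodes, shown the other way around;
the example values (L1D = 0, OP_READ = 0, RESULT_MISS = 1) are the
PERF_COUNT_HW_CACHE_* enum values defined in this series'
include/linux/perf_counter.h:

#include <stdint.h>

/* cache event config: type in bits 0-7, op in 8-15, result in 16-23 */
#define DEMO_CACHE_CONFIG(type, op, result) \
        ((uint64_t)(type) | ((uint64_t)(op) << 8) | ((uint64_t)(result) << 16))

/* L1 data cache read misses */
static const uint64_t l1d_read_miss = DEMO_CACHE_CONFIG(0, 0, 1);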
+
+/*
+ * Setup the hardware configuration for a given attr_type
+ */
+static int __hw_perf_counter_init(struct perf_counter *counter)
+{
+       struct perf_counter_attr *attr = &counter->attr;
+       struct hw_perf_counter *hwc = &counter->hw;
+       int err;
+
+       if (!x86_pmu_initialized())
+               return -ENODEV;
+
+       err = 0;
+       if (!atomic_inc_not_zero(&active_counters)) {
+               mutex_lock(&pmc_reserve_mutex);
+               if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
+                       err = -EBUSY;
+               else
+                       atomic_inc(&active_counters);
+               mutex_unlock(&pmc_reserve_mutex);
+       }
+       if (err)
+               return err;
+
+       /*
+        * Generate PMC IRQs:
+        * (keep 'enabled' bit clear for now)
+        */
+       hwc->config = ARCH_PERFMON_EVENTSEL_INT;
+
+       /*
+        * Count user and OS events unless requested not to.
+        */
+       if (!attr->exclude_user)
+               hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
+       if (!attr->exclude_kernel)
+               hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+
+       if (!hwc->sample_period) {
+               hwc->sample_period = x86_pmu.max_period;
+               hwc->last_period = hwc->sample_period;
+               atomic64_set(&hwc->period_left, hwc->sample_period);
+       }
+
+       counter->destroy = hw_perf_counter_destroy;
+
+       /*
+        * Raw event types provide the config in the event structure
+        */
+       if (attr->type == PERF_TYPE_RAW) {
+               hwc->config |= x86_pmu.raw_event(attr->config);
+               return 0;
+       }
+
+       if (attr->type == PERF_TYPE_HW_CACHE)
+               return set_ext_hw_attr(hwc, attr);
+
+       if (attr->config >= x86_pmu.max_events)
+               return -EINVAL;
+       /*
+        * The generic map:
+        */
+       hwc->config |= x86_pmu.event_map(attr->config);
+
+       return 0;
+}
+
+static void intel_pmu_disable_all(void)
+{
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
+}
+
+static void amd_pmu_disable_all(void)
+{
+       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+       int idx;
+
+       if (!cpuc->enabled)
+               return;
+
+       cpuc->enabled = 0;
+       /*
+        * ensure we write the disable before we start disabling the
+        * counters proper, so that amd_pmu_enable_counter() does the
+        * right thing.
+        */
+       barrier();
+
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               u64 val;
+
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+               rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+               if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+                       continue;
+               val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+               wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+       }
+}
+
+void hw_perf_disable(void)
+{
+       if (!x86_pmu_initialized())
+               return;
+       x86_pmu.disable_all();
+}
+
+static void intel_pmu_enable_all(void)
+{
+       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+}
+
+static void amd_pmu_enable_all(void)
+{
+       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+       int idx;
+
+       if (cpuc->enabled)
+               return;
+
+       cpuc->enabled = 1;
+       barrier();
+
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               u64 val;
+
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+               rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+               if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+                       continue;
+               val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+               wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+       }
+}
+
+void hw_perf_enable(void)
+{
+       if (!x86_pmu_initialized())
+               return;
+       x86_pmu.enable_all();
+}
+
+static inline u64 intel_pmu_get_status(void)
+{
+       u64 status;
+
+       rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+
+       return status;
+}
+
+static inline void intel_pmu_ack_status(u64 ack)
+{
+       wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
+}
+
+static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+       int err;
+       err = checking_wrmsrl(hwc->config_base + idx,
+                             hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
+}
+
+static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+       int err;
+       err = checking_wrmsrl(hwc->config_base + idx,
+                             hwc->config);
+}
+
+static inline void
+intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
+{
+       int idx = __idx - X86_PMC_IDX_FIXED;
+       u64 ctrl_val, mask;
+       int err;
+
+       mask = 0xfULL << (idx * 4);
+
+       rdmsrl(hwc->config_base, ctrl_val);
+       ctrl_val &= ~mask;
+       err = checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static inline void
+intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+               intel_pmu_disable_fixed(hwc, idx);
+               return;
+       }
+
+       x86_pmu_disable_counter(hwc, idx);
+}
+
+static inline void
+amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+       x86_pmu_disable_counter(hwc, idx);
+}
+
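+/*
+ * The period most recently programmed into each counter, kept
+ * around for perf_counter_print_debug():
+ */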
+static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
+
+/*
+ * Set the next IRQ period, based on the hwc->period_left value.
+ * To be called with the counter disabled in hw:
+ */
+static int
+x86_perf_counter_set_period(struct perf_counter *counter,
+                            struct hw_perf_counter *hwc, int idx)
+{
+       s64 left = atomic64_read(&hwc->period_left);
+       s64 period = hwc->sample_period;
+       int err, ret = 0;
+
+       /*
+        * If we are way outside a reasonable range then just skip forward:
+        */
+       if (unlikely(left <= -period)) {
+               left = period;
+               atomic64_set(&hwc->period_left, left);
+               hwc->last_period = period;
+               ret = 1;
+       }
+
+       if (unlikely(left <= 0)) {
+               left += period;
+               atomic64_set(&hwc->period_left, left);
+               hwc->last_period = period;
+               ret = 1;
+       }
+       /*
+        * Quirk: certain CPUs don't like it if just 1 event is left:
+        */
+       if (unlikely(left < 2))
+               left = 2;
+
+       if (left > x86_pmu.max_period)
+               left = x86_pmu.max_period;
+
+       per_cpu(prev_left[idx], smp_processor_id()) = left;
+
+       /*
+        * The hw counter starts counting from this counter offset,
+        * mark it to be able to extract future deltas:
+        */
+       atomic64_set(&hwc->prev_count, (u64)-left);
+
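+       /*
+        * Program the negated period: the counter counts upwards and
+        * raises a PMI once it overflows, i.e. after 'left' more events:
+        */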
+       err = checking_wrmsrl(hwc->counter_base + idx,
+                            (u64)(-left) & x86_pmu.counter_mask);
+
+       return ret;
+}
+
+static inline void
+intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
+{
+       int idx = __idx - X86_PMC_IDX_FIXED;
+       u64 ctrl_val, bits, mask;
+       int err;
+
+       /*
+        * Enable IRQ generation (0x8),
+        * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
+        * if requested:
+        */
+       bits = 0x8ULL;
+       if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
+               bits |= 0x2;
+       if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
+               bits |= 0x1;
+       bits <<= (idx * 4);
+       mask = 0xfULL << (idx * 4);
+
+       rdmsrl(hwc->config_base, ctrl_val);
+       ctrl_val &= ~mask;
+       ctrl_val |= bits;
+       err = checking_wrmsrl(hwc->config_base, ctrl_val);
+}
+
+static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+               intel_pmu_enable_fixed(hwc, idx);
+               return;
+       }
+
+       x86_pmu_enable_counter(hwc, idx);
+}
+
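+/*
+ * AMD has no global control MSR like Intel's GLOBAL_CTRL, so the
+ * soft-disabled state in cpuc->enabled is honoured here: while the
+ * PMU is disabled a newly scheduled counter is programmed but kept off:
+ */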
+static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
+{
+       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+
+       if (cpuc->enabled)
+               x86_pmu_enable_counter(hwc, idx);
+       else
+               x86_pmu_disable_counter(hwc, idx);
+}
+
+static int
+fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
+{
+       unsigned int event;
+
+       if (!x86_pmu.num_counters_fixed)
+               return -1;
+
+       event = hwc->config & ARCH_PERFMON_EVENT_MASK;
+
+       if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
+               return X86_PMC_IDX_FIXED_INSTRUCTIONS;
+       if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
+               return X86_PMC_IDX_FIXED_CPU_CYCLES;
+       if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
+               return X86_PMC_IDX_FIXED_BUS_CYCLES;
+
+       return -1;
+}
+
+/*
+ * Find a PMC slot for the freshly enabled / scheduled in counter:
+ */
+static int x86_pmu_enable(struct perf_counter *counter)
+{
+       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+       struct hw_perf_counter *hwc = &counter->hw;
+       int idx;
+
+       idx = fixed_mode_idx(counter, hwc);
+       if (idx >= 0) {
+               /*
+                * Try to get the fixed counter; if that is already
+                * taken, fall back to a generic counter:
+                */
+               if (test_and_set_bit(idx, cpuc->used_mask))
+                       goto try_generic;
+
+               hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
+               /*
+                * We set it so that counter_base + idx in wrmsr/rdmsr maps to
+                * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
+                */
+               hwc->counter_base =
+                       MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
+               hwc->idx = idx;
+       } else {
+               idx = hwc->idx;
+               /* Try to get the previous generic counter again */
+               if (test_and_set_bit(idx, cpuc->used_mask)) {
+try_generic:
+                       idx = find_first_zero_bit(cpuc->used_mask,
+                                                 x86_pmu.num_counters);
+                       if (idx == x86_pmu.num_counters)
+                               return -EAGAIN;
+
+                       set_bit(idx, cpuc->used_mask);
+                       hwc->idx = idx;
+               }
+               hwc->config_base  = x86_pmu.eventsel;
+               hwc->counter_base = x86_pmu.perfctr;
+       }
+
+       perf_counters_lapic_init();
+
+       x86_pmu.disable(hwc, idx);
+
+       cpuc->counters[idx] = counter;
+       set_bit(idx, cpuc->active_mask);
+
+       x86_perf_counter_set_period(counter, hwc, idx);
+       x86_pmu.enable(hwc, idx);
+
+       return 0;
+}
+
+static void x86_pmu_unthrottle(struct perf_counter *counter)
+{
+       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+       struct hw_perf_counter *hwc = &counter->hw;
+
+       if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
+                               cpuc->counters[hwc->idx] != counter))
+               return;
+
+       x86_pmu.enable(hwc, hwc->idx);
+}
+
+void perf_counter_print_debug(void)
+{
+       u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
+       struct cpu_hw_counters *cpuc;
+       unsigned long flags;
+       int cpu, idx;
+
+       if (!x86_pmu.num_counters)
+               return;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+       cpuc = &per_cpu(cpu_hw_counters, cpu);
+
+       if (x86_pmu.version >= 2) {
+               rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
+               rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+               rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
+               rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
+
+               pr_info("\n");
+               pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
+               pr_info("CPU#%d: status:     %016llx\n", cpu, status);
+               pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
+               pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
+       }
+       pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used_mask);
+
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
+               rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
+
+               prev_left = per_cpu(prev_left[idx], cpu);
+
+               pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
+                       cpu, idx, pmc_ctrl);
+               pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
+                       cpu, idx, pmc_count);
+               pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
+                       cpu, idx, prev_left);
+       }
+       for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
+               rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
+
+               pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
+                       cpu, idx, pmc_count);
+       }
+       local_irq_restore(flags);
+}
+
+static void x86_pmu_disable(struct perf_counter *counter)
+{
+       struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+       struct hw_perf_counter *hwc = &counter->hw;
+       int idx = hwc->idx;
+
+       /*
+        * Must be done before we disable, otherwise the NMI handler
+        * could re-enable it again:
+        */
+       clear_bit(idx, cpuc->active_mask);
+       x86_pmu.disable(hwc, idx);
+
+       /*
+        * Make sure the cleared pointer becomes visible before we
+        * (potentially) free the counter:
+        */
+       barrier();
+
+       /*
+        * Drain the remaining delta count out of a counter
+        * that we are disabling:
+        */
+       x86_perf_counter_update(counter, hwc, idx);
+       cpuc->counters[idx] = NULL;
+       clear_bit(idx, cpuc->used_mask);
+}
+
+/*
+ * Save and restart an expired counter. Called by NMI contexts,
+ * so it has to be careful about preempting normal counter ops:
+ */
+static int intel_pmu_save_and_restart(struct perf_counter *counter)
+{
+       struct hw_perf_counter *hwc = &counter->hw;
+       int idx = hwc->idx;
+       int ret;
+
+       x86_perf_counter_update(counter, hwc, idx);
+       ret = x86_perf_counter_set_period(counter, hwc, idx);
+
+       if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+               intel_pmu_enable_counter(hwc, idx);
+
+       return ret;
+}
+
+static void intel_pmu_reset(void)
+{
+       unsigned long flags;
+       int idx;
+
+       if (!x86_pmu.num_counters)
+               return;
+
+       local_irq_save(flags);
+
+       printk(KERN_INFO "clearing PMU state on CPU#%d\n", smp_processor_id());
+
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
+               checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
+       }
+       for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
+               checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
+
+       local_irq_restore(flags);
+}
+
+/*
+ * This handler is triggered by the local APIC, so the APIC IRQ handling
+ * rules apply:
+ */
+static int intel_pmu_handle_irq(struct pt_regs *regs)
+{
+       struct perf_sample_data data;
+       struct cpu_hw_counters *cpuc;
+       int bit, cpu, loops;
+       u64 ack, status;
+
+       data.regs = regs;
+       data.addr = 0;
+
+       cpu = smp_processor_id();
+       cpuc = &per_cpu(cpu_hw_counters, cpu);
+
+       perf_disable();
+       status = intel_pmu_get_status();
+       if (!status) {
+               perf_enable();
+               return 0;
+       }
+
+       loops = 0;
+again:
+       if (++loops > 100) {
+               WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
+               perf_counter_print_debug();
+               intel_pmu_reset();
+               perf_enable();
+               return 1;
+       }
+
+       inc_irq_stat(apic_perf_irqs);
+       ack = status;
+       for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+               struct perf_counter *counter = cpuc->counters[bit];
+
+               clear_bit(bit, (unsigned long *) &status);
+               if (!test_bit(bit, cpuc->active_mask))
+                       continue;
+
+               if (!intel_pmu_save_and_restart(counter))
+                       continue;
+
+               if (perf_counter_overflow(counter, 1, &data))
+                       intel_pmu_disable_counter(&counter->hw, bit);
+       }
+
+       intel_pmu_ack_status(ack);
+
+       /*
+        * Repeat if there is more work to be done:
+        */
+       status = intel_pmu_get_status();
+       if (status)
+               goto again;
+
+       perf_enable();
+
+       return 1;
+}
+
+static int amd_pmu_handle_irq(struct pt_regs *regs)
+{
+       struct perf_sample_data data;
+       struct cpu_hw_counters *cpuc;
+       struct perf_counter *counter;
+       struct hw_perf_counter *hwc;
+       int cpu, idx, handled = 0;
+       u64 val;
+
+       data.regs = regs;
+       data.addr = 0;
+
+       cpu = smp_processor_id();
+       cpuc = &per_cpu(cpu_hw_counters, cpu);
+
+       for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+               if (!test_bit(idx, cpuc->active_mask))
+                       continue;
+
+               counter = cpuc->counters[idx];
+               hwc = &counter->hw;
+
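+               /*
+                * Counters are programmed with -(period) and count
+                * upwards; while the sign bit is still set the counter
+                * has not overflowed yet:
+                */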
+               val = x86_perf_counter_update(counter, hwc, idx);
+               if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+                       continue;
+
+               /*
+                * counter overflow
+                */
+               handled         = 1;
+               data.period     = counter->hw.last_period;
+
+               if (!x86_perf_counter_set_period(counter, hwc, idx))
+                       continue;
+
+               if (perf_counter_overflow(counter, 1, &data))
+                       amd_pmu_disable_counter(hwc, idx);
+       }
+
+       if (handled)
+               inc_irq_stat(apic_perf_irqs);
+
+       return handled;
+}
+
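+/*
+ * Deferred counter work (wakeups etc.) must not run from NMI context,
+ * so the overflow path raises a self-IPI and this vector processes
+ * the pending work from normal IRQ context:
+ */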
+void smp_perf_pending_interrupt(struct pt_regs *regs)
+{
+       irq_enter();
+       ack_APIC_irq();
+       inc_irq_stat(apic_pending_irqs);
+       perf_counter_do_pending();
+       irq_exit();
+}
+
+void set_perf_counter_pending(void)
+{
+       apic->send_IPI_self(LOCAL_PENDING_VECTOR);
+}
+
+void perf_counters_lapic_init(void)
+{
+       if (!x86_pmu_initialized())
+               return;
+
+       /*
+        * Always use NMI for PMU
+        */
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+}
+
+static int __kprobes
+perf_counter_nmi_handler(struct notifier_block *self,
+                        unsigned long cmd, void *__args)
+{
+       struct die_args *args = __args;
+       struct pt_regs *regs;
+
+       if (!atomic_read(&active_counters))
+               return NOTIFY_DONE;
+
+       switch (cmd) {
+       case DIE_NMI:
+       case DIE_NMI_IPI:
+               break;
+
+       default:
+               return NOTIFY_DONE;
+       }
+
+       regs = args->regs;
+
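+       /*
+        * The LVT performance-counter entry is typically masked when a
+        * PMI is delivered, so re-arm it before handling the NMI:
+        */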
+       apic_write(APIC_LVTPC, APIC_DM_NMI);
+       /*
+        * Can't rely on the handled return value to say it was our NMI:
+        * two counters could trigger 'simultaneously', raising two
+        * back-to-back NMIs.
+        *
+        * If the first NMI handles both, the second will be empty and
+        * daze the CPU.
+        */
+       x86_pmu.handle_irq(regs);
+
+       return NOTIFY_STOP;
+}
+
+static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
+       .notifier_call          = perf_counter_nmi_handler,
+       .next                   = NULL,
+       .priority               = 1
+};
+
+static struct x86_pmu intel_pmu = {
+       .name                   = "Intel",
+       .handle_irq             = intel_pmu_handle_irq,
+       .disable_all            = intel_pmu_disable_all,
+       .enable_all             = intel_pmu_enable_all,
+       .enable                 = intel_pmu_enable_counter,
+       .disable                = intel_pmu_disable_counter,
+       .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
+       .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
+       .event_map              = intel_pmu_event_map,
+       .raw_event              = intel_pmu_raw_event,
+       .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
+       /*
+        * Intel PMCs cannot be accessed sanely above 32-bit width,
+        * so we install an artificial 1<<31 period regardless of
+        * the generic counter period:
+        */
+       .max_period             = (1ULL << 31) - 1,
+};
+
+static struct x86_pmu amd_pmu = {
+       .name                   = "AMD",
+       .handle_irq             = amd_pmu_handle_irq,
+       .disable_all            = amd_pmu_disable_all,
+       .enable_all             = amd_pmu_enable_all,
+       .enable                 = amd_pmu_enable_counter,
+       .disable                = amd_pmu_disable_counter,
+       .eventsel               = MSR_K7_EVNTSEL0,
+       .perfctr                = MSR_K7_PERFCTR0,
+       .event_map              = amd_pmu_event_map,
+       .raw_event              = amd_pmu_raw_event,
+       .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
+       .num_counters           = 4,
+       .counter_bits           = 48,
+       .counter_mask           = (1ULL << 48) - 1,
+       /* use highest bit to detect overflow */
+       .max_period             = (1ULL << 47) - 1,
+};
+
+static int intel_pmu_init(void)
+{
+       union cpuid10_edx edx;
+       union cpuid10_eax eax;
+       unsigned int unused;
+       unsigned int ebx;
+       int version;
+
+       if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
+               return -ENODEV;
+
+       /*
+        * Check whether the Architectural PerfMon supports
+        * the Branch Misses Retired event or not.
+        */
+       cpuid(10, &eax.full, &ebx, &unused, &edx.full);
+       if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
+               return -ENODEV;
+
+       version = eax.split.version_id;
+       if (version < 2)
+               return -ENODEV;
+
+       x86_pmu                         = intel_pmu;
+       x86_pmu.version                 = version;
+       x86_pmu.num_counters            = eax.split.num_counters;
+       x86_pmu.counter_bits            = eax.split.bit_width;
+       x86_pmu.counter_mask            = (1ULL << eax.split.bit_width) - 1;
+
+       /*
+        * Quirk: v2 perfmon does not report fixed-purpose counters, so
+        * assume at least 3 counters:
+        */
+       x86_pmu.num_counters_fixed      = max((int)edx.split.num_counters_fixed, 3);
+
+       rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
+
+       /*
+        * Install the hw-cache-events table:
+        */
+       switch (boot_cpu_data.x86_model) {
+       case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
+       case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
+       case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
+       case 29: /* six-core 45 nm xeon "Dunnington" */
+               memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
+                      sizeof(hw_cache_event_ids));
+
+               pr_cont("Core2 events, ");
+               break;
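+       /*
+        * Unknown models fall back to the Nehalem table; note that the
+        * default label deliberately shares the case 26 body below:
+        */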
+       default:
+       case 26:
+               memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
+                      sizeof(hw_cache_event_ids));
+
+               pr_cont("Nehalem/Corei7 events, ");
+               break;
+       case 28:
+               memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
+                      sizeof(hw_cache_event_ids));
+
+               pr_cont("Atom events, ");
+               break;
+       }
+       return 0;
+}
+
+static int amd_pmu_init(void)
+{
+       x86_pmu = amd_pmu;
+
+       switch (boot_cpu_data.x86) {
+       case 0x0f:
+       case 0x10:
+       case 0x11:
+               memcpy(hw_cache_event_ids, amd_0f_hw_cache_event_ids,
+                      sizeof(hw_cache_event_ids));
+
+               pr_cont("AMD Family 0f/10/11 events, ");
+               break;
+       }
+       return 0;
+}
+
+void __init init_hw_perf_counters(void)
+{
+       int err;
+
+       pr_info("Performance Counters: ");
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_INTEL:
+               err = intel_pmu_init();
+               break;
+       case X86_VENDOR_AMD:
+               err = amd_pmu_init();
+               break;
+       default:
+               return;
+       }
+       if (err != 0) {
+               pr_cont("no PMU driver, software counters only.\n");
+               return;
+       }
+
+       pr_cont("%s PMU driver.\n", x86_pmu.name);
+
+       if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
+               x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
+               WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
+                    x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
+       }
+       perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
+       perf_max_counters = x86_pmu.num_counters;
+
+       if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
+               x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
+               WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
+                    x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
+       }
+
+       perf_counter_mask |=
+               ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
+
+       perf_counters_lapic_init();
+       register_die_notifier(&perf_counter_nmi_notifier);
+
+       pr_info("... version:                 %d\n",     x86_pmu.version);
+       pr_info("... bit width:               %d\n",     x86_pmu.counter_bits);
+       pr_info("... generic counters:        %d\n",     x86_pmu.num_counters);
+       pr_info("... value mask:              %016Lx\n", x86_pmu.counter_mask);
+       pr_info("... max period:              %016Lx\n", x86_pmu.max_period);
+       pr_info("... fixed-purpose counters:  %d\n",     x86_pmu.num_counters_fixed);
+       pr_info("... counter mask:            %016Lx\n", perf_counter_mask);
+}
+
+static inline void x86_pmu_read(struct perf_counter *counter)
+{
+       x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
+}
+
+static const struct pmu pmu = {
+       .enable         = x86_pmu_enable,
+       .disable        = x86_pmu_disable,
+       .read           = x86_pmu_read,
+       .unthrottle     = x86_pmu_unthrottle,
+};
+
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+{
+       int err;
+
+       err = __hw_perf_counter_init(counter);
+       if (err)
+               return ERR_PTR(err);
+
+       return &pmu;
+}
+
+/*
+ * callchain support
+ */
+
+static inline
+void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
+{
+       if (entry->nr < MAX_STACK_DEPTH)
+               entry->ip[entry->nr++] = ip;
+}
+
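+/*
+ * An NMI can interrupt an IRQ-context callchain that is being
+ * recorded, so keep a separate per-cpu entry for each context:
+ */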
+static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
+static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
+
+static void
+backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
+{
+       /* Ignore warnings */
+}
+
+static void backtrace_warning(void *data, char *msg)
+{
+       /* Ignore warnings */
+}
+
+static int backtrace_stack(void *data, char *name)
+{
+       /* Don't bother with IRQ stacks for now */
+       return -1;
+}
+
+static void backtrace_address(void *data, unsigned long addr, int reliable)
+{
+       struct perf_callchain_entry *entry = data;
+
+       if (reliable)
+               callchain_store(entry, addr);
+}
+
+static const struct stacktrace_ops backtrace_ops = {
+       .warning                = backtrace_warning,
+       .warning_symbol         = backtrace_warning_symbol,
+       .stack                  = backtrace_stack,
+       .address                = backtrace_address,
+};
+
+static void
+perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+       unsigned long bp;
+       char *stack;
+       int nr = entry->nr;
+
+       callchain_store(entry, instruction_pointer(regs));
+
+       stack = ((char *)regs + sizeof(struct pt_regs));
+#ifdef CONFIG_FRAME_POINTER
+       bp = frame_pointer(regs);
+#else
+       bp = 0;
+#endif
+
+       dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);
+
+       entry->kernel = entry->nr - nr;
+}
+
+struct stack_frame {
+       const void __user       *next_fp;
+       unsigned long           return_address;
+};
+
+static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+{
+       int ret;
+
+       if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
+               return 0;
+
+       ret = 1;
+       pagefault_disable();
+       if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
+               ret = 0;
+       pagefault_enable();
+
+       return ret;
+}
+
+static void
+perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+       struct stack_frame frame;
+       const void __user *fp;
+       int nr = entry->nr;
+
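+       /*
+        * The passed-in regs may be kernel regs (e.g. an NMI that hit
+        * kernel mode); re-derive the user-mode pt_regs from the top of
+        * the task's kernel stack:
+        */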
+       regs = (struct pt_regs *)current->thread.sp0 - 1;
+       fp   = (void __user *)regs->bp;
+
+       callchain_store(entry, regs->ip);
+
+       while (entry->nr < MAX_STACK_DEPTH) {
+               frame.next_fp        = NULL;
+               frame.return_address = 0;
+
+               if (!copy_stack_frame(fp, &frame))
+                       break;
+
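+               /*
+                * A sane frame pointer lies above the user stack
+                * pointer; bail out on anything bogus:
+                */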
+               if ((unsigned long)fp < user_stack_pointer(regs))
+                       break;
+
+               callchain_store(entry, frame.return_address);
+               fp = frame.next_fp;
+       }
+
+       entry->user = entry->nr - nr;
+}
+
+static void
+perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+       int is_user;
+
+       if (!regs)
+               return;
+
+       is_user = user_mode(regs);
+
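+       /* Don't profile the idle task: */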
+       if (!current || current->pid == 0)
+               return;
+
+       if (is_user && current->state != TASK_RUNNING)
+               return;
+
+       if (!is_user)
+               perf_callchain_kernel(regs, entry);
+
+       if (current->mm)
+               perf_callchain_user(regs, entry);
+}
+
+struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
+{
+       struct perf_callchain_entry *entry;
+
+       if (in_nmi())
+               entry = &__get_cpu_var(nmi_entry);
+       else
+               entry = &__get_cpu_var(irq_entry);
+
+       entry->nr = 0;
+       entry->hv = 0;
+       entry->kernel = 0;
+       entry->user = 0;
+
+       perf_do_callchain(regs, entry);
+
+       return entry;
+}
index f6c70a1..d6f5b9f 100644 (file)
@@ -19,8 +19,8 @@
 #include <linux/nmi.h>
 #include <linux/kprobes.h>
 
-#include <asm/genapic.h>
-#include <asm/intel_arch_perfmon.h>
+#include <asm/apic.h>
+#include <asm/perf_counter.h>
 
 struct nmi_watchdog_ctlblk {
        unsigned int cccr_msr;
index 1c17d7c..a4742a3 100644 (file)
@@ -1012,6 +1012,11 @@ apicinterrupt ERROR_APIC_VECTOR \
 apicinterrupt SPURIOUS_APIC_VECTOR \
        spurious_interrupt smp_spurious_interrupt
 
+#ifdef CONFIG_PERF_COUNTERS
+apicinterrupt LOCAL_PENDING_VECTOR \
+       perf_pending_interrupt smp_perf_pending_interrupt
+#endif
+
 /*
  * Exception entry points.
  */
index 9a391bb..38287b5 100644 (file)
@@ -62,6 +62,14 @@ static int show_other_interrupts(struct seq_file *p, int prec)
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_printf(p, "  Spurious interrupts\n");
+       seq_printf(p, "%*s: ", prec, "CNT");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
+       seq_printf(p, "  Performance counter interrupts\n");
+       seq_printf(p, "%*s: ", prec, "PND");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
+       seq_printf(p, "  Performance pending work\n");
 #endif
        if (generic_interrupt_extension) {
                seq_printf(p, "%*s: ", prec, "PLT");
@@ -165,6 +173,8 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
 #ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
+       sum += irq_stats(cpu)->apic_perf_irqs;
+       sum += irq_stats(cpu)->apic_pending_irqs;
 #endif
        if (generic_interrupt_extension)
                sum += irq_stats(cpu)->generic_irqs;
index 2e08b10..267c662 100644 (file)
@@ -181,10 +181,15 @@ static void __init apic_intr_init(void)
 {
        smp_intr_init();
 
-#ifdef CONFIG_X86_64
+#ifdef CONFIG_X86_THERMAL_VECTOR
        alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
+#endif
+#ifdef CONFIG_X86_THRESHOLD
        alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
 #endif
+#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC)
+       alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
+#endif
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        /* self generated IPI for local APIC timer */
@@ -199,18 +204,10 @@ static void __init apic_intr_init(void)
 
        /* Performance monitoring interrupts: */
 # ifdef CONFIG_PERF_COUNTERS
-       alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
        alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
 # endif
 
 #endif
-
-#ifdef CONFIG_X86_32
-#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_MCE_P4THERMAL)
-       /* thermal monitor LVT interrupt */
-       alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
-#endif
-#endif
 }
 
 /**
index 1442516..0a813b1 100644 (file)
@@ -6,7 +6,6 @@
  *  2000-06-20  Pentium III FXSR, SSE support by Gareth Hughes
  *  2000-2002   x86-64 support by Andi Kleen
  */
-
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
index 734f92c..d51321d 100644 (file)
@@ -335,3 +335,4 @@ ENTRY(sys_call_table)
        .long sys_preadv
        .long sys_pwritev
        .long sys_rt_tgsigqueueinfo     /* 335 */
+       .long sys_perf_counter_open
index ede0245..07d60c8 100644 (file)
@@ -942,8 +942,13 @@ void __init trap_init(void)
 #endif
        set_intr_gate(19, &simd_coprocessor_error);
 
+       /* Reserve all the builtin and the syscall vector: */
+       for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
+               set_bit(i, used_vectors);
+
 #ifdef CONFIG_IA32_EMULATION
        set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
+       set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 #endif
 
 #ifdef CONFIG_X86_32
@@ -960,14 +965,9 @@ void __init trap_init(void)
        }
 
        set_system_trap_gate(SYSCALL_VECTOR, &system_call);
+       set_bit(SYSCALL_VECTOR, used_vectors);
 #endif
 
-       /* Reserve all the builtin and the syscall vector: */
-       for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
-               set_bit(i, used_vectors);
-
-       set_bit(IA32_SYSCALL_VECTOR, used_vectors);
-
        /*
         * Should be a barrier for any external CPU state:
         */
index 5ec7ae3..c6acc63 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/bootmem.h>             /* max_low_pfn                  */
 #include <linux/kprobes.h>             /* __kprobes, ...               */
 #include <linux/mmiotrace.h>           /* kmmio_handler, ...           */
+#include <linux/perf_counter.h>                /* perf_swcounter_event         */
 
 #include <asm/traps.h>                 /* dotraplinkage, ...           */
 #include <asm/pgalloc.h>               /* pgd_*(), ...                 */
@@ -1013,6 +1014,8 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
        if (unlikely(error_code & PF_RSVD))
                pgtable_bad(regs, error_code, address);
 
+       perf_swcounter_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+
        /*
         * If we're in an interrupt, have no user context or are running
         * in an atomic region then we must not take the fault:
@@ -1106,10 +1109,15 @@ good_area: