perf, x86: Change x86_pmu.{enable,disable} calling convention
[linux-2.6.git] / arch / x86 / kernel / cpu / perf_event_p6.c
1 #ifdef CONFIG_CPU_SUP_INTEL
2
3 /*
4  * Not sure about some of these
5  */
/*
 * Map of generic perf hardware event ids to raw P6 EVNTSEL event codes
 * (low byte = event select, next byte = unit mask).
 * Event names below follow the Intel SDM Vol. 3B P6-family event list —
 * NOTE(review): "Not sure about some of these" per the original author.
 */
static const u64 p6_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,	/* CPU_CLK_UNHALTED */
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,	/* INST_RETIRED */
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,	/* L2_RQSTS, all MESI */
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,	/* L2_RQSTS, I-state only */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,	/* BR_INST_RETIRED */
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,	/* BR_MISS_PRED_RETIRED */
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,	/* BUS_DRDY_CLOCKS */
};
16
17 static u64 p6_pmu_event_map(int hw_event)
18 {
19         return p6_perfmon_event_map[hw_event];
20 }
21
22 /*
23  * Event setting that is specified not to count anything.
24  * We use this to effectively disable a counter.
25  *
26  * L2_RQSTS with 0 MESI unit mask.
27  */
28 #define P6_NOP_EVENT                    0x0000002EULL
29
/*
 * Sanitize a user-supplied raw config value: keep only the EVNTSEL bits
 * a user may legitimately set (event select, unit mask, edge detect,
 * invert, counter mask), masking out privilege/enable/interrupt bits.
 *
 * Note: these #defines remain visible for the rest of this file.
 */
static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}
47
/*
 * Scheduling constraints: these events can only be programmed on the
 * counter(s) given by the bitmask (second argument), because the P6
 * hardware supports them on specific counters only.
 */
static struct event_constraint p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};
58
59 static void p6_pmu_disable_all(void)
60 {
61         u64 val;
62
63         /* p6 only has one enable register */
64         rdmsrl(MSR_P6_EVNTSEL0, val);
65         val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
66         wrmsrl(MSR_P6_EVNTSEL0, val);
67 }
68
69 static void p6_pmu_enable_all(void)
70 {
71         unsigned long val;
72
73         /* p6 only has one enable register */
74         rdmsrl(MSR_P6_EVNTSEL0, val);
75         val |= ARCH_PERFMON_EVENTSEL_ENABLE;
76         wrmsrl(MSR_P6_EVNTSEL0, val);
77 }
78
79 static inline void
80 p6_pmu_disable_event(struct perf_event *event)
81 {
82         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
83         struct hw_perf_event *hwc = &event->hw;
84         u64 val = P6_NOP_EVENT;
85
86         if (cpuc->enabled)
87                 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
88
89         (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
90 }
91
92 static void p6_pmu_enable_event(struct perf_event *event)
93 {
94         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
95         struct hw_perf_event *hwc = &event->hw;
96         u64 val;
97
98         val = hwc->config;
99         if (cpuc->enabled)
100                 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
101
102         (void)checking_wrmsrl(hwc->config_base + hwc->idx, val);
103 }
104
/*
 * P6 PMU description: the ops and parameters plugged into the generic
 * x86 perf_event core when a P6-family CPU is detected.
 */
static __initconst struct x86_pmu p6_pmu = {
	.name			= "p6",
	.handle_irq		= x86_pmu_handle_irq,
	.disable_all		= p6_pmu_disable_all,
	.enable_all		= p6_pmu_enable_all,
	.enable			= p6_pmu_enable_event,
	.disable		= p6_pmu_disable_event,
	.eventsel		= MSR_P6_EVNTSEL0,	/* base of the EVNTSEL MSRs */
	.perfctr		= MSR_P6_PERFCTR0,	/* base of the counter MSRs */
	.event_map		= p6_pmu_event_map,
	.raw_event		= p6_pmu_raw_event,
	.max_events		= ARRAY_SIZE(p6_perfmon_event_map),
	.apic			= 1,
	.max_period		= (1ULL << 31) - 1,
	.version		= 0,	/* pre-architectural PMU */
	.num_events		= 2,	/* P6 has exactly two counters */
	/*
	 * Events have 40 bits implemented. However they are designed such
	 * that bits [32-39] are sign extensions of bit 31. As such the
	 * effective width of a event for P6-like PMU is 32 bits only.
	 *
	 * See IA-32 Intel Architecture Software developer manual Vol 3B
	 */
	.event_bits		= 32,
	.event_mask		= (1ULL << 32) - 1,
	.get_event_constraints	= x86_get_event_constraints,
	.event_constraints	= p6_event_constraints,
};
133
134 static __init int p6_pmu_init(void)
135 {
136         switch (boot_cpu_data.x86_model) {
137         case 1:
138         case 3:  /* Pentium Pro */
139         case 5:
140         case 6:  /* Pentium II */
141         case 7:
142         case 8:
143         case 11: /* Pentium III */
144         case 9:
145         case 13:
146                 /* Pentium M */
147                 break;
148         default:
149                 pr_cont("unsupported p6 CPU model %d ",
150                         boot_cpu_data.x86_model);
151                 return -ENODEV;
152         }
153
154         x86_pmu = p6_pmu;
155
156         return 0;
157 }
158
159 #endif /* CONFIG_CPU_SUP_INTEL */