#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/cpu_pm.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
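
/*
 * Map a HW_CACHE config onto a PMU-specific event number via the driver's
 * cache map. The generic perf ABI packs the descriptor into the low bytes
 * of attr.config: bits 0-7 hold the cache type, bits 8-15 the operation and
 * bits 16-23 the result. For example, config == 0x10002 decodes as type
 * PERF_COUNT_HW_CACHE_LL (0x02), op PERF_COUNT_HW_CACHE_OP_READ (0x00) and
 * result PERF_COUNT_HW_CACHE_RESULT_MISS (0x01): last-level-cache read
 * misses.
 */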
static int
armpmu_map_cache_event(const unsigned (*cache_map)
                                      [PERF_COUNT_HW_CACHE_MAX]
                                      [PERF_COUNT_HW_CACHE_OP_MAX]
                                      [PERF_COUNT_HW_CACHE_RESULT_MAX],
                       u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
        int mapping;

        if (config >= PERF_COUNT_HW_MAX)
                return -EINVAL;

        mapping = (*event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
        return (int)(config & raw_event_mask);
}

int
armpmu_map_event(struct perf_event *event,
                 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
                 const unsigned (*cache_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX],
                 u32 raw_event_mask)
{
        u64 config = event->attr.config;
        int type = event->attr.type;

        if (type == event->pmu->type)
                return armpmu_map_raw_event(raw_event_mask, config);

        switch (type) {
        case PERF_TYPE_HARDWARE:
                return armpmu_map_hw_event(event_map, config);
        case PERF_TYPE_HW_CACHE:
                return armpmu_map_cache_event(cache_map, config);
        case PERF_TYPE_RAW:
                return armpmu_map_raw_event(raw_event_mask, config);
        }

        return -ENOENT;
}
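
/*
 * Program the counter to overflow (and raise an interrupt) once the
 * remaining sample period has elapsed: for a free-running, incrementing
 * counter, writing -left means the counter wraps after "left" further
 * increments. E.g. with a 32-bit counter (max_period == 0xffffffff) and
 * 1000 events left, we program 0xfffffc18. The period is also clamped to
 * half the counter width to leave headroom for interrupt latency.
 */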
int armpmu_event_set_period(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        /*
         * Limit the maximum period to prevent the counter value
         * from overtaking the one we are about to program. In
         * effect we are reducing max_period to account for
         * interrupt latency (and we are being very conservative).
         */
        if (left > (armpmu->max_period >> 1))
                left = armpmu->max_period >> 1;

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(event, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}
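
/*
 * Fold the delta since the last read into the event count. The cmpxchg
 * loop guards against racing with another reader or with the overflow
 * interrupt on the same counter: whoever succeeds in updating prev_count
 * accounts the delta. Masking the delta with max_period handles a counter
 * that wrapped between the two reads.
 */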
u64 armpmu_event_update(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(event);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
        armpmu_event_update(event);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(event);
                armpmu_event_update(event);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void armpmu_start(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event);
        armpmu->enable(event);
}

static void
armpmu_del(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        armpmu_stop(event, PERF_EF_UPDATE);
        hw_events->events[idx] = NULL;
        clear_bit(idx, hw_events->used_mask);
        if (armpmu->clear_event_idx)
                armpmu->clear_event_idx(hw_events, event);

        perf_event_update_userpage(event);
}
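
/*
 * pmu::add callback: claim a hardware counter for the event on this CPU
 * and, if PERF_EF_START is set, start it counting. Called with the PMU
 * disabled so the counter can be (re)programmed safely.
 */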
static int
armpmu_add(struct perf_event *event, int flags)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        /* An event following a process won't be stopped earlier */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return -ENOENT;

        perf_pmu_disable(event->pmu);

        /* If we don't have a space for the counter then finish early. */
        idx = armpmu->get_event_idx(hw_events, event);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(event);
        hw_events->events[idx] = event;

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}

static int
validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
               struct perf_event *event)
{
        struct arm_pmu *armpmu;

        if (is_software_event(event))
                return 1;

        /*
         * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
         * core perf code won't check that the pmu->ctx == leader->ctx
         * until after pmu->event_init(event).
         */
        if (event->pmu != pmu)
                return 0;

        if (event->state < PERF_EVENT_STATE_OFF)
                return 1;

        if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
                return 1;

        armpmu = to_arm_pmu(event->pmu);
        return armpmu->get_event_idx(hw_events, event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct pmu_hw_events fake_pmu;

        /*
         * Initialise the fake PMU. We only need to populate the
         * used_mask for the purposes of validation.
         */
        memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));

        if (!validate_event(event->pmu, &fake_pmu, leader))
                return -EINVAL;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(event->pmu, &fake_pmu, sibling))
                        return -EINVAL;
        }

        if (!validate_event(event->pmu, &fake_pmu, event))
                return -EINVAL;

        return 0;
}
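
/*
 * Common interrupt entry point. The time spent in the driver's overflow
 * handler is fed to perf_sample_event_took() so that the core can throttle
 * the sample rate if interrupt handling starts to consume too much CPU
 * time.
 */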
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
        struct arm_pmu *armpmu;
        struct platform_device *plat_device;
        struct arm_pmu_platdata *plat;
        int ret;
        u64 start_clock, finish_clock;

        /*
         * We request the IRQ with a (possibly percpu) struct arm_pmu**, but
         * the handlers expect a struct arm_pmu*. The percpu_irq framework will
         * do any necessary shifting, we just need to perform the first
         * dereference.
         */
        armpmu = *(void **)dev;
        plat_device = armpmu->plat_device;
        plat = dev_get_platdata(&plat_device->dev);

        start_clock = sched_clock();
        if (plat && plat->handle_irq)
                ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
        else
                ret = armpmu->handle_irq(irq, armpmu);
        finish_clock = sched_clock();

        perf_sample_event_took(finish_clock - start_clock);
        return ret;
}

static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
        armpmu->free_irq(armpmu);
}

static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
        int err = armpmu->request_irq(armpmu, armpmu_dispatch_irq);
        if (err) {
                armpmu_release_hardware(armpmu);
                return err;
        }

        return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        atomic_t *active_events = &armpmu->active_events;
        struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

        if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
                armpmu_release_hardware(armpmu);
                mutex_unlock(pmu_reserve_mutex);
        }
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
        return attr->exclude_idle || attr->exclude_user ||
               attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int mapping;

        mapping = armpmu->map_event(event);

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;
        hwc->config_base = 0;
        hwc->config = 0;
        hwc->event_base = 0;

        /*
         * Check whether we need to exclude the counter from certain modes.
         */
        if ((!armpmu->set_event_filter ||
             armpmu->set_event_filter(hwc, &event->attr)) &&
             event_requires_mode_exclusion(&event->attr)) {
                pr_debug("ARM performance counters do not support mode exclusion\n");
                return -EOPNOTSUPP;
        }

        /*
         * Store the event encoding into the config_base field.
         */
        hwc->config_base |= (unsigned long)mapping;

        if (!is_sampling_event(event)) {
                /*
                 * For non-sampling runs, limit the sample_period to half
                 * of the counter width. That way, the new counter value
                 * is far less likely to overtake the previous one unless
                 * you have some serious IRQ latency issues.
                 */
                hwc->sample_period = armpmu->max_period >> 1;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        if (event->group_leader != event) {
                if (validate_group(event) != 0)
                        return -EINVAL;
        }

        return 0;
}

static int armpmu_event_init(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        int err = 0;
        atomic_t *active_events = &armpmu->active_events;

        /*
         * Reject CPU-affine events for CPUs that are of a different class to
         * that which this PMU handles. Process-following events (where
         * event->cpu == -1) can be migrated between CPUs, and thus we have to
         * reject them later (in armpmu_add) if they're scheduled on a
         * different class of CPU.
         */
        if (event->cpu != -1 &&
                !cpumask_test_cpu(event->cpu, &armpmu->supported_cpus))
                return -ENOENT;

        /* does not support taken branch sampling */
        if (has_branch_stack(event))
                return -EOPNOTSUPP;

        if (armpmu->map_event(event) == -ENOENT)
                return -ENOENT;

        event->destroy = hw_perf_event_destroy;

        if (!atomic_inc_not_zero(active_events)) {
                mutex_lock(&armpmu->reserve_mutex);
                if (atomic_read(active_events) == 0)
                        err = armpmu_reserve_hardware(armpmu);

                if (!err)
                        atomic_inc(active_events);
                mutex_unlock(&armpmu->reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}

static void armpmu_enable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        if (enabled)
                armpmu->start(armpmu);
}

static void armpmu_disable(struct pmu *pmu)
{
        struct arm_pmu *armpmu = to_arm_pmu(pmu);

        /* For task-bound events we may be called on other CPUs */
        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return;

        armpmu->stop(armpmu);
}

/*
 * In heterogeneous systems, events are specific to a particular
 * microarchitecture, and aren't suitable for another. Thus, only match CPUs of
 * the same microarchitecture.
 */
static int armpmu_filter_match(struct perf_event *event)
{
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        unsigned int cpu = smp_processor_id();
        return cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}
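
/*
 * Expose the CPUs this PMU instance covers through sysfs, e.g.
 * (illustrative path; the final component is the driver-specific PMU name):
 *
 *   $ cat /sys/bus/event_source/devices/armv7_cortex_a15/cpus
 *   0-3
 */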
static ssize_t armpmu_cpumask_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct arm_pmu *armpmu = to_arm_pmu(dev_get_drvdata(dev));
        return cpumap_print_to_pagebuf(true, buf, &armpmu->supported_cpus);
}

static DEVICE_ATTR(cpus, S_IRUGO, armpmu_cpumask_show, NULL);

static struct attribute *armpmu_common_attrs[] = {
        &dev_attr_cpus.attr,
        NULL,
};

static struct attribute_group armpmu_common_attr_group = {
        .attrs = armpmu_common_attrs,
};

static void armpmu_init(struct arm_pmu *armpmu)
{
        atomic_set(&armpmu->active_events, 0);
        mutex_init(&armpmu->reserve_mutex);

        armpmu->pmu = (struct pmu) {
                .pmu_enable     = armpmu_enable,
                .pmu_disable    = armpmu_disable,
                .event_init     = armpmu_event_init,
                .add            = armpmu_add,
                .del            = armpmu_del,
                .start          = armpmu_start,
                .stop           = armpmu_stop,
                .read           = armpmu_read,
                .filter_match   = armpmu_filter_match,
                .attr_groups    = armpmu->attr_groups,
        };
        armpmu->attr_groups[ARMPMU_ATTR_GROUP_COMMON] =
                &armpmu_common_attr_group;
}

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *__oprofile_cpu_pmu;

/*
 * Despite the names, these two functions are CPU-specific and are used
 * by the OProfile/perf code.
 */
const char *perf_pmu_name(void)
{
        if (!__oprofile_cpu_pmu)
                return NULL;

        return __oprofile_cpu_pmu->name;
}
EXPORT_SYMBOL_GPL(perf_pmu_name);

int perf_num_counters(void)
{
        int max_events = 0;

        if (__oprofile_cpu_pmu != NULL)
                max_events = __oprofile_cpu_pmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

static void cpu_pmu_enable_percpu_irq(void *data)
{
        int irq = *(int *)data;

        enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

static void cpu_pmu_disable_percpu_irq(void *data)
{
        int irq = *(int *)data;

        disable_percpu_irq(irq);
}
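
/*
 * A CPU PMU is wired up with either a single percpu interrupt (a PPI,
 * banked per CPU and requested once for the whole supported mask) or with
 * one normal interrupt (SPI) per CPU, each pinned to its CPU with
 * irq_set_affinity().
 */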
static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
{
        int cpu;
        struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

        for_each_cpu(cpu, &cpu_pmu->supported_cpus) {
                int irq = per_cpu(hw_events->irq, cpu);
                if (!irq)
                        continue;

                if (irq_is_percpu(irq)) {
                        on_each_cpu_mask(&cpu_pmu->supported_cpus,
                                         cpu_pmu_disable_percpu_irq, &irq, 1);
                        free_percpu_irq(irq, &hw_events->percpu_pmu);

                        break;
                }

                if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
                        continue;

                free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
        }
}

static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
{
        int cpu, err;
        struct pmu_hw_events __percpu *hw_events = cpu_pmu->hw_events;

        for_each_cpu(cpu, &cpu_pmu->supported_cpus) {
                int irq = per_cpu(hw_events->irq, cpu);
                if (!irq)
                        continue;

                if (irq_is_percpu(irq)) {
                        err = request_percpu_irq(irq, handler, "arm-pmu",
                                                 &hw_events->percpu_pmu);
                        if (err) {
                                pr_err("unable to request IRQ%d for ARM PMU counters\n",
                                       irq);
                                return err;
                        }

                        on_each_cpu_mask(&cpu_pmu->supported_cpus,
                                         cpu_pmu_enable_percpu_irq, &irq, 1);

                        break;
                }

                /*
                 * If we have a single PMU interrupt that we can't shift,
                 * assume that we're running on a uniprocessor machine and
                 * continue. Otherwise, continue without this interrupt.
                 */
                if (irq_set_affinity(irq, cpumask_of(cpu)) &&
                    num_possible_cpus() > 1) {
                        pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                irq, cpu);
                        continue;
                }

                err = request_irq(irq, handler,
                                  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
                                  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                               irq);
                        return err;
                }

                cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
        }

        return 0;
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct arm_pmu *pmu = hlist_entry_safe(node, struct arm_pmu, node);

        if (!cpumask_test_cpu(cpu, &pmu->supported_cpus))
                return 0;
        if (pmu->reset)
                pmu->reset(pmu);
        return 0;
}
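
/*
 * CPU PM notifier: as with hotplug, a deep idle state may power down the
 * PMU and lose its state, so stop and save the active counters on
 * CPU_PM_ENTER and reset/restart them on CPU_PM_EXIT.
 */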
#ifdef CONFIG_CPU_PM
static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
{
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        struct perf_event *event;
        int idx;

        for (idx = 0; idx < armpmu->num_events; idx++) {
                /*
                 * If the counter is not used skip it, there is no
                 * need to stop/restart it.
                 */
                if (!test_bit(idx, hw_events->used_mask))
                        continue;

                event = hw_events->events[idx];

                switch (cmd) {
                case CPU_PM_ENTER:
                        /*
                         * Stop and update the counter
                         */
                        armpmu_stop(event, PERF_EF_UPDATE);
                        break;
                case CPU_PM_EXIT:
                case CPU_PM_ENTER_FAILED:
                        /*
                         * Restore and enable the counter.
                         *
                         * armpmu_start() indirectly calls
                         * perf_event_update_userpage(), which requires RCU
                         * read locking to be functional. Wrap the call in
                         * RCU_NONIDLE() so the RCU subsystem knows this CPU
                         * is not idle from an RCU perspective for the
                         * duration of armpmu_start().
                         */
                        RCU_NONIDLE(armpmu_start(event, PERF_EF_RELOAD));
                        break;
                default:
                        break;
                }
        }
}

static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
                             void *v)
{
        struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);

        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
                return NOTIFY_DONE;

        /*
         * Always reset the PMU registers on power-up even if
         * there are no events running.
         */
        if (cmd == CPU_PM_EXIT && armpmu->reset)
                armpmu->reset(armpmu);

        if (!enabled)
                return NOTIFY_OK;

        switch (cmd) {
        case CPU_PM_ENTER:
                armpmu->stop(armpmu);
                cpu_pm_pmu_setup(armpmu, cmd);
                break;
        case CPU_PM_EXIT:
                cpu_pm_pmu_setup(armpmu, cmd);
                /* fall through */
        case CPU_PM_ENTER_FAILED:
                armpmu->start(armpmu);
                break;
        default:
                return NOTIFY_DONE;
        }

        return NOTIFY_OK;
}

static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
{
        cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
        return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
}

static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
{
        cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
}
#else
static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
#endif
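
/*
 * Hook a freshly probed CPU PMU into the hotplug and CPU PM machinery so
 * that its state survives CPUs going off/online, and reset the hardware to
 * sane values before first use.
 */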
static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
{
        int err;

        err = cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                               &cpu_pmu->node);
        if (err)
                goto out;

        err = cpu_pm_pmu_register(cpu_pmu);
        if (err)
                goto out_unregister;

        cpu_pmu->request_irq = cpu_pmu_request_irq;
        cpu_pmu->free_irq = cpu_pmu_free_irq;

        /* Ensure the PMU has sane values out of reset. */
        if (cpu_pmu->reset)
                on_each_cpu_mask(&cpu_pmu->supported_cpus, cpu_pmu->reset,
                                 cpu_pmu, 1);

        /*
         * This is a CPU PMU potentially in a heterogeneous configuration (e.g.
         * big.LITTLE). This is not an uncore PMU, and we have taken ctx
         * sharing into account (e.g. with our pmu::filter_match callback and
         * pmu::event_init group validation).
         */
        cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_HETEROGENEOUS_CPUS;

        return 0;

out_unregister:
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                            &cpu_pmu->node);
out:
        return err;
}

static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
{
        cpu_pm_pmu_unregister(cpu_pmu);
        cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_STARTING,
                                            &cpu_pmu->node);
}

/*
 * CPU PMU identification and probing.
 */
static int probe_current_pmu(struct arm_pmu *pmu,
                             const struct pmu_probe_info *info)
{
        int cpu = get_cpu();
        unsigned int cpuid = read_cpuid_id();
        int ret = -ENODEV;

        pr_info("probing PMU on CPU %d\n", cpu);

        for (; info->init != NULL; info++) {
                if ((cpuid & info->mask) != info->cpuid)
                        continue;
                ret = info->init(pmu);
                break;
        }

        put_cpu();
        return ret;
}
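
/*
 * Interrupts are parsed from the platform device. A single percpu
 * interrupt (PPI) covers exactly the CPUs its IRQ partition describes;
 * otherwise each per-CPU interrupt (SPI) is matched to its CPU via the
 * optional "interrupt-affinity" DT property. A minimal sketch of such a
 * node (the interrupt specifiers are illustrative only):
 *
 *        pmu {
 *                compatible = "arm,cortex-a15-pmu";
 *                interrupts = <0 68 4>, <0 69 4>;
 *                interrupt-affinity = <&cpu0>, <&cpu1>;
 *        };
 */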
static int pmu_parse_percpu_irq(struct arm_pmu *pmu, int irq)
{
        int cpu, ret;
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

        ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
        if (ret)
                return ret;

        for_each_cpu(cpu, &pmu->supported_cpus)
                per_cpu(hw_events->irq, cpu) = irq;

        return 0;
}

static bool pmu_has_irq_affinity(struct device_node *node)
{
        return !!of_find_property(node, "interrupt-affinity", NULL);
}

static int pmu_parse_irq_affinity(struct device_node *node, int i)
{
        struct device_node *dn;
        int cpu;

        /*
         * If we don't have an interrupt-affinity property, we guess irq
         * affinity matches our logical CPU order, as we used to assume.
         * This is fragile, so we'll warn in pmu_parse_irqs().
         */
        if (!pmu_has_irq_affinity(node))
                return i;

        dn = of_parse_phandle(node, "interrupt-affinity", i);
        if (!dn) {
                pr_warn("failed to parse interrupt-affinity[%d] for %s\n",
                        i, node->name);
                return -EINVAL;
        }

        /* Now look up the logical CPU number */
        for_each_possible_cpu(cpu) {
                struct device_node *cpu_dn;

                cpu_dn = of_cpu_device_node_get(cpu);
                of_node_put(cpu_dn);

                if (dn == cpu_dn)
                        break;
        }

        if (cpu >= nr_cpu_ids)
                pr_warn("failed to find logical CPU for %s\n", dn->name);

        of_node_put(dn);

        return cpu;
}

static int pmu_parse_irqs(struct arm_pmu *pmu)
{
        int i = 0, irqs;
        struct platform_device *pdev = pmu->plat_device;
        struct pmu_hw_events __percpu *hw_events = pmu->hw_events;

        irqs = platform_irq_count(pdev);
        if (irqs < 0) {
                pr_err("unable to count PMU IRQs\n");
                return irqs;
        }

        /*
         * In this case we have no idea which CPUs are covered by the PMU.
         * To match our prior behaviour, we assume all CPUs in this case.
         */
        if (irqs == 0) {
                pr_warn("no irqs for PMU, sampling events not supported\n");
                pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
                cpumask_setall(&pmu->supported_cpus);
                return 0;
        }

        if (irqs == 1) {
                int irq = platform_get_irq(pdev, 0);
                if (irq && irq_is_percpu(irq))
                        return pmu_parse_percpu_irq(pmu, irq);
        }

        if (!pmu_has_irq_affinity(pdev->dev.of_node)) {
                pr_warn("no interrupt-affinity property for %s, guessing.\n",
                        of_node_full_name(pdev->dev.of_node));
        }

        /*
         * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
         * special platdata function that attempts to demux them.
         */
        if (dev_get_platdata(&pdev->dev))
                cpumask_setall(&pmu->supported_cpus);

        for (i = 0; i < irqs; i++) {
                int cpu, irq;

                irq = platform_get_irq(pdev, i);
                if (WARN_ON(irq <= 0))
                        continue;

                if (irq_is_percpu(irq)) {
                        pr_warn("multiple PPIs or mismatched SPI/PPI detected\n");
                        return -EINVAL;
                }

                cpu = pmu_parse_irq_affinity(pdev->dev.of_node, i);
                if (cpu < 0)
                        return cpu;
                if (cpu >= nr_cpu_ids)
                        continue;

                if (per_cpu(hw_events->irq, cpu)) {
                        pr_warn("multiple PMU IRQs for the same CPU detected\n");
                        return -EINVAL;
                }

                per_cpu(hw_events->irq, cpu) = irq;
                cpumask_set_cpu(cpu, &pmu->supported_cpus);
        }

        return 0;
}

static struct arm_pmu *armpmu_alloc(void)
{
        struct arm_pmu *pmu;
        int cpu;

        pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
        if (!pmu) {
                pr_info("failed to allocate PMU device!\n");
                goto out;
        }

        pmu->hw_events = alloc_percpu(struct pmu_hw_events);
        if (!pmu->hw_events) {
                pr_info("failed to allocate per-cpu PMU data.\n");
                goto out_free_pmu;
        }

        for_each_possible_cpu(cpu) {
                struct pmu_hw_events *events;

                events = per_cpu_ptr(pmu->hw_events, cpu);
                raw_spin_lock_init(&events->pmu_lock);
                events->percpu_pmu = pmu;
        }

        return pmu;

out_free_pmu:
        kfree(pmu);
out:
        return NULL;
}

static void armpmu_free(struct arm_pmu *pmu)
{
        free_percpu(pmu->hw_events);
        kfree(pmu);
}
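
/*
 * Common probe path for CPU PMU drivers: allocate the arm_pmu, parse the
 * IRQs, initialise it via the matching DT entry's init function (or, for
 * non-DT systems, by matching the CPUID against the supplied probe table),
 * and finally register with the perf core.
 */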
int arm_pmu_device_probe(struct platform_device *pdev,
                         const struct of_device_id *of_table,
                         const struct pmu_probe_info *probe_table)
{
        const struct of_device_id *of_id;
        const int (*init_fn)(struct arm_pmu *);
        struct device_node *node = pdev->dev.of_node;
        struct arm_pmu *pmu;
        int ret = -ENODEV;

        pmu = armpmu_alloc();
        if (!pmu)
                return -ENOMEM;

        armpmu_init(pmu);

        pmu->plat_device = pdev;

        ret = pmu_parse_irqs(pmu);
        if (ret)
                goto out_free;

        if (node && (of_id = of_match_node(of_table, pdev->dev.of_node))) {
                init_fn = of_id->data;

                pmu->secure_access = of_property_read_bool(pdev->dev.of_node,
                                                           "secure-reg-access");

                /* arm64 systems boot only as non-secure */
                if (IS_ENABLED(CONFIG_ARM64) && pmu->secure_access) {
                        pr_warn("ignoring \"secure-reg-access\" property for arm64\n");
                        pmu->secure_access = false;
                }

                ret = init_fn(pmu);
        } else if (probe_table) {
                cpumask_setall(&pmu->supported_cpus);
                ret = probe_current_pmu(pmu, probe_table);
        }

        if (ret) {
                pr_info("%s: failed to probe PMU!\n", of_node_full_name(node));
                goto out_free;
        }

        ret = cpu_pmu_init(pmu);
        if (ret)
                goto out_free;

        ret = perf_pmu_register(&pmu->pmu, pmu->name, -1);
        if (ret)
                goto out_destroy;

        if (!__oprofile_cpu_pmu)
                __oprofile_cpu_pmu = pmu;

        pr_info("enabled with %s PMU driver, %d counters available\n",
                pmu->name, pmu->num_events);

        return 0;

out_destroy:
        cpu_pmu_destroy(pmu);
out_free:
        pr_info("%s: failed to register PMU devices!\n",
                of_node_full_name(node));
        armpmu_free(pmu);
        return ret;
}

static int arm_pmu_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_STARTING,
                                      "perf/arm/pmu:starting",
                                      arm_perf_starting_cpu, NULL);
        if (ret)
                pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n",
                       ret);
        return ret;
}
subsys_initcall(arm_pmu_hp_init);