/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/time.h>
#include <asm/cevt-r4k.h>

static int mips_next_event(unsigned long delta,
                           struct clock_event_device *evt)
{
        unsigned int cnt;
        int res;

        cnt = read_c0_count();
        cnt += delta;
        write_c0_compare(cnt);
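        /*
         * Wrap-safe expiry check: the signed difference goes non-negative
         * only once CP0_Count has caught up with the new CP0_Compare value,
         * i.e. the event may already have been missed, in which case -ETIME
         * makes the clockevents core retry with a larger delta.
         */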
        res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
        return res;
}

/**
 * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
 *
 * Running under virtualisation can introduce overhead into mips_next_event() in
 * the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
 * potentially with an unnatural frequency, which makes a fixed min_delta_ns
 * value inappropriate as it may be too small.
 *
 * It can also introduce occasional latency from the guest being descheduled.
 *
 * This function calculates a good minimum delta based roughly on the 75th
 * percentile of the time taken to do the mips_next_event() sequence, in order
 * to handle potentially higher overhead while also eliminating outliers due to
 * unpredictable hypervisor latency (which can be handled by retries).
 *
 * Return: An appropriate minimum delta for the clock event device.
 */
static unsigned int calculate_min_delta(void)
{
        unsigned int cnt, i, j, k, l;
        unsigned int buf1[4], buf2[3];
        unsigned int min_delta;

        /*
         * Calculate the median of 5 75th percentiles of 5 samples of how long
         * it takes to set CP0_Compare = CP0_Count + delta.
         */
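        /*
         * buf1 keeps the four smallest of each round's five samples in
         * ascending order, so buf1[3] ends up as that round's 75th
         * percentile (the largest sample is discarded as an outlier).
         * buf2 likewise keeps the three smallest of the five per-round
         * percentiles, leaving buf2[2] as their median.
         */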
        for (i = 0; i < 5; ++i) {
                for (j = 0; j < 5; ++j) {
                        /*
                         * This is like the code in mips_next_event(), and
                         * directly measures the borderline "safe" delta.
                         */
                        cnt = read_c0_count();
                        write_c0_compare(cnt);
                        cnt = read_c0_count() - cnt;

                        /* Sorted insert into buf1 */
                        for (k = 0; k < j; ++k) {
                                if (cnt < buf1[k]) {
                                        l = min_t(unsigned int,
                                                  j, ARRAY_SIZE(buf1) - 1);
                                        for (; l > k; --l)
                                                buf1[l] = buf1[l - 1];
                                        break;
                                }
                        }
                        if (k < ARRAY_SIZE(buf1))
                                buf1[k] = cnt;
                }

                /* Sorted insert of 75th percentile into buf2 */
                for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
                        if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
                                l = min_t(unsigned int,
                                          i, ARRAY_SIZE(buf2) - 1);
                                for (; l > k; --l)
                                        buf2[l] = buf2[l - 1];
                                break;
                        }
                }
                if (k < ARRAY_SIZE(buf2))
                        buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
        }

        /* Use 2 * median of 75th percentiles as a safety margin */
        min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;

        /* Don't go too low */
        if (min_delta < 0x300)
                min_delta = 0x300;

        pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
                 __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
        return min_delta;
}

DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
        /*
         * The performance counter overflow interrupt may be shared with the
         * timer interrupt (cp0_perfcount_irq < 0). If it is and a
         * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
         * and we can't reliably determine if a counter interrupt has also
         * happened (!r2) then don't check for a timer interrupt.
         */
        return (cp0_perfcount_irq < 0) &&
                perf_irq() == IRQ_HANDLED &&
                !r2;
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
        const int r2 = cpu_has_mips_r2_r6;
        struct clock_event_device *cd;
        int cpu = smp_processor_id();

        /*
         * Suckage alert:
         * Before R2 of the architecture there was no way to see if a
         * performance counter interrupt was pending, so we have to run
         * the performance counter interrupt handler anyway.
         */
        if (handle_perf_irq(r2))
                return IRQ_HANDLED;

        /*
         * The same applies to performance counter interrupts.  But with
         * the above we now know that the reason we got here must be a
         * timer interrupt.  Being the paranoiacs we are, we check anyway.
         */
        if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
                /* Clear Count/Compare Interrupt */
                write_c0_compare(read_c0_compare());
                cd = &per_cpu(mips_clockevent_device, cpu);
                cd->event_handler(cd);

                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}

struct irqaction c0_compare_irqaction = {
        .handler = c0_compare_interrupt,
        /*
         * IRQF_SHARED: The timer interrupt may be shared with other
         * interrupts such as perf counter and FDC interrupts.
         */
        .flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
        .name = "timer",
};

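/*
 * Dummy event handler, presumably installed so that cd->event_handler is
 * never NULL should the compare interrupt fire before the clockevents core
 * installs the real handler during registration.
 */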
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
        /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
        return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * The compare interrupt can be routed and latched outside the core, so
 * wait up to a worst-case number of cycle counter ticks for timer interrupt
 * changes to propagate to the cause register.
 */
#define COMPARE_INT_SEEN_TICKS 50

int c0_compare_int_usable(void)
{
        unsigned int delta;
        unsigned int cnt;

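        /*
         * Under CONFIG_KVM_GUEST the Count/Compare registers are emulated
         * by the hypervisor; the timing-sensitive probing below would
         * presumably be slow and unreliable there, so just assume the
         * timer is usable.
         */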
#ifdef CONFIG_KVM_GUEST
        return 1;
#endif

        /*
         * IP7 already pending? Try to clear it by acking the timer.
         */
        if (c0_compare_int_pending()) {
                cnt = read_c0_count();
                write_c0_compare(cnt);
                back_to_back_c0_hazard();
                while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                        if (!c0_compare_int_pending())
                                break;
                if (c0_compare_int_pending())
                        return 0;
        }

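        /*
         * Find a delta that can be written to CP0_Compare before CP0_Count
         * races past it, doubling the delta until the write is observed to
         * land in the future.
         */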
        for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
                cnt = read_c0_count();
                cnt += delta;
                write_c0_compare(cnt);
                back_to_back_c0_hazard();
                if ((int)(read_c0_count() - cnt) < 0)
                        break;
                /* increase delta if the timer was already expired */
        }

        while ((int)(read_c0_count() - cnt) <= 0)
                ;       /* Wait for expiry */

        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                if (c0_compare_int_pending())
                        break;
        if (!c0_compare_int_pending())
                return 0;
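
        /*
         * The interrupt fired as expected; ack it and make sure the
         * pending bit clears again within the propagation window.
         */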
        cnt = read_c0_count();
        write_c0_compare(cnt);
        back_to_back_c0_hazard();
        while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
                if (!c0_compare_int_pending())
                        break;
        if (c0_compare_int_pending())
                return 0;

        /*
         * Feels like a real count / compare timer.
         */
        return 1;
}

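/*
 * Default to the legacy CPU interrupt line; platforms that route the
 * compare interrupt elsewhere (e.g. through a vectored interrupt
 * controller) can override this weak hook.
 */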
unsigned int __weak get_c0_compare_int(void)
{
        return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

int r4k_clockevent_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct clock_event_device *cd;
        unsigned int irq, min_delta;

        if (!cpu_has_counter || !mips_hpt_frequency)
                return -ENXIO;

        if (!c0_compare_int_usable())
                return -ENXIO;

        /*
         * With vectored interrupts things get platform-specific.
         * get_c0_compare_int is a hook to allow a platform to return the
         * interrupt number of its liking.
         */
        irq = get_c0_compare_int();

        cd = &per_cpu(mips_clockevent_device, cpu);

        cd->name = "MIPS";
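        /*
         * C3STOP presumably indicates that the timer may not be able to
         * wake the CPU from deep idle states, so the clockevents core
         * falls back to a broadcast device there; PERCPU marks the device
         * as local to each CPU.
         */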
        cd->features = CLOCK_EVT_FEAT_ONESHOT |
                       CLOCK_EVT_FEAT_C3STOP |
                       CLOCK_EVT_FEAT_PERCPU;

        min_delta = calculate_min_delta();

        cd->rating = 300;
        cd->irq = irq;
        cd->cpumask = cpumask_of(cpu);
        cd->set_next_event = mips_next_event;
        cd->event_handler = mips_event_handler;

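        /*
         * The maximum delta is 0x7fffffff: with the signed 32-bit expiry
         * check in mips_next_event(), anything larger could be misread as
         * a time already in the past.
         */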
        clockevents_config_and_register(cd, mips_hpt_frequency, min_delta,
                                        0x7fffffff);

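        /*
         * The compare irqaction is shared by all CPUs, so install it only
         * once, when the first CPU gets here.
         */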
        if (cp0_timer_irq_installed)
                return 0;

        cp0_timer_irq_installed = 1;

        setup_irq(irq, &c0_compare_irqaction);

        return 0;
}