/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 MIPS Technologies, Inc.
 * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>

#include <asm/time.h>
#include <asm/cevt-r4k.h>

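/*
 * Program the next clock event: set CP0_Compare to CP0_Count + delta.
 * If CP0_Count has already moved past the new CP0_Compare value by the
 * time it is read back, the deadline was missed, so return -ETIME and
 * let the clockevents core retry with a larger delta.
 */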
static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned int cnt;
	int res;

	cnt = read_c0_count();
	cnt += delta;
	write_c0_compare(cnt);
	res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
	return res;
}

/**
 * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
 *
 * Running under virtualisation can introduce overhead into mips_next_event()
 * in the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
 * potentially with an unnatural frequency, which makes a fixed min_delta_ns
 * value inappropriate as it may be too small.
 *
 * It can also introduce occasional latency from the guest being descheduled.
 *
 * This function calculates a good minimum delta based roughly on the 75th
 * percentile of the time taken to do the mips_next_event() sequence, in order
 * to handle potentially higher overhead while also eliminating outliers due
 * to unpredictable hypervisor latency (which can be handled by retries).
 *
 * Return: An appropriate minimum delta for the clock event device.
 */
static unsigned int calculate_min_delta(void)
{
	unsigned int cnt, i, j, k, l;
	unsigned int buf1[4], buf2[3];
	unsigned int min_delta;

	/*
	 * Calculate the median of 5 values, each of which is the 75th
	 * percentile of 5 samples of how long it takes to set
	 * CP0_Compare = CP0_Count + delta.
	 */
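	/*
	 * buf1 keeps the 4 smallest of the 5 inner samples in ascending
	 * order, so buf1[3] ends up holding the value used as the 75th
	 * percentile of one run.  buf2 likewise keeps the 3 smallest of the
	 * 5 per-run percentiles, leaving buf2[2] as their median.
	 */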
	for (i = 0; i < 5; ++i) {
		for (j = 0; j < 5; ++j) {
			/*
			 * This is like the code in mips_next_event(), and
			 * directly measures the borderline "safe" delta.
			 */
			cnt = read_c0_count();
			write_c0_compare(cnt);
			cnt = read_c0_count() - cnt;

			/* Sorted insert into buf1 */
			for (k = 0; k < j; ++k) {
				if (cnt < buf1[k]) {
					l = min_t(unsigned int,
						  j, ARRAY_SIZE(buf1) - 1);
					for (; l > k; --l)
						buf1[l] = buf1[l - 1];
					break;
				}
			}
			if (k < ARRAY_SIZE(buf1))
				buf1[k] = cnt;
		}

		/* Sorted insert of 75th percentile into buf2 */
		for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
			if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
				l = min_t(unsigned int,
					  i, ARRAY_SIZE(buf2) - 1);
				for (; l > k; --l)
					buf2[l] = buf2[l - 1];
				break;
			}
		}
		if (k < ARRAY_SIZE(buf2))
			buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
	}

	/* Use 2 * median of 75th percentiles */
	min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;

	/* Don't go too low */
	if (min_delta < 0x300)
		min_delta = 0x300;

	pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
		 __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
	return min_delta;
}

DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;

/*
 * Possibly handle a performance counter interrupt.
 * Return true if the timer interrupt should not be checked.
 */
static inline int handle_perf_irq(int r2)
{
	/*
	 * The performance counter overflow interrupt may be shared with the
	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
	 * and we can't reliably determine if a counter interrupt has also
	 * happened (!r2) then don't check for a timer interrupt.
	 */
	return (cp0_perfcount_irq < 0) &&
		perf_irq() == IRQ_HANDLED &&
		!r2;
}

irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2_r6;
	struct clock_event_device *cd;
	int cpu = smp_processor_id();

	/*
	 * Suckage alert:
	 * Before R2 of the architecture there was no way to see if a
	 * performance counter interrupt was pending, so we have to run
	 * the performance counter interrupt handler anyway.
	 */
	if (handle_perf_irq(r2))
		return IRQ_HANDLED;

	/*
	 * The same applies to performance counter interrupts.  But with the
	 * above we now know that the reason we got here must be a timer
	 * interrupt.  Being the paranoiacs we are we check anyway.
	 */
	if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		cd = &per_cpu(mips_clockevent_device, cpu);
		cd->event_handler(cd);

		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

struct irqaction c0_compare_irqaction = {
	.handler = c0_compare_interrupt,
	/*
	 * IRQF_SHARED: The timer interrupt may be shared with other
	 * interrupts such as perf counter and FDC interrupts.
	 */
	.flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
	.name = "timer",
};

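/*
 * Placeholder event handler, installed at registration time; the
 * clockevents core replaces it with a real handler when the device is
 * put into service.
 */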
void mips_event_handler(struct clock_event_device *dev)
{
}

/*
 * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
 */
static int c0_compare_int_pending(void)
{
	/* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
}

/*
 * The compare interrupt can be routed and latched outside the core, so
 * wait up to a worst-case number of cycle counter ticks for timer
 * interrupt changes to propagate to the cause register.
 */
#define COMPARE_INT_SEEN_TICKS 50

| 192 | int c0_compare_int_usable(void) |
Ralf Baechle | 42f7754 | 2007-10-18 17:48:11 +0100 | [diff] [blame] | 193 | { |
Atsushi Nemoto | 3a6c43a | 2007-10-23 21:55:42 +0900 | [diff] [blame] | 194 | unsigned int delta; |
Ralf Baechle | 42f7754 | 2007-10-18 17:48:11 +0100 | [diff] [blame] | 195 | unsigned int cnt; |
| 196 | |
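	/*
	 * Skip the probing below when running as a KVM guest: the timer is
	 * emulated by the hypervisor and is assumed to be usable.
	 */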
#ifdef CONFIG_KVM_GUEST
	return 1;
#endif

	/*
	 * IP7 already pending?  Try to clear it by acking the timer.
	 */
	if (c0_compare_int_pending()) {
		cnt = read_c0_count();
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
			if (!c0_compare_int_pending())
				break;
		if (c0_compare_int_pending())
			return 0;
	}

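	/*
	 * Probe for a delta large enough that CP0_Count has not already
	 * passed CP0_Compare by the time the write takes effect, doubling
	 * the delta on each failed attempt.
	 */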
	for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
		cnt = read_c0_count();
		cnt += delta;
		write_c0_compare(cnt);
		back_to_back_c0_hazard();
		if ((int)(read_c0_count() - cnt) < 0)
			break;
		/* increase delta if the timer was already expired */
	}

	while ((int)(read_c0_count() - cnt) <= 0)
		;	/* Wait for expiry */

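	/*
	 * The timer has now expired, so the interrupt should be asserted;
	 * give it COMPARE_INT_SEEN_TICKS to become visible in the cause
	 * register.
	 */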
	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (c0_compare_int_pending())
			break;
	if (!c0_compare_int_pending())
		return 0;
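	/*
	 * Ack the interrupt by writing CP0_Compare and check that it
	 * deasserts within the propagation window.
	 */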
	cnt = read_c0_count();
	write_c0_compare(cnt);
	back_to_back_c0_hazard();
	while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
		if (!c0_compare_int_pending())
			break;
	if (c0_compare_int_pending())
		return 0;

	/*
	 * Feels like a real count / compare timer.
	 */
	return 1;
}

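/*
 * Default compare interrupt number; platforms that route the timer
 * interrupt elsewhere override this weak hook.
 */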
unsigned int __weak get_c0_compare_int(void)
{
	return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
}

int r4k_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq, min_delta;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;

	if (!c0_compare_int_usable())
		return -ENXIO;

	/*
	 * With vectored interrupts things get platform specific.
	 * get_c0_compare_int() is a hook to allow a platform to return the
	 * interrupt number of its liking.
	 */
	irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name = "MIPS";
	cd->features = CLOCK_EVT_FEAT_ONESHOT |
		       CLOCK_EVT_FEAT_C3STOP |
		       CLOCK_EVT_FEAT_PERCPU;

	min_delta = calculate_min_delta();

	cd->rating = 300;
	cd->irq = irq;
	cd->cpumask = cpumask_of(cpu);
	cd->set_next_event = mips_next_event;
	cd->event_handler = mips_event_handler;

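	/*
	 * The maximum delta of 0x7fffffff keeps (CP0_Compare - CP0_Count)
	 * within the range where the signed 32-bit comparison in
	 * mips_next_event() remains meaningful.
	 */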
	clockevents_config_and_register(cd, mips_hpt_frequency, min_delta,
					0x7fffffff);

	if (cp0_timer_irq_installed)
		return 0;

	cp0_timer_irq_installed = 1;

	setup_irq(irq, &c0_compare_irqaction);

	return 0;
}