/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Jan 2011
 *  -sched_clock( ) no longer jiffies based. Uses the same clocksource
 *   as gtod
 *
 * Rajeshwarr/Vineetg: Mar 2008
 *  -Implemented CONFIG_GENERIC_TIME (rather deleted arch specific code)
 *   for arch independent gettimeofday()
 *  -Implemented CONFIG_GENERIC_CLOCKEVENTS as base for hrtimers
 *
 * Vineetg: Mar 2008: Forked off from time.c which now is time-jiff.c
 */

/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1
 * Each can be programmed to go from @count to @limit and optionally
 * interrupt when that happens.
 * A write to Control Register clears the Interrupt
 *
 * We've designated TIMER0 for events (clockevents)
 * while TIMER1 for free running (clocksource)
 *
 * Newer ARC700 cores have a 64bit clk-fetching RTSC insn, preferred over TIMER1,
 * which however is currently broken
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/cpu.h>
#include <asm/irq.h>
#include <asm/arcregs.h>
#include <asm/clk.h>
#include <asm/mach_desc.h>

#include <asm/mcip.h>

/* Timer related Aux registers */
#define ARC_REG_TIMER0_LIMIT	0x23	/* timer 0 limit */
#define ARC_REG_TIMER0_CTRL	0x22	/* timer 0 control */
#define ARC_REG_TIMER0_CNT	0x21	/* timer 0 count */
#define ARC_REG_TIMER1_LIMIT	0x102	/* timer 1 limit */
#define ARC_REG_TIMER1_CTRL	0x101	/* timer 1 control */
#define ARC_REG_TIMER1_CNT	0x100	/* timer 1 count */

#define TIMER_CTRL_IE		(1 << 0) /* Interrupt when Count reaches limit */
#define TIMER_CTRL_NH		(1 << 1) /* Count only when CPU NOT halted */

#define ARC_TIMER_MAX		0xFFFFFFFF

/********** Clock Source Device *********/

#ifdef CONFIG_ARC_HAS_GFRC

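/*
 * GFRC (Global Free Running Counter) lives in the ARConnect/MCIP block
 * shared by all cores, so unlike TIMER1/RTC below it can serve as the
 * clocksource even on SMP; there is nothing to program here, just report
 * it usable.
 */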
static int arc_counter_setup(void)
{
	return 1;
}

static cycle_t arc_counter_read(struct clocksource *cs)
{
	unsigned long flags;
	union {
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 h, l; };
#else
		struct { u32 l, h; };
#endif
		cycle_t  full;
	} stamp;

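	/*
	 * irqs stay off across the two MCIP commands below so that nothing
	 * else can issue an MCIP command and clobber the shared READBACK
	 * register between fetching the LO and HI halves of the counter.
	 */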
	local_irq_save(flags);

	__mcip_cmd(CMD_GFRC_READ_LO, 0);
	stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);

	__mcip_cmd(CMD_GFRC_READ_HI, 0);
	stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);

	local_irq_restore(flags);

	return stamp.full;
}

static struct clocksource arc_counter = {
	.name   = "ARConnect GFRC",
	.rating = 400,
	.read   = arc_counter_read,
	.mask   = CLOCKSOURCE_MASK(64),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

#else

#ifdef CONFIG_ARC_HAS_RTC

#define AUX_RTC_CTRL	0x103
#define AUX_RTC_LOW	0x104
#define AUX_RTC_HIGH	0x105

int arc_counter_setup(void)
{
	write_aux_reg(AUX_RTC_CTRL, 1);

	/* Not usable in SMP */
	return !IS_ENABLED(CONFIG_SMP);
}

static cycle_t arc_counter_read(struct clocksource *cs)
{
	unsigned long status;
	union {
#ifdef CONFIG_CPU_BIG_ENDIAN
		struct { u32 high, low; };
#else
		struct { u32 low, high; };
#endif
		cycle_t  full;
	} stamp;

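	/*
	 * LOW, HIGH and CTRL are read back-to-back; the loop retries until
	 * bit 31 of CTRL reports that the LOW/HIGH pair was sampled
	 * atomically, so the two halves can't straddle a carry/rollover.
	 */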
	__asm__ __volatile(
	"1:					\n"
	"	lr		%0, [AUX_RTC_LOW]	\n"
	"	lr		%1, [AUX_RTC_HIGH]	\n"
	"	lr		%2, [AUX_RTC_CTRL]	\n"
	"	bbit0.nt	%2, 31, 1b	\n"
	: "=r" (stamp.low), "=r" (stamp.high), "=r" (status));

	return stamp.full;
}

static struct clocksource arc_counter = {
	.name   = "ARCv2 RTC",
	.rating = 350,
	.read   = arc_counter_read,
	.mask   = CLOCKSOURCE_MASK(64),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

#else /* !CONFIG_ARC_HAS_RTC */

/*
 * set 32bit TIMER1 to keep counting monotonically and wrap around
 */
int arc_counter_setup(void)
{
	write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMER_MAX);
	write_aux_reg(ARC_REG_TIMER1_CNT, 0);
	write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);

	/* Not usable in SMP */
	return !IS_ENABLED(CONFIG_SMP);
}

static cycle_t arc_counter_read(struct clocksource *cs)
{
	return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
}

static struct clocksource arc_counter = {
	.name   = "ARC Timer1",
	.rating = 300,
	.read   = arc_counter_read,
	.mask   = CLOCKSOURCE_MASK(32),
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};

#endif
#endif

/********** Clock Event Device *********/

static int arc_timer_irq = TIMER0_IRQ;

/*
 * Arm the timer to interrupt after @cycles
 * The distinction for oneshot/periodic is done in timer_irq_handler() below
 */
static void arc_timer_event_setup(unsigned int cycles)
{
	write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
	write_aux_reg(ARC_REG_TIMER0_CNT, 0);	/* start from 0 */

	write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
}

static int arc_clkevent_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	arc_timer_event_setup(delta);
	return 0;
}

static int arc_clkevent_set_periodic(struct clock_event_device *dev)
{
	/*
	 * At X Hz, 1 sec = 1000 ms -> X cycles;
	 * so one tick of 1/HZ sec -> X / HZ cycles (10 ms -> X / 100 at HZ == 100)
	 */
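	/*
	 * Worked example with assumed numbers: a 90 MHz core clock and
	 * HZ == 100 arm TIMER0 for 90,000,000 / 100 = 900,000 cycles/tick.
	 */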
	arc_timer_event_setup(arc_get_core_freq() / HZ);
	return 0;
}

static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
	.name			= "ARC Timer0",
	.features		= CLOCK_EVT_FEAT_ONESHOT |
				  CLOCK_EVT_FEAT_PERIODIC,
	.rating			= 300,
	.set_next_event		= arc_clkevent_set_next_event,
	.set_state_periodic	= arc_clkevent_set_periodic,
};

static irqreturn_t timer_irq_handler(int irq, void *dev_id)
{
	/*
	 * Note that generic IRQ core could have passed @evt for @dev_id if
	 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
	 */
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int irq_reenable = clockevent_state_periodic(evt);

	/*
	 * Any write to CTRL reg ACKs the interrupt; we always rewrite the
	 * Count-when-[N]ot-[H]alted bit, and re-arm the [I]nterrupt [E]nable
	 * bit only if the timer is periodic.
	 */
	write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

static int arc_timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);

	evt->cpumask = cpumask_of(smp_processor_id());

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		clockevents_config_and_register(evt, arc_get_core_freq(),
						0, ULONG_MAX);
		enable_percpu_irq(arc_timer_irq, 0);
		break;
	case CPU_DYING:
		disable_percpu_irq(arc_timer_irq);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block arc_timer_cpu_nb = {
	.notifier_call = arc_timer_cpu_notify,
};

/*
 * clockevent setup for boot CPU
 */
static void __init arc_clockevent_setup(void)
{
	struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
	int ret;

	register_cpu_notifier(&arc_timer_cpu_nb);

	evt->cpumask = cpumask_of(smp_processor_id());
	clockevents_config_and_register(evt, arc_get_core_freq(),
					0, ARC_TIMER_MAX);

	/* Needs a priori irq_set_percpu_devid() done in intc map function */
	ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
				 "Timer0 (per-cpu-tick)", evt);
	if (ret)
		pr_err("Unable to register interrupt\n");

	enable_percpu_irq(arc_timer_irq, 0);
}

/*
 * Called from start_kernel() - boot CPU only
 *
 * -Sets up h/w timers as applicable on boot cpu
 * -Also sets up any global state needed for timer subsystem:
 *    - for "counting" timer, registers a clocksource, usable across CPUs
 *      (provided that underlying counter h/w is synchronized across cores)
 *    - for "event" timer, sets up TIMER0 IRQ (as that is platform agnostic)
 */
void __init time_init(void)
{
	of_clk_init(NULL);
	clocksource_probe();

	/*
	 * Set up the free-running timekeeping counter; setup also reports
	 * whether the counter is usable as a clocksource
	 */
	if (arc_counter_setup())
		/*
		 * A clock rate of up to 4.29 GHz can be safely represented
		 * in 32 bits because the max 32 bit number is 4,294,967,295
		 */
		clocksource_register_hz(&arc_counter, arc_get_core_freq());

	arc_clockevent_setup();
}