blob: fe8d8930ccb620a649e825157872ea09675bbebb [file] [log] [blame]
Paul Mundtaa016662006-01-16 22:14:18 -08001/*
2 * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
3 *
Paul Mundt57be2b42007-05-09 17:33:24 +09004 * Copyright (C) 2005 - 2007 Paul Mundt
Paul Mundtaa016662006-01-16 22:14:18 -08005 *
6 * TMU handling code hacked out of arch/sh/kernel/time.c
7 *
8 * Copyright (C) 1999 Tetsuya Okada & Niibe Yutaka
9 * Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
10 * Copyright (C) 2002, 2003, 2004 Paul Mundt
11 * Copyright (C) 2002 M. R. Brown <mrbrown@linux-sh.org>
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/interrupt.h>
Paul Mundtaa016662006-01-16 22:14:18 -080020#include <linux/seqlock.h>
Paul Mundt57be2b42007-05-09 17:33:24 +090021#include <linux/clockchips.h>
Paul Mundtaa016662006-01-16 22:14:18 -080022#include <asm/timer.h>
23#include <asm/rtc.h>
24#include <asm/io.h>
25#include <asm/irq.h>
26#include <asm/clock.h>
27
/* Timer output control register: no output inversion. */
#define TMU_TOCR_INIT	0x00
/* Timer control register init: bit 5 set (interrupt-enable, see
 * _tmu_set_irq()), prescaler field (low 3 bits) = 0, i.e. module_clk/4
 * as used by tmu_clk_init(). */
#define TMU_TCR_INIT	0x0020

/* TMU channel numbers: TMU0 = clock event device, TMU1 = clocksource. */
#define TMU0 (0)
#define TMU1 (1)
33
34static inline void _tmu_start(int tmu_num)
35{
36 ctrl_outb(ctrl_inb(TMU_012_TSTR) | (0x1<<tmu_num), TMU_012_TSTR);
37}
38
39static inline void _tmu_set_irq(int tmu_num, int enabled)
40{
41 register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
42 ctrl_outw( (enabled ? ctrl_inw(tmu_tcr) | (1<<5) : ctrl_inw(tmu_tcr) & ~(1<<5)), tmu_tcr);
43}
44
45static inline void _tmu_stop(int tmu_num)
46{
47 ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~(0x1<<tmu_num), TMU_012_TSTR);
48}
49
50static inline void _tmu_clear_status(int tmu_num)
51{
52 register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
53 /* Clear UNF bit */
54 ctrl_outw(ctrl_inw(tmu_tcr) & ~0x100, tmu_tcr);
55}
56
57static inline unsigned long _tmu_read(int tmu_num)
58{
59 return ctrl_inl(TMU0_TCNT+0xC*tmu_num);
60}
61
Paul Mundt57be2b42007-05-09 17:33:24 +090062static int tmu_timer_start(void)
Paul Mundtaa016662006-01-16 22:14:18 -080063{
Francesco Virlinzi61c66382008-09-05 16:40:22 +090064 _tmu_start(TMU0);
65 _tmu_start(TMU1);
66 _tmu_set_irq(TMU0,1);
Paul Mundt57be2b42007-05-09 17:33:24 +090067 return 0;
Paul Mundtaa016662006-01-16 22:14:18 -080068}
69
Francesco Virlinzi61c66382008-09-05 16:40:22 +090070static int tmu_timer_stop(void)
Paul Mundt57be2b42007-05-09 17:33:24 +090071{
Francesco Virlinzi61c66382008-09-05 16:40:22 +090072 _tmu_stop(TMU0);
73 _tmu_stop(TMU1);
74 _tmu_clear_status(TMU0);
75 return 0;
76}
77
/*
 * Non-zero when module_clk has been scaled down: tmu_timer_read()
 * shifts the TMU1 reading left by this flag so the clocksource keeps
 * presenting the same apparent frequency.
 */
static int tmus_are_scaled;
83
Magnus Damm8e196082009-04-21 12:24:00 -070084static cycle_t tmu_timer_read(struct clocksource *cs)
Francesco Virlinzi61c66382008-09-05 16:40:22 +090085{
86 return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled;
87}
88
89
90static unsigned long tmu_latest_interval[3];
91static void tmu_timer_set_interval(int tmu_num, unsigned long interval, unsigned int reload)
92{
93 unsigned long tmu_tcnt = TMU0_TCNT + tmu_num*0xC;
94 unsigned long tmu_tcor = TMU0_TCOR + tmu_num*0xC;
95
96 _tmu_stop(tmu_num);
97
98 ctrl_outl(interval, tmu_tcnt);
99 tmu_latest_interval[tmu_num] = interval;
Paul Mundt57be2b42007-05-09 17:33:24 +0900100
101 /*
102 * TCNT reloads from TCOR on underflow, clear it if we don't
103 * intend to auto-reload
104 */
Francesco Virlinzi61c66382008-09-05 16:40:22 +0900105 ctrl_outl( reload ? interval : 0 , tmu_tcor);
Paul Mundt57be2b42007-05-09 17:33:24 +0900106
Francesco Virlinzi61c66382008-09-05 16:40:22 +0900107 _tmu_start(tmu_num);
Paul Mundt57be2b42007-05-09 17:33:24 +0900108}
109
110static int tmu_set_next_event(unsigned long cycles,
111 struct clock_event_device *evt)
112{
Francesco Virlinzi61c66382008-09-05 16:40:22 +0900113 tmu_timer_set_interval(TMU0,cycles, evt->mode == CLOCK_EVT_MODE_PERIODIC);
114 _tmu_set_irq(TMU0,1);
Paul Mundt57be2b42007-05-09 17:33:24 +0900115 return 0;
116}
117
118static void tmu_set_mode(enum clock_event_mode mode,
119 struct clock_event_device *evt)
120{
121 switch (mode) {
122 case CLOCK_EVT_MODE_PERIODIC:
Francesco VIRLINZI2cd0ebc2008-10-15 11:58:24 +0200123 ctrl_outl(tmu_latest_interval[TMU0], TMU0_TCOR);
Paul Mundt57be2b42007-05-09 17:33:24 +0900124 break;
125 case CLOCK_EVT_MODE_ONESHOT:
126 ctrl_outl(0, TMU0_TCOR);
127 break;
128 case CLOCK_EVT_MODE_UNUSED:
129 case CLOCK_EVT_MODE_SHUTDOWN:
Thomas Gleixner18de5bc2007-07-21 04:37:34 -0700130 case CLOCK_EVT_MODE_RESUME:
Paul Mundt57be2b42007-05-09 17:33:24 +0900131 break;
132 }
133}
134
/*
 * Clock event device backed by TMU channel 0.  mult, min/max_delta_ns,
 * rating and cpumask are filled in at runtime by tmu_timer_init() and
 * updated by tmu_clk_recalc().
 */
static struct clock_event_device tmu0_clockevent = {
	.name		= "tmu0",
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= tmu_set_mode,
	.set_next_event	= tmu_set_next_event,
};
142
Paul Mundt35f3c512006-10-06 15:31:16 +0900143static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
Paul Mundtaa016662006-01-16 22:14:18 -0800144{
Paul Mundt57be2b42007-05-09 17:33:24 +0900145 struct clock_event_device *evt = &tmu0_clockevent;
Francesco Virlinzi61c66382008-09-05 16:40:22 +0900146 _tmu_clear_status(TMU0);
147 _tmu_set_irq(TMU0,tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT);
Paul Mundtaa016662006-01-16 22:14:18 -0800148
Magnus Damm70f08002009-01-22 09:55:40 +0000149 switch (tmu0_clockevent.mode) {
150 case CLOCK_EVT_MODE_ONESHOT:
151 case CLOCK_EVT_MODE_PERIODIC:
152 evt->event_handler(evt);
153 break;
154 default:
155 break;
156 }
Paul Mundtaa016662006-01-16 22:14:18 -0800157
158 return IRQ_HANDLED;
159}
160
/* IRQ action for the TMU0 underflow interrupt (hooked in tmu_timer_init). */
static struct irqaction tmu0_irq = {
	.name		= "periodic/oneshot timer",
	.handler	= tmu_timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
};
166
Francesco Virlinzi61c66382008-09-05 16:40:22 +0900167static void __init tmu_clk_init(struct clk *clk)
Paul Mundtaa016662006-01-16 22:14:18 -0800168{
Francesco Virlinzi61c66382008-09-05 16:40:22 +0900169 u8 divisor = TMU_TCR_INIT & 0x7;
170 int tmu_num = clk->name[3]-'0';
171 ctrl_outw(TMU_TCR_INIT, TMU0_TCR+(tmu_num*0xC));
172 clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));
Paul Mundtaa016662006-01-16 22:14:18 -0800173}
174
/*
 * clk_ops recalc hook: recompute a TMU clock's rate after its parent
 * (module_clk) changed and, for TMU0 only, reprogram the clock event
 * device to match.  The reprogramming runs with local IRQs disabled.
 */
static void tmu_clk_recalc(struct clk *clk)
{
	/* Channel number is encoded in the clk name ("tmu0_clk"/"tmu1_clk"). */
	int tmu_num = clk->name[3]-'0';
	unsigned long prev_rate = clk_get_rate(clk);
	unsigned long flags;
	/* Read back the prescaler actually programmed in this channel's TCR. */
	u8 divisor = ctrl_inw(TMU0_TCR+tmu_num*0xC) & 0x7;
	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));

	if(prev_rate==clk_get_rate(clk))
		return;

	if(tmu_num)
		return; /* No more work on TMU1 */

	local_irq_save(flags);
	/* Record the direction of the change for tmu_timer_read(). */
	tmus_are_scaled = (prev_rate > clk->rate);

	_tmu_stop(TMU0);

	/* Rescale the clockevent conversion factors for the new rate. */
	tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC,
				tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	/*
	 * Halve or double the programmed interval so the tick period in
	 * wall time stays the same.  NOTE(review): this assumes the rate
	 * only ever changes by a factor of two — confirm against the
	 * platform's clock scaling.
	 */
	if (tmus_are_scaled)
		tmu_latest_interval[TMU0] >>= 1;
	else
		tmu_latest_interval[TMU0] <<= 1;

	tmu_timer_set_interval(TMU0,
		tmu_latest_interval[TMU0],
		tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC);

	_tmu_start(TMU0);

	local_irq_restore(flags);
}
214
/* Shared clk_ops for both TMU channel clocks. */
static struct clk_ops tmu_clk_ops = {
	.init		= tmu_clk_init,
	.recalc		= tmu_clk_recalc,
};
219
/* Per-channel clocks; .parent is set to module_clk in tmu_timer_init().
 * Note: tmu_clk_init()/tmu_clk_recalc() derive the channel number from
 * the 4th character of these names. */
static struct clk tmu0_clk = {
	.name		= "tmu0_clk",
	.ops		= &tmu_clk_ops,
};

static struct clk tmu1_clk = {
	.name		= "tmu1_clk",
	.ops		= &tmu_clk_ops,
};
229
/*
 * One-time bring-up: hook the TMU0 IRQ, register and enable both TMU
 * clocks, program TMU0 as the HZ tick source and TMU1 as a free-running
 * counter, then register the clocksource and the clock event device
 * with the generic timekeeping layers.
 */
static int tmu_timer_init(void)
{
	unsigned long interval;
	unsigned long frequency;

	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);

	tmu0_clk.parent = clk_get(NULL, "module_clk");
	tmu1_clk.parent = clk_get(NULL, "module_clk");

	tmu_timer_stop();

	/* These subtypes skip the TOCR write — presumably no TMU_TOCR
	 * register on them; verify against the CPU manuals. */
#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7721) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7785) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7786) && \
    !defined(CONFIG_CPU_SUBTYPE_SHX3)
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
#endif

	clk_register(&tmu0_clk);
	clk_register(&tmu1_clk);
	clk_enable(&tmu0_clk);
	clk_enable(&tmu1_clk);

	/* Timer ticks per jiffy, rounded to nearest. */
	frequency = clk_get_rate(&tmu0_clk);
	interval = (frequency + HZ / 2) / HZ;

	/* TMU0: periodic HZ tick; TMU1: maximal free-running countdown. */
	tmu_timer_set_interval(TMU0,interval, 1);
	tmu_timer_set_interval(TMU1,~0,1);

	_tmu_start(TMU1);

	clocksource_sh.rating = 200;
	clocksource_sh.mask = CLOCKSOURCE_MASK(32);
	clocksource_sh.read = tmu_timer_read;
	clocksource_sh.shift = 10;
	clocksource_sh.mult = clocksource_hz2mult(clk_get_rate(&tmu1_clk),
						  clocksource_sh.shift);
	clocksource_sh.flags = CLOCK_SOURCE_IS_CONTINUOUS;
	clocksource_register(&clocksource_sh);

	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
				      tmu0_clockevent.shift);
	/* -1 == all-ones: the largest programmable delta. */
	tmu0_clockevent.max_delta_ns =
			clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
			clockevent_delta2ns(1, &tmu0_clockevent);

	tmu0_clockevent.cpumask = cpumask_of(0);
	tmu0_clockevent.rating = 100;

	clockevents_register_device(&tmu0_clockevent);

	return 0;
}
287
/* sys_timer glue exposing init/start/stop to the SH timer framework. */
static struct sys_timer_ops tmu_timer_ops = {
	.init		= tmu_timer_init,
	.start		= tmu_timer_start,
	.stop		= tmu_timer_stop,
};

struct sys_timer tmu_timer = {
	.name	= "tmu",
	.ops	= &tmu_timer_ops,
};
297};