/*
 * SuperH On-Chip RTC Support
 *
 * Copyright (C) 2006 - 2009  Paul Mundt
 * Copyright (C) 2006  Jamie Lenehan
 * Copyright (C) 2008  Angelo Castello
 *
 * Based on the old arch/sh/kernel/cpu/rtc.c by:
 *
 *  Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
 *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/bcd.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/clk.h>
#include <linux/slab.h>
#ifdef CONFIG_SUPERH
#include <asm/rtc.h>
#else
/* Default values for RZ/A RTC */
#define rtc_reg_size sizeof(u16)
#define RTC_BIT_INVERTED 0 /* no chip bugs */
#define RTC_CAP_4_DIGIT_YEAR (1 << 0)
#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
#endif

#define DRV_NAME "sh-rtc"

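/*
 * The counter/alarm registers are byte-wide but spaced rtc_reg_size apart
 * (sizeof(u16) on RZ/A, provided via <asm/rtc.h> on SuperH), so RTC_REG()
 * converts a register index into a byte offset from the ioremapped base.
 */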
#define RTC_REG(r) ((r) * rtc_reg_size)

#define R64CNT  RTC_REG(0)

#define RSECCNT RTC_REG(1)  /* RTC sec */
#define RMINCNT RTC_REG(2)  /* RTC min */
#define RHRCNT  RTC_REG(3)  /* RTC hour */
#define RWKCNT  RTC_REG(4)  /* RTC week */
#define RDAYCNT RTC_REG(5)  /* RTC day */
#define RMONCNT RTC_REG(6)  /* RTC month */
#define RYRCNT  RTC_REG(7)  /* RTC year */
#define RSECAR  RTC_REG(8)  /* ALARM sec */
#define RMINAR  RTC_REG(9)  /* ALARM min */
#define RHRAR   RTC_REG(10) /* ALARM hour */
#define RWKAR   RTC_REG(11) /* ALARM week */
#define RDAYAR  RTC_REG(12) /* ALARM day */
#define RMONAR  RTC_REG(13) /* ALARM month */
#define RCR1    RTC_REG(14) /* Control */
#define RCR2    RTC_REG(15) /* Control */

/*
 * Note on RYRAR and RCR3: Up until this point most of the register
 * definitions are consistent across all of the available parts. However,
 * the placement of the optional RYRAR and RCR3 (the RYRAR control
 * register used to control RYRCNT/RYRAR compare) varies considerably
 * across various parts, occasionally being mapped into a completely
 * unrelated address space. For proper RYRAR support a separate resource
 * would have to be handed off, but as this is purely optional in
 * practice, we simply opt not to support it, thereby keeping the code
 * quite a bit simpler.
 */

/* ALARM Bits - or with BCD encoded value */
#define AR_ENB  0x80 /* Enable for alarm cmp */

/* Period Bits */
#define PF_HP    0x100 /* Enable Half Period to support 8,32,128Hz */
#define PF_COUNT 0x200 /* Half periodic counter */
#define PF_OXS   0x400 /* Periodic One x Second */
#define PF_KOU   0x800 /* Kernel or User periodic request 1=kernel */
#define PF_MASK  0xf00

/* RCR1 Bits */
#define RCR1_CF  0x80 /* Carry Flag */
#define RCR1_CIE 0x10 /* Carry Interrupt Enable */
#define RCR1_AIE 0x08 /* Alarm Interrupt Enable */
#define RCR1_AF  0x01 /* Alarm Flag */

/* RCR2 Bits */
#define RCR2_PEF     0x80 /* PEriodic interrupt Flag */
#define RCR2_PESMASK 0x70 /* Periodic interrupt Set */
#define RCR2_RTCEN   0x08 /* ENable RTC */
#define RCR2_ADJ     0x04 /* ADJustment (30-second) */
#define RCR2_RESET   0x02 /* Reset bit */
#define RCR2_START   0x01 /* Start bit */

struct sh_rtc {
        void __iomem *regbase;
        unsigned long regsize;
        struct resource *res;
        int alarm_irq;
        int periodic_irq;
        int carry_irq;
        struct clk *clk;
        struct rtc_device *rtc_dev;
        spinlock_t lock;
        unsigned long capabilities; /* See asm/rtc.h for cap bits */
        unsigned short periodic_freq;
};

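/*
 * The __sh_rtc_* helpers below service the carry, alarm and periodic
 * events respectively: each clears the relevant flag in RCR1/RCR2,
 * notifies the RTC core when appropriate, and returns non-zero only if
 * the event was actually pending so the IRQ handlers can report
 * IRQ_HANDLED accurately. Callers must hold rtc->lock.
 */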
static int __sh_rtc_interrupt(struct sh_rtc *rtc)
{
        unsigned int tmp, pending;

        tmp = readb(rtc->regbase + RCR1);
        pending = tmp & RCR1_CF;
        tmp &= ~RCR1_CF;
        writeb(tmp, rtc->regbase + RCR1);

        /* Users have requested One x Second IRQ */
        if (pending && rtc->periodic_freq & PF_OXS)
                rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF);

        return pending;
}

static int __sh_rtc_alarm(struct sh_rtc *rtc)
{
        unsigned int tmp, pending;

        tmp = readb(rtc->regbase + RCR1);
        pending = tmp & RCR1_AF;
        tmp &= ~(RCR1_AF | RCR1_AIE);
        writeb(tmp, rtc->regbase + RCR1);

        if (pending)
                rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF);

        return pending;
}

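/*
 * Periodic interrupts: the hardware PES settings do not provide 8, 32 or
 * 128 Hz directly, so those rates are flagged with PF_HP and synthesized
 * from the 16/64/256 Hz settings by delivering every other tick; PF_COUNT
 * tracks whether the current tick is the one to be skipped.
 */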
static int __sh_rtc_periodic(struct sh_rtc *rtc)
{
        struct rtc_device *rtc_dev = rtc->rtc_dev;
        struct rtc_task *irq_task;
        unsigned int tmp, pending;

        tmp = readb(rtc->regbase + RCR2);
        pending = tmp & RCR2_PEF;
        tmp &= ~RCR2_PEF;
        writeb(tmp, rtc->regbase + RCR2);

        if (!pending)
                return 0;

        /* With half-period enabled, one tick is skipped and the next is notified */
        if ((rtc->periodic_freq & PF_HP) && (rtc->periodic_freq & PF_COUNT))
                rtc->periodic_freq &= ~PF_COUNT;
        else {
                if (rtc->periodic_freq & PF_HP)
                        rtc->periodic_freq |= PF_COUNT;
                if (rtc->periodic_freq & PF_KOU) {
                        spin_lock(&rtc_dev->irq_task_lock);
                        irq_task = rtc_dev->irq_task;
                        if (irq_task)
                                irq_task->func(irq_task->private_data);
                        spin_unlock(&rtc_dev->irq_task_lock);
                } else
                        rtc_update_irq(rtc->rtc_dev, 1, RTC_PF | RTC_IRQF);
        }

        return pending;
}

static irqreturn_t sh_rtc_interrupt(int irq, void *dev_id)
{
        struct sh_rtc *rtc = dev_id;
        int ret;

        spin_lock(&rtc->lock);
        ret = __sh_rtc_interrupt(rtc);
        spin_unlock(&rtc->lock);

        return IRQ_RETVAL(ret);
}

static irqreturn_t sh_rtc_alarm(int irq, void *dev_id)
{
        struct sh_rtc *rtc = dev_id;
        int ret;

        spin_lock(&rtc->lock);
        ret = __sh_rtc_alarm(rtc);
        spin_unlock(&rtc->lock);

        return IRQ_RETVAL(ret);
}

static irqreturn_t sh_rtc_periodic(int irq, void *dev_id)
{
        struct sh_rtc *rtc = dev_id;
        int ret;

        spin_lock(&rtc->lock);
        ret = __sh_rtc_periodic(rtc);
        spin_unlock(&rtc->lock);

        return IRQ_RETVAL(ret);
}

static irqreturn_t sh_rtc_shared(int irq, void *dev_id)
{
        struct sh_rtc *rtc = dev_id;
        int ret;

        spin_lock(&rtc->lock);
        ret = __sh_rtc_interrupt(rtc);
        ret |= __sh_rtc_alarm(rtc);
        ret |= __sh_rtc_periodic(rtc);
        spin_unlock(&rtc->lock);

        return IRQ_RETVAL(ret);
}

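/*
 * sh_rtc_irq_set_state()/sh_rtc_irq_set_freq() manage kernel-requested
 * periodic interrupts: the requested rate is cached in periodic_freq
 * (with PF_KOU marking a kernel rather than userspace request) and the
 * corresponding PES bits are programmed into RCR2.
 */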
static int sh_rtc_irq_set_state(struct device *dev, int enable)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        unsigned int tmp;

        spin_lock_irq(&rtc->lock);

        tmp = readb(rtc->regbase + RCR2);

        if (enable) {
                rtc->periodic_freq |= PF_KOU;
                tmp &= ~RCR2_PEF; /* Clear PEF bit */
                tmp |= (rtc->periodic_freq & ~PF_HP); /* Set PES2-0 */
        } else {
                rtc->periodic_freq &= ~PF_KOU;
                tmp &= ~(RCR2_PESMASK | RCR2_PEF);
        }

        writeb(tmp, rtc->regbase + RCR2);

        spin_unlock_irq(&rtc->lock);

        return 0;
}

static int sh_rtc_irq_set_freq(struct device *dev, int freq)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        int tmp, ret = 0;

        spin_lock_irq(&rtc->lock);
        tmp = rtc->periodic_freq & PF_MASK;

        switch (freq) {
        case 0:
                rtc->periodic_freq = 0x00;
                break;
        case 1:
                rtc->periodic_freq = 0x60;
                break;
        case 2:
                rtc->periodic_freq = 0x50;
                break;
        case 4:
                rtc->periodic_freq = 0x40;
                break;
        case 8:
                rtc->periodic_freq = 0x30 | PF_HP;
                break;
        case 16:
                rtc->periodic_freq = 0x30;
                break;
        case 32:
                rtc->periodic_freq = 0x20 | PF_HP;
                break;
        case 64:
                rtc->periodic_freq = 0x20;
                break;
        case 128:
                rtc->periodic_freq = 0x10 | PF_HP;
                break;
        case 256:
                rtc->periodic_freq = 0x10;
                break;
        default:
                ret = -ENOTSUPP;
        }

        if (ret == 0)
                rtc->periodic_freq |= tmp;

        spin_unlock_irq(&rtc->lock);
        return ret;
}

static inline void sh_rtc_setaie(struct device *dev, unsigned int enable)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        unsigned int tmp;

        spin_lock_irq(&rtc->lock);

        tmp = readb(rtc->regbase + RCR1);

        if (enable)
                tmp |= RCR1_AIE;
        else
                tmp &= ~RCR1_AIE;

        writeb(tmp, rtc->regbase + RCR1);

        spin_unlock_irq(&rtc->lock);
}

static int sh_rtc_proc(struct device *dev, struct seq_file *seq)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        unsigned int tmp;

        tmp = readb(rtc->regbase + RCR1);
        seq_printf(seq, "carry_IRQ\t: %s\n", (tmp & RCR1_CIE) ? "yes" : "no");

        tmp = readb(rtc->regbase + RCR2);
        seq_printf(seq, "periodic_IRQ\t: %s\n",
                   (tmp & RCR2_PESMASK) ? "yes" : "no");

        return 0;
}

static inline void sh_rtc_setcie(struct device *dev, unsigned int enable)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        unsigned int tmp;

        spin_lock_irq(&rtc->lock);

        tmp = readb(rtc->regbase + RCR1);

        if (!enable)
                tmp &= ~RCR1_CIE;
        else
                tmp |= RCR1_CIE;

        writeb(tmp, rtc->regbase + RCR1);

        spin_unlock_irq(&rtc->lock);
}

static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
        sh_rtc_setaie(dev, enabled);
        return 0;
}

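/*
 * Time is read in a retry loop: R64CNT is sampled before and after the
 * counter registers, and the read is restarted if the carry flag was
 * raised in between or if the R64CNT "inverted" bit changed, so that a
 * consistent snapshot is returned even if the RTC ticked mid-read.
 */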
static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        unsigned int sec128, sec2, yr, yr100, cf_bit;

        do {
                unsigned int tmp;

                spin_lock_irq(&rtc->lock);

                tmp = readb(rtc->regbase + RCR1);
                tmp &= ~RCR1_CF; /* Clear CF-bit */
                tmp |= RCR1_CIE;
                writeb(tmp, rtc->regbase + RCR1);

                sec128 = readb(rtc->regbase + R64CNT);

                tm->tm_sec = bcd2bin(readb(rtc->regbase + RSECCNT));
                tm->tm_min = bcd2bin(readb(rtc->regbase + RMINCNT));
                tm->tm_hour = bcd2bin(readb(rtc->regbase + RHRCNT));
                tm->tm_wday = bcd2bin(readb(rtc->regbase + RWKCNT));
                tm->tm_mday = bcd2bin(readb(rtc->regbase + RDAYCNT));
                tm->tm_mon = bcd2bin(readb(rtc->regbase + RMONCNT)) - 1;

                if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
                        yr = readw(rtc->regbase + RYRCNT);
                        yr100 = bcd2bin(yr >> 8);
                        yr &= 0xff;
                } else {
                        yr = readb(rtc->regbase + RYRCNT);
                        yr100 = bcd2bin((yr == 0x99) ? 0x19 : 0x20);
                }

                tm->tm_year = (yr100 * 100 + bcd2bin(yr)) - 1900;

                sec2 = readb(rtc->regbase + R64CNT);
                cf_bit = readb(rtc->regbase + RCR1) & RCR1_CF;

                spin_unlock_irq(&rtc->lock);
        } while (cf_bit != 0 || ((sec128 ^ sec2) & RTC_BIT_INVERTED) != 0);

#if RTC_BIT_INVERTED != 0
        if ((sec128 & RTC_BIT_INVERTED))
                tm->tm_sec--;
#endif

        /* only keep the carry interrupt enabled if UIE is on */
        if (!(rtc->periodic_freq & PF_OXS))
                sh_rtc_setcie(dev, 0);

        dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
                __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);

        return 0;
}

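/*
 * Setting the time stops the counter chain first (START cleared, RESET
 * set to reset the pre-scaler), writes the calendar registers in BCD,
 * and then re-enables and restarts the RTC.
 */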
static int sh_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        unsigned int tmp;
        int year;

        spin_lock_irq(&rtc->lock);

        /* Reset pre-scaler & stop RTC */
        tmp = readb(rtc->regbase + RCR2);
        tmp |= RCR2_RESET;
        tmp &= ~RCR2_START;
        writeb(tmp, rtc->regbase + RCR2);

        writeb(bin2bcd(tm->tm_sec), rtc->regbase + RSECCNT);
        writeb(bin2bcd(tm->tm_min), rtc->regbase + RMINCNT);
        writeb(bin2bcd(tm->tm_hour), rtc->regbase + RHRCNT);
        writeb(bin2bcd(tm->tm_wday), rtc->regbase + RWKCNT);
        writeb(bin2bcd(tm->tm_mday), rtc->regbase + RDAYCNT);
        writeb(bin2bcd(tm->tm_mon + 1), rtc->regbase + RMONCNT);

        if (rtc->capabilities & RTC_CAP_4_DIGIT_YEAR) {
                year = (bin2bcd((tm->tm_year + 1900) / 100) << 8) |
                        bin2bcd(tm->tm_year % 100);
                writew(year, rtc->regbase + RYRCNT);
        } else {
                year = tm->tm_year % 100;
                writeb(bin2bcd(year), rtc->regbase + RYRCNT);
        }

        /* Start RTC */
        tmp = readb(rtc->regbase + RCR2);
        tmp &= ~RCR2_RESET;
        tmp |= RCR2_RTCEN | RCR2_START;
        writeb(tmp, rtc->regbase + RCR2);

        spin_unlock_irq(&rtc->lock);

        return 0;
}

static inline int sh_rtc_read_alarm_value(struct sh_rtc *rtc, int reg_off)
{
        unsigned int byte;
        int value = 0xff; /* return 0xff for ignored values */

        byte = readb(rtc->regbase + reg_off);
        if (byte & AR_ENB) {
                byte &= ~AR_ENB; /* strip the enable bit */
                value = bcd2bin(byte);
        }

        return value;
}

static int sh_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        struct rtc_time *tm = &wkalrm->time;

        spin_lock_irq(&rtc->lock);

        tm->tm_sec = sh_rtc_read_alarm_value(rtc, RSECAR);
        tm->tm_min = sh_rtc_read_alarm_value(rtc, RMINAR);
        tm->tm_hour = sh_rtc_read_alarm_value(rtc, RHRAR);
        tm->tm_wday = sh_rtc_read_alarm_value(rtc, RWKAR);
        tm->tm_mday = sh_rtc_read_alarm_value(rtc, RDAYAR);
        tm->tm_mon = sh_rtc_read_alarm_value(rtc, RMONAR);
        if (tm->tm_mon > 0)
                tm->tm_mon -= 1; /* RTC is 1-12, tm_mon is 0-11 */

        wkalrm->enabled = (readb(rtc->regbase + RCR1) & RCR1_AIE) ? 1 : 0;

        spin_unlock_irq(&rtc->lock);

        return 0;
}

static inline void sh_rtc_write_alarm_value(struct sh_rtc *rtc,
                                            int value, int reg_off)
{
        /* < 0 for a value that is ignored */
        if (value < 0)
                writeb(0, rtc->regbase + reg_off);
        else
                writeb(bin2bcd(value) | AR_ENB, rtc->regbase + reg_off);
}

static int sh_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);
        unsigned int rcr1;
        struct rtc_time *tm = &wkalrm->time;
        int mon;

        spin_lock_irq(&rtc->lock);

        /* disable alarm interrupt and clear the alarm flag */
        rcr1 = readb(rtc->regbase + RCR1);
        rcr1 &= ~(RCR1_AF | RCR1_AIE);
        writeb(rcr1, rtc->regbase + RCR1);

        /* set alarm time */
        sh_rtc_write_alarm_value(rtc, tm->tm_sec, RSECAR);
        sh_rtc_write_alarm_value(rtc, tm->tm_min, RMINAR);
        sh_rtc_write_alarm_value(rtc, tm->tm_hour, RHRAR);
        sh_rtc_write_alarm_value(rtc, tm->tm_wday, RWKAR);
        sh_rtc_write_alarm_value(rtc, tm->tm_mday, RDAYAR);
        mon = tm->tm_mon;
        if (mon >= 0)
                mon += 1;
        sh_rtc_write_alarm_value(rtc, mon, RMONAR);

        if (wkalrm->enabled) {
                rcr1 |= RCR1_AIE;
                writeb(rcr1, rtc->regbase + RCR1);
        }

        spin_unlock_irq(&rtc->lock);

        return 0;
}

static const struct rtc_class_ops sh_rtc_ops = {
        .read_time      = sh_rtc_read_time,
        .set_time       = sh_rtc_set_time,
        .read_alarm     = sh_rtc_read_alarm,
        .set_alarm      = sh_rtc_set_alarm,
        .proc           = sh_rtc_proc,
        .alarm_irq_enable = sh_rtc_alarm_irq_enable,
};

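/*
 * Probe: the register window may be handed in as an IORESOURCE_IO
 * resource (classic SuperH) or as IORESOURCE_MEM (e.g. DT-based RZ/A
 * parts), and the platform may provide either a single shared IRQ or
 * separate periodic/carry/alarm IRQs.
 */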
static int __init sh_rtc_probe(struct platform_device *pdev)
{
        struct sh_rtc *rtc;
        struct resource *res;
        struct rtc_time r;
        char clk_name[6];
        int clk_id, ret;

        rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
        if (unlikely(!rtc))
                return -ENOMEM;

        spin_lock_init(&rtc->lock);

        /* get periodic/carry/alarm irqs */
        ret = platform_get_irq(pdev, 0);
        if (unlikely(ret <= 0)) {
                dev_err(&pdev->dev, "No IRQ resource\n");
                return -ENOENT;
        }

        rtc->periodic_irq = ret;
        rtc->carry_irq = platform_get_irq(pdev, 1);
        rtc->alarm_irq = platform_get_irq(pdev, 2);

        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
        if (!res)
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (unlikely(res == NULL)) {
                dev_err(&pdev->dev, "No IO resource\n");
                return -ENOENT;
        }

        rtc->regsize = resource_size(res);

        rtc->res = devm_request_mem_region(&pdev->dev, res->start,
                                           rtc->regsize, pdev->name);
        if (unlikely(!rtc->res))
                return -EBUSY;

        rtc->regbase = devm_ioremap_nocache(&pdev->dev, rtc->res->start,
                                            rtc->regsize);
        if (unlikely(!rtc->regbase))
                return -EINVAL;

        if (!pdev->dev.of_node) {
                clk_id = pdev->id;
                /* With a single device, the clock id is still "rtc0" */
                if (clk_id < 0)
                        clk_id = 0;

                snprintf(clk_name, sizeof(clk_name), "rtc%d", clk_id);
        } else
                snprintf(clk_name, sizeof(clk_name), "fck");

        rtc->clk = devm_clk_get(&pdev->dev, clk_name);
        if (IS_ERR(rtc->clk)) {
                /*
                 * No error handling for rtc->clk intentionally, not all
                 * platforms will have a unique clock for the RTC, and
                 * the clk API can handle the struct clk pointer being
                 * NULL.
                 */
                rtc->clk = NULL;
        }

        clk_enable(rtc->clk);

        rtc->capabilities = RTC_DEF_CAPABILITIES;

#ifdef CONFIG_SUPERH
        if (dev_get_platdata(&pdev->dev)) {
                struct sh_rtc_platform_info *pinfo =
                        dev_get_platdata(&pdev->dev);

                /*
                 * Some CPUs have special capabilities in addition to the
                 * default set. Add those in here.
                 */
                rtc->capabilities |= pinfo->capabilities;
        }
#endif

        if (rtc->carry_irq <= 0) {
                /* register shared periodic/carry/alarm irq */
                ret = devm_request_irq(&pdev->dev, rtc->periodic_irq,
                                       sh_rtc_shared, 0, "sh-rtc", rtc);
                if (unlikely(ret)) {
                        dev_err(&pdev->dev,
                                "request IRQ failed with %d, IRQ %d\n", ret,
                                rtc->periodic_irq);
                        goto err_unmap;
                }
        } else {
                /* register periodic/carry/alarm irqs */
                ret = devm_request_irq(&pdev->dev, rtc->periodic_irq,
                                       sh_rtc_periodic, 0, "sh-rtc period", rtc);
                if (unlikely(ret)) {
                        dev_err(&pdev->dev,
                                "request period IRQ failed with %d, IRQ %d\n",
                                ret, rtc->periodic_irq);
                        goto err_unmap;
                }

                ret = devm_request_irq(&pdev->dev, rtc->carry_irq,
                                       sh_rtc_interrupt, 0, "sh-rtc carry", rtc);
                if (unlikely(ret)) {
                        dev_err(&pdev->dev,
                                "request carry IRQ failed with %d, IRQ %d\n",
                                ret, rtc->carry_irq);
                        goto err_unmap;
                }

                ret = devm_request_irq(&pdev->dev, rtc->alarm_irq,
                                       sh_rtc_alarm, 0, "sh-rtc alarm", rtc);
                if (unlikely(ret)) {
                        dev_err(&pdev->dev,
                                "request alarm IRQ failed with %d, IRQ %d\n",
                                ret, rtc->alarm_irq);
                        goto err_unmap;
                }
        }

        platform_set_drvdata(pdev, rtc);

        /* everything disabled by default */
        sh_rtc_irq_set_freq(&pdev->dev, 0);
        sh_rtc_irq_set_state(&pdev->dev, 0);
        sh_rtc_setaie(&pdev->dev, 0);
        sh_rtc_setcie(&pdev->dev, 0);

        rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, "sh",
                                                &sh_rtc_ops, THIS_MODULE);
        if (IS_ERR(rtc->rtc_dev)) {
                ret = PTR_ERR(rtc->rtc_dev);
                goto err_unmap;
        }

        rtc->rtc_dev->max_user_freq = 256;

        /* reset rtc to epoch 0 if time is invalid */
        if (rtc_read_time(rtc->rtc_dev, &r) < 0) {
                rtc_time_to_tm(0, &r);
                rtc_set_time(rtc->rtc_dev, &r);
        }

        device_init_wakeup(&pdev->dev, 1);
        return 0;

err_unmap:
        clk_disable(rtc->clk);

        return ret;
}

static int __exit sh_rtc_remove(struct platform_device *pdev)
{
        struct sh_rtc *rtc = platform_get_drvdata(pdev);

        sh_rtc_irq_set_state(&pdev->dev, 0);

        sh_rtc_setaie(&pdev->dev, 0);
        sh_rtc_setcie(&pdev->dev, 0);

        clk_disable(rtc->clk);

        return 0;
}

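/*
 * Wakeup handling: flag the RTC interrupt line(s) as wakeup sources for
 * suspend. When a single shared IRQ is used, carry_irq is <= 0 and only
 * the shared line is flagged.
 */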
static void sh_rtc_set_irq_wake(struct device *dev, int enabled)
{
        struct sh_rtc *rtc = dev_get_drvdata(dev);

        irq_set_irq_wake(rtc->periodic_irq, enabled);

        if (rtc->carry_irq > 0) {
                irq_set_irq_wake(rtc->carry_irq, enabled);
                irq_set_irq_wake(rtc->alarm_irq, enabled);
        }
}

static int __maybe_unused sh_rtc_suspend(struct device *dev)
{
        if (device_may_wakeup(dev))
                sh_rtc_set_irq_wake(dev, 1);

        return 0;
}

static int __maybe_unused sh_rtc_resume(struct device *dev)
{
        if (device_may_wakeup(dev))
                sh_rtc_set_irq_wake(dev, 0);

        return 0;
}

static SIMPLE_DEV_PM_OPS(sh_rtc_pm_ops, sh_rtc_suspend, sh_rtc_resume);

static const struct of_device_id sh_rtc_of_match[] = {
        { .compatible = "renesas,sh-rtc", },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sh_rtc_of_match);

static struct platform_driver sh_rtc_platform_driver = {
        .driver         = {
                .name   = DRV_NAME,
                .pm     = &sh_rtc_pm_ops,
                .of_match_table = sh_rtc_of_match,
        },
        .remove         = __exit_p(sh_rtc_remove),
};

module_platform_driver_probe(sh_rtc_platform_driver, sh_rtc_probe);

MODULE_DESCRIPTION("SuperH on-chip RTC driver");
MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, "
              "Jamie Lenehan <lenehan@twibble.org>, "
              "Angelo Castello <angelo.castello@st.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);