a37a4b633ba42b511989ebd900e07d59ec37e353
[linux-3.10.git] / drivers / video / tegra / host / t124 / intr_t124.c
1 /*
2  * drivers/video/tegra/host/t124/intr_t124.c
3  *
4  * Tegra Graphics Host Interrupt Management
5  *
6  * Copyright (C) 2010 Google, Inc.
7  * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
8  *
9  * This program is free software; you can redistribute it and/or modify it
10  * under the terms and conditions of the GNU General Public License,
11  * version 2, as published by the Free Software Foundation.
12  *
13  * This program is distributed in the hope it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along with
19  * this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21  */
22
23 #include <linux/interrupt.h>
24 #include <linux/irq.h>
25 #include <linux/io.h>
26 #include <asm/mach/irq.h>
27
28 #include "../nvhost_intr.h"
29 #include "../dev.h"
30
31 #include "t124.h"
32 #include "hardware_t124.h"
33
34 #include "chip_support.h"
35
/* No-op irq_chip .irq_mask hook for the per-syncpt virtual irqs.
 * Masking of threshold interrupts is done through the host1x sync
 * registers elsewhere in this file (see
 * t124_intr_disable_all_syncpt_intrs), so there is nothing to do here.
 */
static void syncpt_thresh_mask(struct irq_data *data)
{
	(void)data;
}
40
/* No-op irq_chip .irq_unmask hook for the per-syncpt virtual irqs.
 * Unmasking is done through the host1x sync registers (see
 * t124_intr_enable_syncpt_intr), so there is nothing to do here.
 */
static void syncpt_thresh_unmask(struct irq_data *data)
{
	(void)data;
}
45
46 static void syncpt_thresh_cascade(unsigned int irq, struct irq_desc *desc)
47 {
48         void __iomem *sync_regs = irq_desc_get_handler_data(desc);
49         unsigned long reg;
50         int i, id;
51         struct irq_chip *chip = irq_desc_get_chip(desc);
52
53         chained_irq_enter(chip, desc);
54
55         for (i = 0; i < INT_SYNCPT_THRESH_NR / 32; i++) {
56                 reg = readl(sync_regs +
57                         host1x_sync_syncpt_thresh_cpu0_int_status_0_r() + i * 4);
58
59                 for_each_set_bit(id, &reg, 32)
60                         generic_handle_irq(id + INT_SYNCPT_THRESH_BASE + i *32);
61         }
62
63         chained_irq_exit(chip, desc);
64 }
65
/* irq_chip backing the per-syncpt virtual irqs created in
 * t124_intr_init_host_sync(); mask/unmask are no-op stubs. */
static struct irq_chip syncpt_thresh_irq = {
	.name           = "syncpt",
	.irq_mask       = syncpt_thresh_mask,
	.irq_unmask     = syncpt_thresh_unmask
};
71
/* One-time setup of host1x sync interrupt delivery: quiesce the
 * hardware, create one virtual irq per syncpt threshold, and cascade
 * them off the single host1x MPCORE interrupt line. */
static void t124_intr_init_host_sync(struct nvhost_intr *intr)
{
	struct nvhost_master *dev = intr_to_dev(intr);
	void __iomem *sync_regs = dev->sync_aperture;
	int i, irq;

	/* start clean: disable all threshold interrupts and clear any
	 * pending CPU0 status bits */
	writel(0xffffffffUL,
		sync_regs + host1x_sync_syncpt_thresh_int_disable_0_r());
	writel(0xffffffffUL,
		sync_regs + host1x_sync_syncpt_thresh_cpu0_int_status_0_r());

	/* one virtual irq per syncpt threshold, handled by
	 * syncpt_thresh_cascade below */
	for (i = 0; i < INT_SYNCPT_THRESH_NR; i++) {
		irq = INT_SYNCPT_THRESH_BASE + i;
		irq_set_chip_and_handler(irq, &syncpt_thresh_irq,
			handle_simple_irq);
		irq_set_chip_data(irq, sync_regs);
		set_irq_flags(irq, IRQF_VALID);
	}
	irq_set_chained_handler(INT_HOST1X_MPCORE_SYNCPT,
		syncpt_thresh_cascade);
	irq_set_handler_data(INT_HOST1X_MPCORE_SYNCPT, sync_regs);

	nvhost_dbg_fn("");
	/* disable the ip_busy_timeout. this prevents write drops, etc.
	 * there's no real way to recover from a hung client anyway.
	 */
	writel(0, sync_regs + host1x_sync_ip_busy_timeout_0_r());

	/* increase the auto-ack timeout to the maximum value.
	 * NOTE(review): 0xff carried over from earlier chips -- the
	 * original "T124?" comment suggests this was never confirmed
	 * to still be the maximum on T124.
	 */
	writel(0xff, sync_regs + host1x_sync_ctxsw_timeout_cfg_0_r());
}
105
106 static void t124_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm)
107 {
108         struct nvhost_master *dev = intr_to_dev(intr);
109         void __iomem *sync_regs = dev->sync_aperture;
110
111         nvhost_dbg_fn("");
112         /* write microsecond clock register */
113         writel(cpm, sync_regs + host1x_sync_usec_clk_0_r());
114 }
115
116 static void t124_intr_set_syncpt_threshold(struct nvhost_intr *intr,
117                                           u32 id, u32 thresh)
118 {
119         struct nvhost_master *dev = intr_to_dev(intr);
120         void __iomem *sync_regs = dev->sync_aperture;
121
122         nvhost_dbg_fn("");
123         thresh &= 0xffff;
124         writel(thresh, sync_regs + (host1x_sync_syncpt_int_thresh_0_0_r() +
125                                     id * 4));
126 }
127
128 static void t124_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
129 {
130         struct nvhost_master *dev = intr_to_dev(intr);
131         void __iomem *sync_regs = dev->sync_aperture;
132         u32 reg_offset = (id / 32) * 4;
133
134         nvhost_dbg_fn("");
135         BUG_ON(reg_offset > (nvhost_syncpt_nb_pts(&dev->syncpt) / 32) * 4);
136         writel(BIT(id & (32 - 1)), sync_regs +
137                 host1x_sync_syncpt_thresh_int_enable_cpu0_0_r() + reg_offset);
138 }
139
140 static void t124_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
141 {
142         struct nvhost_master *dev = intr_to_dev(intr);
143         void __iomem *sync_regs = dev->sync_aperture;
144         u32 reg_offset;
145
146         nvhost_dbg_fn("");
147
148         for (reg_offset = 0;
149              reg_offset <= (nvhost_syncpt_nb_pts(&dev->syncpt) / 32) * 4;
150              reg_offset += 4) {
151
152                 /* disable interrupts for both cpu's */
153                 writel(0, sync_regs +
154                        host1x_sync_syncpt_thresh_int_disable_0_r() +
155                        reg_offset);
156
157                 /* clear status for both cpu's */
158                 writel(0xfffffffful, sync_regs +
159                         host1x_sync_syncpt_thresh_cpu0_int_status_0_r() +
160                        reg_offset);
161
162                 writel(0xfffffffful, sync_regs +
163                         host1x_sync_syncpt_thresh_cpu1_int_status_0_r() +
164                        reg_offset);
165         }
166 }
167
168 /**
169  * Sync point threshold interrupt service function
170  * Handles sync point threshold triggers, in interrupt context
171  */
172 irqreturn_t t124_intr_syncpt_thresh_isr(int irq, void *dev_id)
173 {
174         struct nvhost_intr_syncpt *syncpt = dev_id;
175         unsigned int id = syncpt->id;
176         struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
177         void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
178
179         u32 reg_offset = (id / 32) * 4;
180         id &= 32 - 1;
181
182         nvhost_dbg_fn("");
183         writel(BIT(id),
184                sync_regs +
185                 host1x_sync_syncpt_thresh_int_disable_0_r() + reg_offset);
186         writel(BIT(id),
187                sync_regs +
188                 host1x_sync_syncpt_thresh_cpu0_int_status_0_r() + reg_offset);
189
190         return IRQ_WAKE_THREAD;
191 }
192
193 /**
194  * Host general interrupt service function
195  * Handles read / write failures
196  */
197 static irqreturn_t t124_intr_host1x_isr(int irq, void *dev_id)
198 {
199         struct nvhost_intr *intr = dev_id;
200         void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
201         u32 stat;
202         u32 ext_stat;
203         u32 addr;
204         nvhost_dbg_fn("");
205
206         stat = readl(sync_regs + host1x_sync_hintstatus_0_r());
207         ext_stat = readl(sync_regs + host1x_sync_hintstatus_ext_0_r());
208
209         if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
210                 addr = readl(sync_regs +
211                              host1x_sync_ip_read_timeout_addr_0_r());
212                 pr_err("Host read timeout at address %x\n", addr);
213         }
214
215         if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
216                 addr = readl(sync_regs +
217                              host1x_sync_ip_write_timeout_addr_0_r());
218                 pr_err("Host write timeout at address %x\n", addr);
219         }
220
221         writel(ext_stat, sync_regs + host1x_sync_hintstatus_ext_0_r());
222         writel(stat, sync_regs + host1x_sync_hintstatus_0_r());
223
224         return IRQ_HANDLED;
225 }
226
227 static int t124_intr_request_host_general_irq(struct nvhost_intr *intr)
228 {
229         void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
230         int err;
231         nvhost_dbg_fn("");
232
233         if (intr->host_general_irq_requested)
234                 return 0;
235
236         /* master disable for general (not syncpt) host interrupts */
237         writel(0, sync_regs + host1x_sync_intmask_0_r());
238
239         /* clear status & extstatus */
240         writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_ext_0_r());
241         writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_ext_0_r());
242
243         err = request_irq(intr->host_general_irq, t124_intr_host1x_isr, 0,
244                           "host_status", intr);
245         if (err)
246                 return err;
247         /* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
248         writel(BIT(30) | BIT(31), sync_regs + host1x_sync_hintmask_ext_0_r());
249
250         /* enable extra interrupt sources */
251         writel(BIT(31), sync_regs + host1x_sync_hintmask_0_r());
252
253         /* enable host module interrupt to CPU0 */
254         writel(BIT(0), sync_regs + host1x_sync_intc0mask_0_r());
255
256         /* master enable for general (not syncpt) host interrupts */
257         writel(BIT(0), sync_regs + host1x_sync_intmask_0_r());
258
259         intr->host_general_irq_requested = true;
260
261         return err;
262 }
263
264 static void t124_intr_free_host_general_irq(struct nvhost_intr *intr)
265 {
266         nvhost_dbg_fn("");
267         if (intr->host_general_irq_requested) {
268                 void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
269
270                 /* master disable for general (not syncpt) host interrupts */
271                 writel(0, sync_regs + host1x_sync_intmask_0_r());
272
273                 free_irq(intr->host_general_irq, intr);
274                 intr->host_general_irq_requested = false;
275         }
276 }
277
278 static int t124_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
279 {
280         int err;
281         if (syncpt->irq_requested)
282                 return 0;
283
284         err = request_threaded_irq(syncpt->irq,
285                                    t124_intr_syncpt_thresh_isr, nvhost_syncpt_thresh_fn,
286                                    0, syncpt->thresh_irq_name, syncpt);
287         if (err)
288                 return err;
289
290         syncpt->irq_requested = 1;
291         return 0;
292 }
293
294 int nvhost_init_t124_intr_support(struct nvhost_chip_support *op)
295 {
296         op->intr.init_host_sync = t124_intr_init_host_sync;
297         op->intr.set_host_clocks_per_usec =
298           t124_intr_set_host_clocks_per_usec;
299         op->intr.set_syncpt_threshold = t124_intr_set_syncpt_threshold;
300         op->intr.enable_syncpt_intr = t124_intr_enable_syncpt_intr;
301         op->intr.disable_all_syncpt_intrs =
302           t124_intr_disable_all_syncpt_intrs;
303         op->intr.request_host_general_irq =
304           t124_intr_request_host_general_irq;
305         op->intr.free_host_general_irq =
306           t124_intr_free_host_general_irq;
307         op->intr.request_syncpt_irq =
308                 t124_request_syncpt_irq;
309
310         return 0;
311 }