video: tegra: host: fix after merge
/*
 * drivers/video/tegra/host/t124/intr_t124.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (C) 2010 Google, Inc.
 * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <asm/mach/irq.h>

#include "../nvhost_intr.h"
#include "../dev.h"

#include "t124.h"
#include "hardware_t124.h"

#include "chip_support.h"

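/*
 * The per-syncpoint virtual IRQs are demultiplexed from the single host1x
 * syncpt interrupt below; enabling and disabling happens through the host1x
 * sync registers (see t124_intr_enable_syncpt_intr() and
 * t124_intr_disable_syncpt_intr()), so the irq_chip mask/unmask callbacks
 * are intentionally empty.
 */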
static void syncpt_thresh_mask(struct irq_data *data)
{
        (void)data;
}

static void syncpt_thresh_unmask(struct irq_data *data)
{
        (void)data;
}

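/*
 * Chained handler for the host1x syncpt interrupt: read each 32-bit CPU0
 * threshold status word and hand every pending bit on to its per-syncpoint
 * virtual IRQ.
 */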
static void syncpt_thresh_cascade(unsigned int irq, struct irq_desc *desc)
{
        void __iomem *sync_regs = irq_desc_get_handler_data(desc);
        unsigned long reg;
        int i, id;
        struct irq_chip *chip = irq_desc_get_chip(desc);

        chained_irq_enter(chip, desc);

        for (i = 0; i < INT_SYNCPT_THRESH_NR / 32; i++) {
                reg = readl(sync_regs +
                        host1x_sync_syncpt_thresh_cpu0_int_status_0_r() + i * 4);

                for_each_set_bit(id, &reg, 32)
                        generic_handle_irq(id + INT_SYNCPT_THRESH_BASE + i * 32);
        }

        chained_irq_exit(chip, desc);
}

static struct irq_chip syncpt_thresh_irq = {
        .name           = "syncpt",
        .irq_mask       = syncpt_thresh_mask,
        .irq_unmask     = syncpt_thresh_unmask,
};

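/*
 * Set up host1x syncpt interrupt delivery: disable and ack all threshold
 * interrupts, register a simple handler for every per-syncpoint virtual
 * IRQ, then chain syncpt_thresh_cascade() onto the host1x syncpt line.
 */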
static void t124_intr_init_host_sync(struct nvhost_intr *intr)
{
        struct nvhost_master *dev = intr_to_dev(intr);
        void __iomem *sync_regs = dev->sync_aperture;
        int i, irq;

        writel(0xffffffffUL,
                sync_regs + host1x_sync_syncpt_thresh_int_disable_0_r());
        writel(0xffffffffUL,
                sync_regs + host1x_sync_syncpt_thresh_cpu0_int_status_0_r());

        for (i = 0; i < INT_SYNCPT_THRESH_NR; i++) {
                irq = INT_SYNCPT_THRESH_BASE + i;
                irq_set_chip_and_handler(irq, &syncpt_thresh_irq,
                        handle_simple_irq);
                irq_set_chip_data(irq, sync_regs);
                set_irq_flags(irq, IRQF_VALID);
        }
        irq_set_chained_handler(INT_HOST1X_MPCORE_SYNCPT,
                syncpt_thresh_cascade);
        irq_set_handler_data(INT_HOST1X_MPCORE_SYNCPT, sync_regs);

        nvhost_dbg_fn("");
        /* disable the ip_busy_timeout. this prevents write drops, etc.
         * there's no real way to recover from a hung client anyway.
         */
        writel(0, sync_regs + host1x_sync_ip_busy_timeout_0_r());

        /* increase the auto-ack timeout to the maximum value.
         *  T124?
         */
        writel(0xff, sync_regs + host1x_sync_ctxsw_timeout_cfg_0_r());
}

static void t124_intr_set_host_clocks_per_usec(struct nvhost_intr *intr, u32 cpm)
{
        struct nvhost_master *dev = intr_to_dev(intr);
        void __iomem *sync_regs = dev->sync_aperture;

        nvhost_dbg_fn("");
        /* write microsecond clock register */
        writel(cpm, sync_regs + host1x_sync_usec_clk_0_r());
}

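/* Program the interrupt threshold for one syncpoint (low 16 bits only). */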
static void t124_intr_set_syncpt_threshold(struct nvhost_intr *intr,
                                          u32 id, u32 thresh)
{
        struct nvhost_master *dev = intr_to_dev(intr);
        void __iomem *sync_regs = dev->sync_aperture;

        nvhost_dbg_fn("");
        thresh &= 0xffff;
        writel(thresh, sync_regs + (host1x_sync_syncpt_int_thresh_0_0_r() +
                                    id * 4));
}

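/* Route the threshold interrupt for one syncpoint to CPU0. */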
static void t124_intr_enable_syncpt_intr(struct nvhost_intr *intr, u32 id)
{
        struct nvhost_master *dev = intr_to_dev(intr);
        void __iomem *sync_regs = dev->sync_aperture;
        u32 reg_offset = (id / 32) * 4;

        nvhost_dbg_fn("");
        BUG_ON(reg_offset > (nvhost_syncpt_nb_pts(&dev->syncpt) / 32) * 4);
        writel(BIT(id & (32 - 1)), sync_regs +
                host1x_sync_syncpt_thresh_int_enable_cpu0_0_r() + reg_offset);
}

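/*
 * Disable the threshold interrupt for one syncpoint and clear any pending
 * CPU0 status bit for it.
 */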
static void t124_intr_disable_syncpt_intr(struct nvhost_intr *intr, u32 id)
{
        struct nvhost_master *dev = intr_to_dev(intr);
        void __iomem *sync_regs = dev->sync_aperture;
        u32 reg_offset = (id / 32) * 4;

        nvhost_dbg_fn("");
        BUG_ON(reg_offset > (nvhost_syncpt_nb_pts(&dev->syncpt) / 32) * 4);
        writel(BIT(id & (32 - 1)), sync_regs +
                host1x_sync_syncpt_thresh_int_disable_0_r() + reg_offset);
        writel(BIT(id & (32 - 1)), sync_regs +
                host1x_sync_syncpt_thresh_cpu0_int_status_0_r() + reg_offset);
}

static void t124_intr_disable_all_syncpt_intrs(struct nvhost_intr *intr)
{
        struct nvhost_master *dev = intr_to_dev(intr);
        void __iomem *sync_regs = dev->sync_aperture;
        u32 reg_offset;

        nvhost_dbg_fn("");

        for (reg_offset = 0;
             reg_offset <= (nvhost_syncpt_nb_pts(&dev->syncpt) / 32) * 4;
             reg_offset += 4) {

                /* disable interrupts for both CPUs */
                writel(0, sync_regs +
                       host1x_sync_syncpt_thresh_int_disable_0_r() +
                       reg_offset);

                /* clear status for both CPUs */
                writel(0xfffffffful, sync_regs +
                       host1x_sync_syncpt_thresh_cpu0_int_status_0_r() +
                       reg_offset);

                writel(0xfffffffful, sync_regs +
                       host1x_sync_syncpt_thresh_cpu1_int_status_0_r() +
                       reg_offset);
        }
}

/**
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in interrupt context
 */
irqreturn_t t124_intr_syncpt_thresh_isr(int irq, void *dev_id)
{
        struct nvhost_intr_syncpt *syncpt = dev_id;
        unsigned int id = syncpt->id;
        struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

        u32 reg_offset = (id / 32) * 4;
        id &= 32 - 1;

        nvhost_dbg_fn("");
        writel(BIT(id), sync_regs +
                host1x_sync_syncpt_thresh_int_disable_0_r() + reg_offset);
        writel(BIT(id), sync_regs +
                host1x_sync_syncpt_thresh_cpu0_int_status_0_r() + reg_offset);

        return IRQ_WAKE_THREAD;
}

/**
 * Host general interrupt service function
 * Handles read / write failures
 */
static irqreturn_t t124_intr_host1x_isr(int irq, void *dev_id)
{
        struct nvhost_intr *intr = dev_id;
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
        u32 stat;
        u32 ext_stat;
        u32 addr;

        nvhost_dbg_fn("");

        stat = readl(sync_regs + host1x_sync_hintstatus_0_r());
        ext_stat = readl(sync_regs + host1x_sync_hintstatus_ext_0_r());

        if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
                addr = readl(sync_regs +
                             host1x_sync_ip_read_timeout_addr_0_r());
                pr_err("Host read timeout at address %x\n", addr);
        }

        if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
                addr = readl(sync_regs +
                             host1x_sync_ip_write_timeout_addr_0_r());
                pr_err("Host write timeout at address %x\n", addr);
        }

        writel(ext_stat, sync_regs + host1x_sync_hintstatus_ext_0_r());
        writel(stat, sync_regs + host1x_sync_hintstatus_0_r());

        return IRQ_HANDLED;
}

static int t124_intr_request_host_general_irq(struct nvhost_intr *intr)
{
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
        int err;

        nvhost_dbg_fn("");

        if (intr->host_general_irq_requested)
                return 0;

        /* master disable for general (not syncpt) host interrupts */
        writel(0, sync_regs + host1x_sync_intmask_0_r());

        /* clear status & extstatus */
        writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_ext_0_r());
        writel(0xfffffffful, sync_regs + host1x_sync_hintstatus_0_r());

        err = request_irq(intr->host_general_irq, t124_intr_host1x_isr, 0,
                          "host_status", intr);
        if (err)
                return err;

        /* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
        writel(BIT(30) | BIT(31), sync_regs + host1x_sync_hintmask_ext_0_r());

        /* enable extra interrupt sources */
        writel(BIT(31), sync_regs + host1x_sync_hintmask_0_r());

        /* enable host module interrupt to CPU0 */
        writel(BIT(0), sync_regs + host1x_sync_intc0mask_0_r());

        /* master enable for general (not syncpt) host interrupts */
        writel(BIT(0), sync_regs + host1x_sync_intmask_0_r());

        intr->host_general_irq_requested = true;

        return err;
}

static void t124_intr_free_host_general_irq(struct nvhost_intr *intr)
{
        nvhost_dbg_fn("");
        if (intr->host_general_irq_requested) {
                void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

                /* master disable for general (not syncpt) host interrupts */
                writel(0, sync_regs + host1x_sync_intmask_0_r());

                free_irq(intr->host_general_irq, intr);
                intr->host_general_irq_requested = false;
        }
}

static int t124_request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
        int err;

        if (syncpt->irq_requested)
                return 0;

        err = request_threaded_irq(syncpt->irq,
                                   t124_intr_syncpt_thresh_isr,
                                   nvhost_syncpt_thresh_fn,
                                   0, syncpt->thresh_irq_name, syncpt);
        if (err)
                return err;

        syncpt->irq_requested = 1;
        return 0;
}

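/* Hook the T124 implementations into the nvhost chip support table. */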
int nvhost_init_t124_intr_support(struct nvhost_chip_support *op)
{
        op->intr.init_host_sync = t124_intr_init_host_sync;
        op->intr.set_host_clocks_per_usec =
                t124_intr_set_host_clocks_per_usec;
        op->intr.set_syncpt_threshold = t124_intr_set_syncpt_threshold;
        op->intr.enable_syncpt_intr = t124_intr_enable_syncpt_intr;
        op->intr.disable_syncpt_intr = t124_intr_disable_syncpt_intr;
        op->intr.disable_all_syncpt_intrs =
                t124_intr_disable_all_syncpt_intrs;
        op->intr.request_host_general_irq =
                t124_intr_request_host_general_irq;
        op->intr.free_host_general_irq =
                t124_intr_free_host_general_irq;
        op->intr.request_syncpt_irq = t124_request_syncpt_irq;

        return 0;
}