[ARM/tegra] nvhost: Tegra3 support
/*
 * drivers/video/tegra/host/nvhost_intr.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include "nvhost_intr.h"
#include "dev.h"
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

/* recover the nvhost_master that embeds this interrupt controller */
#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)


/*** HW host sync management ***/

void init_host_sync(void __iomem *sync_regs)
{
        /* disable the ip_busy_timeout. this prevents write drops, etc.
         * there's no real way to recover from a hung client anyway.
         */
        writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);

        /* increase the auto-ack timeout to the maximum value. 2d will hang
         * otherwise on ap20.
         */
        writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
}

void set_host_clocks_per_microsecond(void __iomem *sync_regs, u32 cpm)
{
        /* write microsecond clock register */
        writel(cpm, sync_regs + HOST1X_SYNC_USEC_CLK);
}

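/*
 * Program the CPU interrupt threshold for a sync point.  Only the low
 * 16 bits are written; the INT_THRESH register field appears to be
 * 16 bits wide, hence the mask below.
 */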
static void set_syncpt_threshold(void __iomem *sync_regs, u32 id, u32 thresh)
{
        thresh &= 0xffff;
        writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
}

static void enable_syncpt_interrupt(void __iomem *sync_regs, u32 id)
{
        writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
}

void disable_all_syncpt_interrupts(void __iomem *sync_regs)
{
        /* disable interrupts for both CPUs */
        writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);

        /* clear status for both CPUs */
        writel(0xfffffffful, sync_regs +
                HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
        writel(0xfffffffful, sync_regs +
                HOST1X_SYNC_SYNCPT_THRESH_CPU1_INT_STATUS);
}


/*** Wait list management ***/

struct nvhost_waitlist {
        struct list_head list;
        struct kref refcount;
        u32 thresh;
        enum nvhost_intr_action action;
        atomic_t state;         /* enum waitlist_state */
        void *data;             /* handler-specific: channel, hwctx or waitqueue */
        int count;              /* number of consolidated submit completions */
};

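/*
 * Waiter lifecycle.  A waiter normally goes PENDING -> REMOVED (taken off
 * the queue by remove_completed_waiters()) -> HANDLED (its action has run).
 * nvhost_intr_put_ref() cancels a still-pending waiter by moving it
 * PENDING -> CANCELLED; a cancelled waiter is then freed either by the
 * interrupt path or by nvhost_intr_stop() (CANCELLED -> HANDLED).
 */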
enum waitlist_state
{
        WLS_PENDING,
        WLS_REMOVED,
        WLS_CANCELLED,
        WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
        kfree(container_of(kref, struct nvhost_waitlist, refcount));
}

/**
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 *
 * the signed difference (s32)(pos->thresh - thresh) keeps the ordering
 * correct across 32-bit sync point wraparound
 */
static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
                                struct list_head *queue)
{
        struct nvhost_waitlist *pos;
        u32 thresh = waiter->thresh;

        list_for_each_entry_reverse(pos, queue, list)
                if ((s32)(pos->thresh - thresh) <= 0) {
                        list_add(&waiter->list, &pos->list);
                        return false;
                }

        list_add(&waiter->list, queue);
        return true;
}

/**
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by action
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *dest;
        struct nvhost_waitlist *waiter, *next, *prev;

        list_for_each_entry_safe(waiter, next, head, list) {
                if ((s32)(waiter->thresh - sync) > 0)
                        break;

                dest = completed + waiter->action;

                /* consolidate submit cleanups */
                if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
                        && !list_empty(dest)) {
                        prev = list_entry(dest->prev,
                                        struct nvhost_waitlist, list);
                        if (prev->data == waiter->data) {
                                prev->count++;
                                dest = NULL;
                        }
                }

                /* PENDING->REMOVED or CANCELLED->HANDLED */
                if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
                        list_del(&waiter->list);
                        kref_put(&waiter->refcount, waiter_release);
                } else {
                        list_move_tail(&waiter->list, dest);
                }
        }
}

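/*
 * reprogram the threshold to the lowest pending waiter and re-enable the
 * sync point's interrupt; callers hold the sync point lock and guarantee
 * the wait list is not empty
 */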
void reset_threshold_interrupt(struct list_head *head,
                unsigned int id, void __iomem *sync_regs)
{
        u32 thresh = list_first_entry(head,
                                struct nvhost_waitlist, list)->thresh;

        set_syncpt_threshold(sync_regs, id, thresh);
        enable_syncpt_interrupt(sync_regs, id);
}


static void action_submit_complete(struct nvhost_waitlist *waiter)
{
        struct nvhost_channel *channel = waiter->data;
        int nr_completed = waiter->count;

        nvhost_cdma_update(&channel->cdma);
        nvhost_module_idle_mult(&channel->mod, nr_completed);
}

static void action_ctxsave(struct nvhost_waitlist *waiter)
{
        struct nvhost_hwctx *hwctx = waiter->data;
        struct nvhost_channel *channel = hwctx->channel;

        if (channel->ctxhandler.save_service)
                channel->ctxhandler.save_service(hwctx);
        channel->ctxhandler.put(hwctx);
}

static void action_wakeup(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up(wq);
}

static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct nvhost_waitlist *waiter);

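/* indexed by enum nvhost_intr_action; keep in the same order as the enum */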
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_ctxsave,
        action_wakeup,
        action_wakeup_interruptible,
};

static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *head = completed;
        int i;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
                action_handler handler = action_handlers[i];
                struct nvhost_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
                        WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != WLS_REMOVED);
                        kref_put(&waiter->refcount, waiter_release);
                }
        }
}

/**
 * Remove & handle all waiters that have completed for the given syncpt.
 * Returns nonzero if the wait list is empty afterwards.
 */
int process_wait_list(struct nvhost_intr_syncpt *syncpt,
                u32 threshold, void __iomem *sync_regs)
{
        struct list_head completed[NVHOST_INTR_ACTION_COUNT];
        unsigned int i;
        int empty;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
                INIT_LIST_HEAD(completed + i);

        spin_lock(&syncpt->lock);

        remove_completed_waiters(&syncpt->wait_head, threshold, completed);

        empty = list_empty(&syncpt->wait_head);
        if (!empty)
                reset_threshold_interrupt(&syncpt->wait_head,
                                        syncpt->id, sync_regs);

        spin_unlock(&syncpt->lock);

        run_handlers(completed);

        return empty;
}


/*** host syncpt interrupt service functions ***/

/**
 * Sync point threshold interrupt service function
 * Handles sync point threshold triggers, in interrupt context
 */
static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
{
        struct nvhost_intr_syncpt *syncpt = dev_id;
        unsigned int id = syncpt->id;
        struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
                                                syncpt[id]);
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

        /* mask and ack this sync point, then defer to the threaded handler */
        writel(BIT(id),
                sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
        writel(BIT(id),
                sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);

        return IRQ_WAKE_THREAD;
}

/**
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */
static irqreturn_t syncpt_thresh_fn(int irq, void *dev_id)
{
        struct nvhost_intr_syncpt *syncpt = dev_id;
        unsigned int id = syncpt->id;
        struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
                                                syncpt[id]);
        struct nvhost_master *dev = intr_to_dev(intr);

        (void)process_wait_list(syncpt,
                        nvhost_syncpt_update_min(&dev->syncpt, id),
                        dev->sync_aperture);

        return IRQ_HANDLED;
}

/**
 * lazily request a syncpt's irq
 */
static int request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
        int err;

        if (syncpt->irq_requested)
                return 0;

        err = request_threaded_irq(syncpt->irq,
                                syncpt_thresh_isr, syncpt_thresh_fn,
                                0, syncpt->thresh_irq_name, syncpt);
        if (err)
                return err;

        syncpt->irq_requested = 1;
        return 0;
}

/**
 * free a syncpt's irq. syncpt interrupt should be disabled first.
 */
static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
        if (syncpt->irq_requested) {
                free_irq(syncpt->irq, syncpt);
                syncpt->irq_requested = 0;
        }
}


/*** host general interrupt service functions ***/

/**
 * Host general interrupt service function
 * Handles read / write failures
 */
static irqreturn_t host1x_isr(int irq, void *dev_id)
{
        struct nvhost_intr *intr = dev_id;
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
        u32 stat;
        u32 ext_stat;
        u32 addr;

        stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
        ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);

        if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
                addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
                pr_err("Host read timeout at address %x\n", addr);
        }

        if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
                addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
                pr_err("Host write timeout at address %x\n", addr);
        }

        writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
        writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);

        return IRQ_HANDLED;
}

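/*
 * set up and enable the general (non-syncpt) host interrupt: mask
 * everything, clear stale status, install the handler, then unmask the
 * read/write timeout sources and route them to CPU0
 */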
static int request_host_general_irq(struct nvhost_intr *intr)
{
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
        int err;

        if (intr->host_general_irq_requested)
                return 0;

        /* master disable for general (not syncpt) host interrupts */
        writel(0, sync_regs + HOST1X_SYNC_INTMASK);

        /* clear status & extstatus */
        writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
        writel(0xfffffffful, sync_regs + HOST1X_SYNC_HINTSTATUS);

        err = request_irq(intr->host_general_irq, host1x_isr, 0,
                        "host_status", intr);
        if (err)
                return err;

        /* enable extra interrupt sources IP_READ_INT and IP_WRITE_INT */
        writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);

        /* enable extra interrupt sources */
        writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);

        /* enable host module interrupt to CPU0 */
        writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);

        /* master enable for general (not syncpt) host interrupts */
        writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);

        intr->host_general_irq_requested = true;

        return err;
}

static void free_host_general_irq(struct nvhost_intr *intr)
{
        if (intr->host_general_irq_requested) {
                void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;

                /* master disable for general (not syncpt) host interrupts */
                writel(0, sync_regs + HOST1X_SYNC_INTMASK);

                free_irq(intr->host_general_irq, intr);
                intr->host_general_irq_requested = false;
        }
}


/*** Main API ***/

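/**
 * Schedule an action to be taken once sync point 'id' reaches 'thresh'.
 * 'data' is passed to the action handler.  If 'ref' is non-NULL, an extra
 * reference to the waiter is returned through it and must later be dropped
 * with nvhost_intr_put_ref().
 */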
int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
                        enum nvhost_intr_action action, void *data,
                        void **ref)
{
        struct nvhost_waitlist *waiter;
        struct nvhost_intr_syncpt *syncpt;
        void __iomem *sync_regs;
        int queue_was_empty;
        int err;

        /* create and initialize a new waiter */
        waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
        if (!waiter)
                return -ENOMEM;
        INIT_LIST_HEAD(&waiter->list);
        kref_init(&waiter->refcount);
        if (ref)
                kref_get(&waiter->refcount);
        waiter->thresh = thresh;
        waiter->action = action;
        atomic_set(&waiter->state, WLS_PENDING);
        waiter->data = data;
        waiter->count = 1;

        BUG_ON(id >= NV_HOST1X_SYNCPT_NB_PTS);
        syncpt = intr->syncpt + id;
        sync_regs = intr_to_dev(intr)->sync_aperture;

        spin_lock(&syncpt->lock);

        /* lazily request irq for this sync point */
        if (!syncpt->irq_requested) {
                spin_unlock(&syncpt->lock);

                mutex_lock(&intr->mutex);
                err = request_syncpt_irq(syncpt);
                mutex_unlock(&intr->mutex);

                if (err) {
                        kfree(waiter);
                        return err;
                }

                spin_lock(&syncpt->lock);
        }

        queue_was_empty = list_empty(&syncpt->wait_head);

        if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
                /* added at head of list - new threshold value */
                set_syncpt_threshold(sync_regs, id, thresh);

                /* added as first waiter - enable interrupt */
                if (queue_was_empty)
                        enable_syncpt_interrupt(sync_regs, id);
        }

        spin_unlock(&syncpt->lock);

        if (ref)
                *ref = waiter;
        return 0;
}

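/**
 * Release a waiter reference obtained from nvhost_intr_add_action().
 * A waiter that has not fired yet is cancelled; if its action is being
 * handled right now, wait for the handler to finish before dropping the
 * reference.
 */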
void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
        struct nvhost_waitlist *waiter = ref;

        /* cancel if still pending; yield while the handler is running */
        while (atomic_cmpxchg(&waiter->state,
                                WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
                schedule();

        kref_put(&waiter->refcount, waiter_release);
}

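/*
 * Example (illustrative sketch only, not used by this file): a caller that
 * wants to block until a sync point reaches a threshold can pair
 * nvhost_intr_add_action() with a wait queue.  The enum value and the
 * nvhost_syncpt_min_cmp() helper are assumed to be provided by
 * nvhost_intr.h / nvhost_syncpt.h; 'dev' stands for the caller's
 * struct nvhost_master.
 *
 *        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *        void *ref;
 *        int err;
 *
 *        err = nvhost_intr_add_action(&dev->intr, id, thresh,
 *                        NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *                        &wq, &ref);
 *        if (err)
 *                return err;
 *
 *        wait_event_interruptible(wq,
 *                        nvhost_syncpt_min_cmp(&dev->syncpt, id, thresh));
 *
 *        nvhost_intr_put_ref(&dev->intr, ref);
 */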

/*** Init & shutdown ***/

int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;

        mutex_init(&intr->mutex);
        intr->host_general_irq = irq_gen;
        intr->host_general_irq_requested = false;

        for (id = 0, syncpt = intr->syncpt;
             id < NV_HOST1X_SYNCPT_NB_PTS;
             ++id, ++syncpt) {
                syncpt->id = id;
                syncpt->irq = irq_sync + id;
                syncpt->irq_requested = 0;
                spin_lock_init(&syncpt->lock);
                INIT_LIST_HEAD(&syncpt->wait_head);
                snprintf(syncpt->thresh_irq_name,
                        sizeof(syncpt->thresh_irq_name),
                        "host_sp_%02d", id);
        }

        return 0;
}

void nvhost_intr_deinit(struct nvhost_intr *intr)
{
        nvhost_intr_stop(intr);
}

void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
{
        struct nvhost_master *dev = intr_to_dev(intr);
        void __iomem *sync_regs = dev->sync_aperture;

        mutex_lock(&intr->mutex);

        init_host_sync(sync_regs);

        /* convert the host clock rate to clocks per microsecond, rounded up */
        set_host_clocks_per_microsecond(sync_regs, (hz + 1000000 - 1)/1000000);

        request_host_general_irq(intr);

        mutex_unlock(&intr->mutex);
}

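/**
 * Disable all host1x interrupts and release their IRQs.  Every waiter must
 * already have been cancelled (or have completed); a sync point whose wait
 * list still holds live waiters triggers a BUG.
 */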
void nvhost_intr_stop(struct nvhost_intr *intr)
{
        void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;

        mutex_lock(&intr->mutex);

        disable_all_syncpt_interrupts(sync_regs);

        for (id = 0, syncpt = intr->syncpt;
             id < NV_HOST1X_SYNCPT_NB_PTS;
             ++id, ++syncpt) {
                struct nvhost_waitlist *waiter, *next;

                /* release waiters that were already cancelled */
                list_for_each_entry_safe(waiter, next, &syncpt->wait_head, list) {
                        if (atomic_cmpxchg(&waiter->state, WLS_CANCELLED, WLS_HANDLED)
                                == WLS_CANCELLED) {
                                list_del(&waiter->list);
                                kref_put(&waiter->refcount, waiter_release);
                        }
                }

                /* anything left was never completed or cancelled */
                if (!list_empty(&syncpt->wait_head)) {
                        pr_err("%s: syncpt id %u still has waiters\n",
                                __func__, id);
                        BUG();
                }

                free_syncpt_irq(syncpt);
        }

        free_host_general_irq(intr);

        mutex_unlock(&intr->mutex);
}