video: tegra: refactor for multiple chip support
/*
 * drivers/video/tegra/host/nvhost_intr.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (c) 2010, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include "nvhost_intr.h"
#include "dev.h"
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

/*** Wait list management ***/

struct nvhost_waitlist {
        struct list_head list;
        struct kref refcount;
        u32 thresh;
        enum nvhost_intr_action action;
        atomic_t state;
        void *data;
        int count;
};

enum waitlist_state {
        WLS_PENDING,
        WLS_REMOVED,
        WLS_CANCELLED,
        WLS_HANDLED
};
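
/*
 * The enum values above are ordered so that a waiter's state advances
 * by a single atomic increment: PENDING -> REMOVED when an expired
 * waiter is moved to a completed list, CANCELLED -> HANDLED when a
 * cancelled waiter is reaped (see remove_completed_waiters()).
 */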

static void waiter_release(struct kref *kref)
{
        kfree(container_of(kref, struct nvhost_waitlist, refcount));
}

/**
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
                                struct list_head *queue)
{
        struct nvhost_waitlist *pos;
        u32 thresh = waiter->thresh;

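        /*
         * Walk backwards from the tail; the signed difference keeps the
         * comparison correct across 32-bit sync point wraparound.
         */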
        list_for_each_entry_reverse(pos, queue, list)
                if ((s32)(pos->thresh - thresh) <= 0) {
                        list_add(&waiter->list, &pos->list);
                        return false;
                }

        list_add(&waiter->list, queue);
        return true;
}

/**
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into per-action lists
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *dest;
        struct nvhost_waitlist *waiter, *next, *prev;

        list_for_each_entry_safe(waiter, next, head, list) {
                if ((s32)(waiter->thresh - sync) > 0)
                        break;

                dest = completed + waiter->action;

                /* consolidate submit cleanups */
                if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
                        && !list_empty(dest)) {
                        prev = list_entry(dest->prev,
                                        struct nvhost_waitlist, list);
                        if (prev->data == waiter->data) {
                                prev->count++;
                                dest = NULL;
                        }
                }

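                /*
                 * dest == NULL means this waiter was folded into prev:
                 * drop it without queueing it for a handler, since
                 * prev->count covers it in action_submit_complete().
                 */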
                /* PENDING->REMOVED or CANCELLED->HANDLED */
                if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
                        list_del(&waiter->list);
                        kref_put(&waiter->refcount, waiter_release);
                } else {
                        list_move_tail(&waiter->list, dest);
                }
        }
}

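/**
 * reprogram the threshold to the lowest pending waiter and re-enable
 * the sync point interrupt
 */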
void reset_threshold_interrupt(struct nvhost_intr *intr,
                               struct list_head *head,
                               unsigned int id)
{
        u32 thresh = list_first_entry(head,
                                struct nvhost_waitlist, list)->thresh;

        BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
                 intr_op(intr).enable_syncpt_intr));

        intr_op(intr).set_syncpt_threshold(intr, id, thresh);
        intr_op(intr).enable_syncpt_intr(intr, id);
}

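/*
 * Action handlers: called from run_handlers() in interrupt thread
 * context, after the sync point's wait-list lock has been dropped.
 */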
static void action_submit_complete(struct nvhost_waitlist *waiter)
{
        struct nvhost_channel *channel = waiter->data;
        int nr_completed = waiter->count;

        nvhost_cdma_update(&channel->cdma);
        nvhost_module_idle_mult(&channel->mod, nr_completed);
}

static void action_ctxsave(struct nvhost_waitlist *waiter)
{
        struct nvhost_hwctx *hwctx = waiter->data;
        struct nvhost_channel *channel = hwctx->channel;

        if (channel->ctxhandler.save_service)
                channel->ctxhandler.save_service(hwctx);
        channel->ctxhandler.put(hwctx);
}

static void action_wakeup(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up(wq);
}

static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct nvhost_waitlist *waiter);

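/* Indexed by enum nvhost_intr_action; order must match the enum. */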
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_ctxsave,
        action_wakeup,
        action_wakeup_interruptible,
};

static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *head = completed;
        int i;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
                action_handler handler = action_handlers[i];
                struct nvhost_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
                        WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
                                WLS_REMOVED);
                        kref_put(&waiter->refcount, waiter_release);
                }
        }
}

/**
 * Remove & handle all waiters that have completed for the given syncpt
 */
static int process_wait_list(struct nvhost_intr *intr,
                             struct nvhost_intr_syncpt *syncpt,
                             u32 threshold)
{
        struct list_head completed[NVHOST_INTR_ACTION_COUNT];
        unsigned int i;
        int empty;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
                INIT_LIST_HEAD(completed + i);

        spin_lock(&syncpt->lock);

        remove_completed_waiters(&syncpt->wait_head, threshold, completed);

        empty = list_empty(&syncpt->wait_head);
        if (!empty)
                reset_threshold_interrupt(intr, &syncpt->wait_head,
                                          syncpt->id);

        spin_unlock(&syncpt->lock);

        run_handlers(completed);

        return empty;
}

/*** host syncpt interrupt service functions ***/

/**
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */
static irqreturn_t syncpt_thresh_fn(int irq, void *dev_id)
{
        struct nvhost_intr_syncpt *syncpt = dev_id;
        unsigned int id = syncpt->id;
        struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
        struct nvhost_master *dev = intr_to_dev(intr);

        (void)process_wait_list(intr, syncpt,
                                nvhost_syncpt_update_min(&dev->syncpt, id));

        return IRQ_HANDLED;
}

/**
 * lazily request a syncpt's irq
 */
static int request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
        int err;
        extern irqreturn_t t20_intr_syncpt_thresh_isr(int irq, void *dev_id);

        if (syncpt->irq_requested)
                return 0;

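        /*
         * Threaded IRQ: the chip-specific hard-IRQ half
         * (t20_intr_syncpt_thresh_isr) runs first and wakes
         * syncpt_thresh_fn(), which processes expired waiters in
         * thread context.
         */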
        err = request_threaded_irq(syncpt->irq,
                                t20_intr_syncpt_thresh_isr, syncpt_thresh_fn,
                                0, syncpt->thresh_irq_name, syncpt);
        if (err)
                return err;

        syncpt->irq_requested = 1;
        return 0;
}

/**
 * free a syncpt's irq. syncpt interrupt should be disabled first.
 */
static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
        if (syncpt->irq_requested) {
                free_irq(syncpt->irq, syncpt);
                syncpt->irq_requested = 0;
        }
}


/*** host general interrupt service functions ***/


/*** Main API ***/

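/*
 * Illustrative caller sketch (not part of this file): a typical waiter
 * enqueues a wakeup action, sleeps on the wait queue it passed as
 * 'data', then drops its reference.  The completion check is assumed
 * to be a helper along the lines of nvhost_syncpt_min_cmp():
 *
 *      DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *      void *ref;
 *      int err;
 *
 *      err = nvhost_intr_add_action(&dev->intr, id, thresh,
 *                      NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *                      &wq, &ref);
 *      if (!err) {
 *              wait_event_interruptible(wq,
 *                      nvhost_syncpt_min_cmp(&dev->syncpt, id, thresh));
 *              nvhost_intr_put_ref(&dev->intr, ref);
 *      }
 */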
int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
                        enum nvhost_intr_action action, void *data,
                        void **ref)
{
        struct nvhost_waitlist *waiter;
        struct nvhost_intr_syncpt *syncpt;
        int queue_was_empty;
        int err;

        BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
                 intr_op(intr).enable_syncpt_intr));

        /* create and initialize a new waiter */
        waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
        if (!waiter)
                return -ENOMEM;
        INIT_LIST_HEAD(&waiter->list);
        kref_init(&waiter->refcount);
        if (ref)
                kref_get(&waiter->refcount);
        waiter->thresh = thresh;
        waiter->action = action;
        atomic_set(&waiter->state, WLS_PENDING);
        waiter->data = data;
        waiter->count = 1;

        BUG_ON(id >= intr_to_dev(intr)->syncpt.nb_pts);
        syncpt = intr->syncpt + id;

        spin_lock(&syncpt->lock);

        /* lazily request irq for this sync point */
        if (!syncpt->irq_requested) {
                spin_unlock(&syncpt->lock);
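
                /*
                 * request_threaded_irq() can sleep, so run it outside
                 * the spinlock; intr->mutex serializes the request and
                 * request_syncpt_irq() rechecks irq_requested, so a
                 * concurrent caller is harmless.
                 */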
                mutex_lock(&intr->mutex);
                err = request_syncpt_irq(syncpt);
                mutex_unlock(&intr->mutex);

                if (err) {
                        kfree(waiter);
                        return err;
                }

                spin_lock(&syncpt->lock);
        }

        queue_was_empty = list_empty(&syncpt->wait_head);

        if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
                /* added at head of list - new threshold value */
                intr_op(intr).set_syncpt_threshold(intr, id, thresh);

                /* added as first waiter - enable interrupt */
                if (queue_was_empty)
                        intr_op(intr).enable_syncpt_intr(intr, id);
        }

        spin_unlock(&syncpt->lock);

        if (ref)
                *ref = waiter;
        return 0;
}

void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
        struct nvhost_waitlist *waiter = ref;

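        /*
         * PENDING -> CANCELLED on success.  If the cmpxchg sees
         * WLS_REMOVED, the waiter sits on a completed list awaiting its
         * handler: spin until run_handlers() marks it WLS_HANDLED, then
         * drop this reference.
         */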
        while (atomic_cmpxchg(&waiter->state,
                                WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
                schedule();

        kref_put(&waiter->refcount, waiter_release);
}


/*** Init & shutdown ***/

int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;
        struct nvhost_master *host =
                container_of(intr, struct nvhost_master, intr);
        u32 nb_pts = host->syncpt.nb_pts;

        mutex_init(&intr->mutex);
        intr->host_general_irq = irq_gen;
        intr->host_general_irq_requested = false;

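        /* One threshold irq per sync point, numbered up from irq_sync. */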
        for (id = 0, syncpt = intr->syncpt;
             id < nb_pts;
             ++id, ++syncpt) {
                syncpt->intr = &host->intr;
                syncpt->id = id;
                syncpt->irq = irq_sync + id;
                syncpt->irq_requested = 0;
                spin_lock_init(&syncpt->lock);
                INIT_LIST_HEAD(&syncpt->wait_head);
                snprintf(syncpt->thresh_irq_name,
                        sizeof(syncpt->thresh_irq_name),
                        "host_sp_%02d", id);
        }

        return 0;
}

void nvhost_intr_deinit(struct nvhost_intr *intr)
{
        nvhost_intr_stop(intr);
}

void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
{
        BUG_ON(!(intr_op(intr).init_host_sync &&
                 intr_op(intr).set_host_clocks_per_usec &&
                 intr_op(intr).request_host_general_irq));

        mutex_lock(&intr->mutex);

        intr_op(intr).init_host_sync(intr);
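        /* Round hz up to whole clocks per microsecond. */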
        intr_op(intr).set_host_clocks_per_usec(intr,
                                               (hz + 1000000 - 1)/1000000);

        intr_op(intr).request_host_general_irq(intr);

        mutex_unlock(&intr->mutex);
}

void nvhost_intr_stop(struct nvhost_intr *intr)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;
        u32 nb_pts = intr_to_dev(intr)->syncpt.nb_pts;

        BUG_ON(!(intr_op(intr).disable_all_syncpt_intrs &&
                 intr_op(intr).free_host_general_irq));

        mutex_lock(&intr->mutex);

        intr_op(intr).disable_all_syncpt_intrs(intr);

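        /*
         * Reap waiters that were cancelled but never collected by the
         * interrupt thread (CANCELLED -> HANDLED); anything else still
         * queued at this point is a bug.
         */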
        for (id = 0, syncpt = intr->syncpt;
             id < nb_pts;
             ++id, ++syncpt) {
                struct nvhost_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next,
                                         &syncpt->wait_head, list) {
                        if (atomic_cmpxchg(&waiter->state,
                                        WLS_CANCELLED, WLS_HANDLED)
                                        == WLS_CANCELLED) {
                                list_del(&waiter->list);
                                kref_put(&waiter->refcount, waiter_release);
                        }
                }

                if (!list_empty(&syncpt->wait_head)) {
                        /* output diagnostics */
                        pr_err("%s id=%u\n", __func__, id);
                        BUG();
                }

                free_syncpt_irq(syncpt);
        }

        intr_op(intr).free_host_general_irq(intr);

        mutex_unlock(&intr->mutex);
}