/*
 * drivers/video/tegra/host/nvhost_intr.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (c) 2010-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include "nvhost_intr.h"
#include "dev.h"
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <trace/events/nvhost.h>

/*** Wait list management ***/

struct nvhost_waitlist {
	struct list_head list;
	struct kref refcount;
	u32 thresh;
	enum nvhost_intr_action action;
	atomic_t state;
	void *data;
	int count;
};

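/*
 * Waiter life cycle.  The enum values are ordered so that the
 * interrupt thread can advance a waiter with a single
 * atomic_inc_return(): PENDING becomes REMOVED (dequeued, action still
 * to run) and CANCELLED becomes HANDLED (nothing left to do).
 * Cancellation itself moves PENDING to CANCELLED with cmpxchg; see
 * nvhost_intr_put_ref().
 */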
enum waitlist_state {
	WLS_PENDING,
	WLS_REMOVED,
	WLS_CANCELLED,
	WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
	kfree(container_of(kref, struct nvhost_waitlist, refcount));
}

/**
 * Add a waiter to a waiter queue, keeping the queue sorted by threshold.
 * Returns true if the waiter was added at the head of the queue.
 */
static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
				struct list_head *queue)
{
	struct nvhost_waitlist *pos;
	u32 thresh = waiter->thresh;

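	/*
	 * Walk from the tail so the common case (monotonically
	 * increasing thresholds) exits after one comparison.  The
	 * subtraction is done in u32 and compared as s32 so the
	 * ordering stays correct across sync point value wraparound.
	 */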
	list_for_each_entry_reverse(pos, queue, list)
		if ((s32)(pos->thresh - thresh) <= 0) {
			list_add(&waiter->list, &pos->list);
			return false;
		}

	list_add(&waiter->list, queue);
	return true;
}

/**
 * Run through the waiter queue for a single sync point and gather all
 * waiters whose threshold has been reached into per-action lists of
 * completed waiters.
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
			struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *dest;
	struct nvhost_waitlist *waiter, *next, *prev;

	list_for_each_entry_safe(waiter, next, head, list) {
		if ((s32)(waiter->thresh - sync) > 0)
			break;

		dest = completed + waiter->action;

		/*
		 * consolidate submit cleanups: if the previous completed
		 * waiter handles the same channel, bump its count instead
		 * of queueing another action
		 */
		if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
			&& !list_empty(dest)) {
			prev = list_entry(dest->prev,
					struct nvhost_waitlist, list);
			if (prev->data == waiter->data) {
				prev->count++;
				dest = NULL;
			}
		}

		/*
		 * Advance the state machine: PENDING -> REMOVED, or
		 * CANCELLED -> HANDLED.  A waiter that reaches HANDLED
		 * here was cancelled, and a waiter folded into the
		 * previous one (!dest) needs no further action, so
		 * both are dropped right away.
		 */
		if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
			list_del(&waiter->list);
			kref_put(&waiter->refcount, waiter_release);
		} else {
			list_move_tail(&waiter->list, dest);
		}
	}
}
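/**
 * Reprogram the sync point's threshold to the lowest pending value
 * (the head of the sorted queue) and re-enable its interrupt.  The
 * caller is expected to hold the sync point's waiter lock, as
 * process_wait_list() does.
 */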
void reset_threshold_interrupt(struct nvhost_intr *intr,
			       struct list_head *head,
			       unsigned int id)
{
	u32 thresh = list_first_entry(head,
				struct nvhost_waitlist, list)->thresh;

	BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
		 intr_op(intr).enable_syncpt_intr));

	intr_op(intr).set_syncpt_threshold(intr, id, thresh);
	intr_op(intr).enable_syncpt_intr(intr, id);
}

static void action_submit_complete(struct nvhost_waitlist *waiter)
{
	struct nvhost_channel *channel = waiter->data;
	int nr_completed = waiter->count;

	/* report the number of consolidated submits in the trace */
	trace_nvhost_channel_submit_complete(channel->desc->name,
			nr_completed);

	nvhost_cdma_update(&channel->cdma);
	nvhost_module_idle_mult(&channel->mod, nr_completed);
}

static void action_ctxsave(struct nvhost_waitlist *waiter)
{
	struct nvhost_hwctx *hwctx = waiter->data;
	struct nvhost_channel *channel = hwctx->channel;

	if (channel->ctxhandler.save_service)
		channel->ctxhandler.save_service(hwctx);
	channel->ctxhandler.put(hwctx);
}

static void action_ctxrestore(struct nvhost_waitlist *waiter)
{
	struct nvhost_hwctx *hwctx = waiter->data;
	struct nvhost_channel *channel = hwctx->channel;

	channel->ctxhandler.put(hwctx);
}

static void action_wakeup(struct nvhost_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up(wq);
}

static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
{
	wait_queue_head_t *wq = waiter->data;

	wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct nvhost_waitlist *waiter);

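/*
 * Indexed by enum nvhost_intr_action (declared in nvhost_intr.h): the
 * order of the entries below must match the order of the enum, since
 * remove_completed_waiters() files each waiter under completed[action]
 * and run_handlers() pairs completed[i] with action_handlers[i].
 */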
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
	action_submit_complete,
	action_ctxsave,
	action_ctxrestore,
	action_wakeup,
	action_wakeup_interruptible,
};

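/*
 * Run the handler for every waiter on the per-action completed lists,
 * then drop the wait list's reference.  Called in thread context after
 * process_wait_list() has released the sync point lock, so handlers
 * run without any spinlock held.
 */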
static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
	struct list_head *head = completed;
	int i;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
		action_handler handler = action_handlers[i];
		struct nvhost_waitlist *waiter, *next;

		list_for_each_entry_safe(waiter, next, head, list) {
			list_del(&waiter->list);
			handler(waiter);
			WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED)
				!= WLS_REMOVED);
			kref_put(&waiter->refcount, waiter_release);
		}
	}
}

/**
 * Remove and handle all waiters that have completed for the given sync
 * point, given its current threshold value.  Returns nonzero if the
 * wait list is empty afterwards.
 */
static int process_wait_list(struct nvhost_intr *intr,
			     struct nvhost_intr_syncpt *syncpt,
			     u32 threshold)
{
	struct list_head completed[NVHOST_INTR_ACTION_COUNT];
	unsigned int i;
	int empty;

	for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
		INIT_LIST_HEAD(completed + i);

	spin_lock(&syncpt->lock);

	remove_completed_waiters(&syncpt->wait_head, threshold, completed);

	empty = list_empty(&syncpt->wait_head);
	if (!empty)
		reset_threshold_interrupt(intr, &syncpt->wait_head,
					  syncpt->id);

	spin_unlock(&syncpt->lock);

	run_handlers(completed);

	return empty;
}

/*** host syncpt interrupt service functions ***/

/**
 * Sync point threshold interrupt service thread function.
 * Handles sync point threshold triggers in thread context.
 */
irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id)
{
	struct nvhost_intr_syncpt *syncpt = dev_id;
	unsigned int id = syncpt->id;
	struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
	struct nvhost_master *dev = intr_to_dev(intr);

	(void)process_wait_list(intr, syncpt,
				nvhost_syncpt_update_min(&dev->syncpt, id));

	return IRQ_HANDLED;
}

/**
 * Free a sync point's IRQ.  The sync point's interrupt must be
 * disabled before calling this.
 */
static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
	if (syncpt->irq_requested) {
		free_irq(syncpt->irq, syncpt);
		syncpt->irq_requested = 0;
	}
}

/*** host general interrupt service functions ***/

/*** Main API ***/

int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
			enum nvhost_intr_action action, void *data,
			void *_waiter,
			void **ref)
{
	struct nvhost_waitlist *waiter = _waiter;
	struct nvhost_intr_syncpt *syncpt;
	int queue_was_empty;
	int err;

	BUG_ON(waiter == NULL);

	BUG_ON(!(intr_op(intr).set_syncpt_threshold &&
		 intr_op(intr).enable_syncpt_intr));

	/* initialize a new waiter */
	INIT_LIST_HEAD(&waiter->list);
	kref_init(&waiter->refcount);
	if (ref)
		kref_get(&waiter->refcount);
	waiter->thresh = thresh;
	waiter->action = action;
	atomic_set(&waiter->state, WLS_PENDING);
	waiter->data = data;
	waiter->count = 1;

	BUG_ON(id >= intr_to_dev(intr)->syncpt.nb_pts);
	syncpt = intr->syncpt + id;

	spin_lock(&syncpt->lock);

	/* lazily request irq for this sync point */
	if (!syncpt->irq_requested) {
		spin_unlock(&syncpt->lock);

		mutex_lock(&intr->mutex);
		BUG_ON(!(intr_op(intr).request_syncpt_irq));
		err = intr_op(intr).request_syncpt_irq(syncpt);
		mutex_unlock(&intr->mutex);

		if (err) {
			kfree(waiter);
			return err;
		}

		spin_lock(&syncpt->lock);
	}

	queue_was_empty = list_empty(&syncpt->wait_head);

	if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
		/* added at head of list - new threshold value */
		intr_op(intr).set_syncpt_threshold(intr, id, thresh);

		/* added as first waiter - enable interrupt */
		if (queue_was_empty)
			intr_op(intr).enable_syncpt_intr(intr, id);
	}

	spin_unlock(&syncpt->lock);

	if (ref)
		*ref = waiter;
	return 0;
}
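
/*
 * Illustrative usage (a sketch, not code from this driver): a caller
 * that wants to sleep until sync point `id` reaches `thresh` could do
 * roughly the following with a wait queue of its own.  The enum value
 * NVHOST_INTR_ACTION_WAKEUP is assumed to be the nvhost_intr.h name
 * matching action_wakeup() in the handler table above, and
 * value_reached() stands in for the caller's own check of the sync
 * point value:
 *
 *	DECLARE_WAIT_QUEUE_HEAD(wq);
 *	void *ref;
 *	void *waiter = nvhost_intr_alloc_waiter();
 *
 *	if (!waiter)
 *		return -ENOMEM;
 *	err = nvhost_intr_add_action(intr, id, thresh,
 *				     NVHOST_INTR_ACTION_WAKEUP, &wq,
 *				     waiter, &ref);
 *	if (err)
 *		return err;
 *	wait_event(wq, value_reached(id, thresh));
 *	nvhost_intr_put_ref(intr, ref);
 */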

void *nvhost_intr_alloc_waiter(void)
{
	return kzalloc(sizeof(struct nvhost_waitlist),
			GFP_KERNEL|__GFP_REPEAT);
}

void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
	struct nvhost_waitlist *waiter = ref;

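	/*
	 * Try to move the waiter from PENDING to CANCELLED.  If the
	 * cmpxchg finds WLS_REMOVED instead, the interrupt thread has
	 * already dequeued the waiter and is about to run its handler,
	 * so yield until the state settles at WLS_HANDLED.  Once
	 * cancelled, the completion or teardown path drops the wait
	 * list's reference; the kref_put() below drops ours.
	 */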
	while (atomic_cmpxchg(&waiter->state,
				WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
		schedule();

	kref_put(&waiter->refcount, waiter_release);
}

/*** Init & shutdown ***/

int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
{
	unsigned int id;
	struct nvhost_intr_syncpt *syncpt;
	struct nvhost_master *host =
		container_of(intr, struct nvhost_master, intr);
	u32 nb_pts = host->syncpt.nb_pts;

	mutex_init(&intr->mutex);
	intr->host_general_irq = irq_gen;
	intr->host_general_irq_requested = false;

	for (id = 0, syncpt = intr->syncpt;
	     id < nb_pts;
	     ++id, ++syncpt) {
		syncpt->intr = &host->intr;
		syncpt->id = id;
		syncpt->irq = irq_sync + id;
		syncpt->irq_requested = 0;
		spin_lock_init(&syncpt->lock);
		INIT_LIST_HEAD(&syncpt->wait_head);
		snprintf(syncpt->thresh_irq_name,
			sizeof(syncpt->thresh_irq_name),
			"host_sp_%02d", id);
	}

	return 0;
}

void nvhost_intr_deinit(struct nvhost_intr *intr)
{
	nvhost_intr_stop(intr);
}

void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
{
	BUG_ON(!(intr_op(intr).init_host_sync &&
		 intr_op(intr).set_host_clocks_per_usec &&
		 intr_op(intr).request_host_general_irq));

	mutex_lock(&intr->mutex);

	intr_op(intr).init_host_sync(intr);
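	/* convert hz to host clock ticks per microsecond, rounding up */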
	intr_op(intr).set_host_clocks_per_usec(intr,
					       (hz + 1000000 - 1)/1000000);

	intr_op(intr).request_host_general_irq(intr);

	mutex_unlock(&intr->mutex);
}

void nvhost_intr_stop(struct nvhost_intr *intr)
{
	unsigned int id;
	struct nvhost_intr_syncpt *syncpt;
	u32 nb_pts = intr_to_dev(intr)->syncpt.nb_pts;

	BUG_ON(!(intr_op(intr).disable_all_syncpt_intrs &&
		 intr_op(intr).free_host_general_irq));

	mutex_lock(&intr->mutex);

	intr_op(intr).disable_all_syncpt_intrs(intr);

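	/*
	 * With all sync point interrupts disabled, the only waiters
	 * that may legitimately remain are ones already cancelled via
	 * nvhost_intr_put_ref(); reap those here.  Anything else still
	 * queued means a caller leaked an active waiter, which the
	 * BUG() below turns into a hard failure.
	 */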
	for (id = 0, syncpt = intr->syncpt;
	     id < nb_pts;
	     ++id, ++syncpt) {
		struct nvhost_waitlist *waiter, *next;
		list_for_each_entry_safe(waiter, next,
					 &syncpt->wait_head, list) {
			if (atomic_cmpxchg(&waiter->state,
					   WLS_CANCELLED, WLS_HANDLED)
					== WLS_CANCELLED) {
				list_del(&waiter->list);
				kref_put(&waiter->refcount, waiter_release);
			}
		}

		if (!list_empty(&syncpt->wait_head)) {  /* output diagnostics */
			printk(KERN_DEBUG "%s id=%d\n", __func__, id);
			BUG();
		}

		free_syncpt_irq(syncpt);
	}

	intr_op(intr).free_host_general_irq(intr);

	mutex_unlock(&intr->mutex);
}