/*
 * drivers/video/tegra/host/nvhost_intr.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "nvhost_intr.h"
#include "dev.h"
#include "nvhost_acm.h"

#ifdef CONFIG_TEGRA_GRHOST_SYNC
#include "nvhost_sync.h"
#endif

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <trace/events/nvhost.h>
#include "nvhost_channel.h"
#include "nvhost_hwctx.h"
#include "chip_support.h"
#include "gk20a/channel_gk20a.h"

/*** Wait list management ***/
struct nvhost_waitlist {
        struct list_head list;          /* node in the syncpt wait queue */
        struct kref refcount;
        u32 thresh;                     /* syncpt threshold to wait for */
        enum nvhost_intr_action action;
        atomic_t state;                 /* enum waitlist_state */
        struct timespec isr_recv;       /* when the ISR saw thresh pass */
        void *data;                     /* action-specific payload */
        int count;                      /* consolidated completion count */
};

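/*
 * Waiter lifecycle. The enum values are ordered so that the single
 * atomic_inc_return() in remove_completed_waiters() performs either
 * the PENDING -> REMOVED or the CANCELLED -> HANDLED transition,
 * depending on whether nvhost_intr_put_ref() raced with completion.
 */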
enum waitlist_state {
        WLS_PENDING,
        WLS_REMOVED,
        WLS_CANCELLED,
        WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
        kfree(container_of(kref, struct nvhost_waitlist, refcount));
}

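/**
 * nvhost_intr_release_time() - report when the ISR saw a waiter complete
 * @ref: waiter pointer obtained through nvhost_intr_add_action()'s @ref
 * @ts: filled in with the timestamp recorded by the ISR
 *
 * Returns -EBUSY while the waiter is still pending, 0 once the
 * timestamp is valid.
 */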
int nvhost_intr_release_time(void *ref, struct timespec *ts)
{
        struct nvhost_waitlist *waiter = ref;

        if (atomic_read(&waiter->state) == WLS_PENDING)
                return -EBUSY;

        *ts = waiter->isr_recv;
        return 0;
}

/**
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
                                struct list_head *queue)
{
        struct nvhost_waitlist *pos;
        u32 thresh = waiter->thresh;

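        /*
         * Syncpt values wrap around; comparing signed differences keeps
         * the ordering correct as long as outstanding thresholds span
         * less than half the 32-bit range.
         */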
        list_for_each_entry_reverse(pos, queue, list)
                if ((s32)(pos->thresh - thresh) <= 0) {
                        list_add(&waiter->list, &pos->list);
                        return false;
                }

        list_add(&waiter->list, queue);
        return true;
}

/**
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists, sorted by action
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct timespec isr_recv,
                        struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *dest;
        struct nvhost_waitlist *waiter, *next, *prev;

        list_for_each_entry_safe(waiter, next, head, list) {
                if ((s32)(waiter->thresh - sync) > 0)
                        break;

                waiter->isr_recv = isr_recv;
                dest = completed + waiter->action;

                /* consolidate submit cleanups into a single waiter */
                if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
                        && !list_empty(dest)) {
                        prev = list_entry(dest->prev,
                                        struct nvhost_waitlist, list);
                        if (prev->data == waiter->data) {
                                prev->count++;
                                dest = NULL;
                        }
                }

                /* PENDING->REMOVED or CANCELLED->HANDLED */
                if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
                        list_del(&waiter->list);
                        kref_put(&waiter->refcount, waiter_release);
                } else {
                        list_move_tail(&waiter->list, dest);
                }
        }
}

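/**
 * reset_threshold_interrupt() - re-arm the syncpt threshold interrupt
 *
 * Programs the threshold of the first (lowest) remaining waiter and
 * re-enables the syncpt interrupt. The queue must not be empty; the
 * caller in this file holds syncpt->lock around the call.
 */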
void reset_threshold_interrupt(struct nvhost_intr *intr,
                               struct list_head *head,
                               unsigned int id)
{
        u32 thresh = list_first_entry(head,
                                struct nvhost_waitlist, list)->thresh;

        intr_op().set_syncpt_threshold(intr, id, thresh);
        intr_op().enable_syncpt_intr(intr, id);
}

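/*
 * Completion actions. These run from run_handlers() in thread context,
 * outside syncpt->lock; waiter->data carries the action-specific payload.
 */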
static void action_submit_complete(struct nvhost_waitlist *waiter)
{
        struct nvhost_channel *channel = waiter->data;
        int nr_completed = waiter->count;

        nvhost_module_idle_mult(channel->dev, nr_completed);
        nvhost_cdma_update(&channel->cdma);

        /* add nr_completed to the trace */
        trace_nvhost_channel_submit_complete(channel->dev->name,
                        nr_completed, waiter->thresh,
                        channel->cdma.high_prio_count,
                        channel->cdma.med_prio_count,
                        channel->cdma.low_prio_count);
}

static void action_gpfifo_submit_complete(struct nvhost_waitlist *waiter)
{
        struct channel_gk20a *ch20a = waiter->data;
        int nr_completed = waiter->count;

        wake_up(&ch20a->submit_wq);
        gk20a_channel_update(ch20a);
        nvhost_module_idle_mult(ch20a->ch->dev, nr_completed);
        /* TODO: add trace function */
}

static void action_wakeup(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up(wq);
}

static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up_interruptible(wq);
}

static void action_signal_sync_pt(struct nvhost_waitlist *waiter)
{
#ifdef CONFIG_TEGRA_GRHOST_SYNC
        struct nvhost_sync_pt *pt = waiter->data;

        nvhost_sync_pt_signal(pt);
#endif
}

typedef void (*action_handler)(struct nvhost_waitlist *waiter);

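/* Indexed by enum nvhost_intr_action; order must match nvhost_intr.h. */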
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_gpfifo_submit_complete,
        action_signal_sync_pt,
        action_wakeup,
        action_wakeup_interruptible,
};

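/*
 * Dispatch each completed waiter to its action handler. Runs without
 * locks; every waiter here has already made the PENDING -> REMOVED
 * transition, which the WARN_ON below verifies while marking it HANDLED.
 */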
static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *head = completed;
        int i;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
                action_handler handler = action_handlers[i];
                struct nvhost_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
                        WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
                                WLS_REMOVED);
                        kref_put(&waiter->refcount, waiter_release);
                }
        }
}

/**
 * Remove & handle all waiters that have completed for the given syncpt
 */
static int process_wait_list(struct nvhost_intr *intr,
                             struct nvhost_intr_syncpt *syncpt,
                             u32 threshold)
{
        struct list_head completed[NVHOST_INTR_ACTION_COUNT];
        unsigned int i;
        int empty;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
                INIT_LIST_HEAD(completed + i);

        spin_lock(&syncpt->lock);

        remove_completed_waiters(&syncpt->wait_head, threshold,
                syncpt->isr_recv, completed);

        empty = list_empty(&syncpt->wait_head);
        if (empty)
                intr_op().disable_syncpt_intr(intr, syncpt->id);
        else
                reset_threshold_interrupt(intr, &syncpt->wait_head,
                                          syncpt->id);

        spin_unlock(&syncpt->lock);

        run_handlers(completed);

        return empty;
}

/*** host syncpt interrupt service functions ***/
/**
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */
irqreturn_t nvhost_syncpt_thresh_fn(void *dev_id)
{
        struct nvhost_intr_syncpt *syncpt = dev_id;
        unsigned int id = syncpt->id;
        struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
        struct nvhost_master *dev = intr_to_dev(intr);

        (void)process_wait_list(intr, syncpt,
                                nvhost_syncpt_update_min(&dev->syncpt, id));

        return IRQ_HANDLED;
}

/*** host general interrupt service functions ***/


/*** Main API ***/

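/**
 * nvhost_intr_has_pending_jobs() - check for outstanding submits
 *
 * Returns true if any SUBMIT_COMPLETE waiter other than @exclude_data
 * is still queued on syncpt @id.
 */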
bool nvhost_intr_has_pending_jobs(struct nvhost_intr *intr, u32 id,
                        void *exclude_data)
{
        struct nvhost_intr_syncpt *syncpt;
        struct nvhost_waitlist *waiter;
        bool res = false;

        syncpt = intr->syncpt + id;
        spin_lock(&syncpt->lock);
        list_for_each_entry(waiter, &syncpt->wait_head, list)
                if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE &&
                    waiter->data != exclude_data) {
                        res = true;
                        break;
                }

        spin_unlock(&syncpt->lock);

        return res;
}

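/**
 * nvhost_intr_add_action() - queue an action to run at a syncpt threshold
 * @_waiter: storage from nvhost_intr_alloc_waiter(); ownership passes to
 *           the interrupt core, which frees it once the action has run
 *           or the waiter is cancelled
 * @ref: if non-NULL, receives an extra reference on the waiter that the
 *       caller must drop with nvhost_intr_put_ref()
 */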
int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
                        enum nvhost_intr_action action, void *data,
                        void *_waiter,
                        void **ref)
{
        struct nvhost_waitlist *waiter = _waiter;
        struct nvhost_intr_syncpt *syncpt;
        int queue_was_empty;

        if (waiter == NULL) {
                pr_warn("%s: NULL waiter\n", __func__);
                return -EINVAL;
        }

        /* initialize a new waiter */
        INIT_LIST_HEAD(&waiter->list);
        kref_init(&waiter->refcount);
        if (ref)
                kref_get(&waiter->refcount);
        waiter->thresh = thresh;
        waiter->action = action;
        atomic_set(&waiter->state, WLS_PENDING);
        waiter->data = data;
        waiter->count = 1;

        syncpt = intr->syncpt + id;

        spin_lock(&syncpt->lock);

        queue_was_empty = list_empty(&syncpt->wait_head);

        if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
                /* added at head of list - new threshold value */
                intr_op().set_syncpt_threshold(intr, id, thresh);

                /* added as first waiter - enable interrupt */
                if (queue_was_empty)
                        intr_op().enable_syncpt_intr(intr, id);
        }

        spin_unlock(&syncpt->lock);

        if (ref)
                *ref = waiter;
        return 0;
}

void *nvhost_intr_alloc_waiter(void)
{
        return kzalloc(sizeof(struct nvhost_waitlist),
                        GFP_KERNEL|__GFP_REPEAT);
}
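
/*
 * Typical caller flow (a sketch, not taken verbatim from a caller;
 * assumes the NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE action from
 * nvhost_intr.h, a caller-owned wait queue "wq", and
 * syncpt_is_expired() standing in for the caller's completion check):
 *
 *      void *waiter = nvhost_intr_alloc_waiter();
 *      void *ref;
 *      int err;
 *
 *      err = nvhost_intr_add_action(intr, id, thresh,
 *                              NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *                              &wq, waiter, &ref);
 *      if (err)
 *              kfree(waiter);  // add_action did not take ownership
 *      ...
 *      wait_event_interruptible(wq, syncpt_is_expired());
 *      nvhost_intr_put_ref(intr, id, ref);
 */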

/**
 * nvhost_intr_put_ref() - release a waiter reference from add_action()
 *
 * Tries to cancel the waiter (PENDING -> CANCELLED). If the ISR has
 * already removed it but the handler has not yet run (WLS_REMOVED),
 * spins until the state settles, then sweeps the wait list and drops
 * the caller's reference.
 */
void nvhost_intr_put_ref(struct nvhost_intr *intr, u32 id, void *ref)
{
        struct nvhost_waitlist *waiter = ref;
        struct nvhost_intr_syncpt *syncpt;
        struct nvhost_master *host = intr_to_dev(intr);

        while (atomic_cmpxchg(&waiter->state,
                                WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
                schedule();

        syncpt = intr->syncpt + id;
        (void)process_wait_list(intr, syncpt,
                                nvhost_syncpt_update_min(&host->syncpt, id));

        kref_put(&waiter->refcount, waiter_release);
}


/*** Init & shutdown ***/

int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;
        struct nvhost_master *host = intr_to_dev(intr);
        u32 nb_pts = nvhost_syncpt_nb_pts(&host->syncpt);

        mutex_init(&intr->mutex);
        intr->syncpt_irq = irq_sync;
        intr->wq = create_workqueue("host_syncpt");
        if (!intr->wq)
                return -ENOMEM;
        intr->general_irq = irq_gen;

        for (id = 0, syncpt = intr->syncpt;
             id < nb_pts;
             ++id, ++syncpt) {
                syncpt->intr = &host->intr;
                syncpt->id = id;
                spin_lock_init(&syncpt->lock);
                INIT_LIST_HEAD(&syncpt->wait_head);
                snprintf(syncpt->thresh_irq_name,
                        sizeof(syncpt->thresh_irq_name),
                        "host_sp_%02u", id);
        }

        return 0;
}

void nvhost_intr_deinit(struct nvhost_intr *intr)
{
        nvhost_intr_stop(intr);
        destroy_workqueue(intr->wq);
}

void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
{
        mutex_lock(&intr->mutex);

        intr_op().init_host_sync(intr);
        /* round the host clock rate up to whole clocks per microsecond */
        intr_op().set_host_clocks_per_usec(intr,
                                        DIV_ROUND_UP(hz, 1000000));

        intr_op().request_host_general_irq(intr);

        mutex_unlock(&intr->mutex);
}

void nvhost_intr_stop(struct nvhost_intr *intr)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;
        u32 nb_pts = nvhost_syncpt_nb_pts(&intr_to_dev(intr)->syncpt);

        mutex_lock(&intr->mutex);

        intr_op().disable_all_syncpt_intrs(intr);

        for (id = 0, syncpt = intr->syncpt;
             id < nb_pts;
             ++id, ++syncpt) {
                struct nvhost_waitlist *waiter, *next;

                /* reap waiters that were cancelled but never handled */
                list_for_each_entry_safe(waiter, next,
                                &syncpt->wait_head, list) {
                        if (atomic_cmpxchg(&waiter->state,
                                        WLS_CANCELLED, WLS_HANDLED)
                                        == WLS_CANCELLED) {
                                list_del(&waiter->list);
                                kref_put(&waiter->refcount, waiter_release);
                        }
                }

                /* any remaining waiter is still live; report and bail out */
                if (!list_empty(&syncpt->wait_head)) {
                        mutex_unlock(&intr->mutex);
                        pr_warn("%s cannot stop syncpt intr id=%u\n",
                                        __func__, id);
                        return;
                }
        }

        intr_op().free_host_general_irq(intr);
        intr_op().free_syncpt_irq(intr);

        mutex_unlock(&intr->mutex);
}