drivers: video: tegra: Implement HOST1X syncpt init
/*
 * drivers/video/tegra/host/nvhost_intr.c
 *
 * Tegra Graphics Host Interrupt Management
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "nvhost_intr.h"
#include "dev.h"
#include "nvhost_acm.h"
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <trace/events/nvhost.h>
#include "nvhost_channel.h"
#include "nvhost_hwctx.h"

/*** Wait list management ***/

struct nvhost_waitlist {
        struct list_head list;
        struct kref refcount;
        u32 thresh;
        enum nvhost_intr_action action;
        atomic_t state;
        void *data;
        int count;
};

enum waitlist_state {
        WLS_PENDING,
        WLS_REMOVED,
        WLS_CANCELLED,
        WLS_HANDLED
};

static void waiter_release(struct kref *kref)
{
        kfree(container_of(kref, struct nvhost_waitlist, refcount));
}

/**
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
                                struct list_head *queue)
{
        struct nvhost_waitlist *pos;
        u32 thresh = waiter->thresh;

        list_for_each_entry_reverse(pos, queue, list)
                if ((s32)(pos->thresh - thresh) <= 0) {
                        list_add(&waiter->list, &pos->list);
                        return false;
                }

        list_add(&waiter->list, queue);
        return true;
}
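
/*
 * Worked example (hedged): the (s32) cast above makes the threshold
 * ordering wraparound-safe for values within 2^31 of each other:
 *
 *   (s32)(0x00000001 - 0xfffffffe) ==  3, i.e.  > 0 (1 sorts "after")
 *   (s32)(0xfffffffe - 0x00000001) == -3, i.e. <= 0 (fffffffe "before")
 *
 * A plain unsigned compare would order these pairs the other way and
 * break the queue sort whenever the 32-bit sync point value wraps.
 */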

/**
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *dest;
        struct nvhost_waitlist *waiter, *next, *prev;

        list_for_each_entry_safe(waiter, next, head, list) {
                if ((s32)(waiter->thresh - sync) > 0)
                        break;

                dest = completed + waiter->action;

                /* consolidate submit cleanups */
                if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
                        && !list_empty(dest)) {
                        prev = list_entry(dest->prev,
                                        struct nvhost_waitlist, list);
                        if (prev->data == waiter->data) {
                                prev->count++;
                                dest = NULL;
                        }
                }

                /* PENDING->REMOVED or CANCELLED->HANDLED */
                if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
                        list_del(&waiter->list);
                        kref_put(&waiter->refcount, waiter_release);
                } else {
                        list_move_tail(&waiter->list, dest);
                }
        }
}
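
/*
 * State machine note (a hedged reading of the code above): enum
 * waitlist_state is declared in an order that lets a single
 * atomic_inc_return() perform both legal transitions:
 *
 *   WLS_PENDING (0)   -> WLS_REMOVED (1): normal completion; the
 *                        waiter is moved to a `completed` list and
 *                        handled later by run_handlers();
 *   WLS_CANCELLED (2) -> WLS_HANDLED (3): nvhost_intr_put_ref()
 *                        already cancelled it, so it is dropped here.
 */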

/**
 * Set the sync point threshold to the first (lowest) pending
 * threshold in the queue and re-enable the syncpt interrupt.
 */
void reset_threshold_interrupt(struct nvhost_intr *intr,
                               struct list_head *head,
                               unsigned int id)
{
        u32 thresh = list_first_entry(head,
                                struct nvhost_waitlist, list)->thresh;
        BUG_ON(!(intr_op().set_syncpt_threshold &&
                 intr_op().enable_syncpt_intr));

        intr_op().set_syncpt_threshold(intr, id, thresh);
        intr_op().enable_syncpt_intr(intr, id);
}


static void action_submit_complete(struct nvhost_waitlist *waiter)
{
        struct nvhost_channel *channel = waiter->data;
        int nr_completed = waiter->count;

        /* add nr_completed to the trace */
        trace_nvhost_channel_submit_complete(channel->dev->name,
                        nr_completed, waiter->thresh);

        nvhost_cdma_update(&channel->cdma);
        nvhost_module_idle_mult(channel->dev, nr_completed);
}

static void action_ctxsave(struct nvhost_waitlist *waiter)
{
        struct nvhost_hwctx *hwctx = waiter->data;
        struct nvhost_channel *channel = hwctx->channel;

        if (channel->ctxhandler->save_service)
                channel->ctxhandler->save_service(hwctx);
}

static void action_wakeup(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up(wq);
}

static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;

        wake_up_interruptible(wq);
}
typedef void (*action_handler)(struct nvhost_waitlist *waiter);

/* Indexed by enum nvhost_intr_action; order must match the enum. */
static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
        action_submit_complete,
        action_ctxsave,
        action_wakeup,
        action_wakeup_interruptible,
};

static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
{
        struct list_head *head = completed;
        int i;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
                action_handler handler = action_handlers[i];
                struct nvhost_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
                        WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
                                WLS_REMOVED);
                        kref_put(&waiter->refcount, waiter_release);
                }
        }
}

/**
 * Remove & handle all waiters that have completed for the given syncpt.
 * Returns nonzero if the syncpt's wait list is now empty.
 */
static int process_wait_list(struct nvhost_intr *intr,
                             struct nvhost_intr_syncpt *syncpt,
                             u32 threshold)
{
        struct list_head completed[NVHOST_INTR_ACTION_COUNT];
        unsigned int i;
        int empty;

        for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
                INIT_LIST_HEAD(completed + i);

        spin_lock(&syncpt->lock);

        remove_completed_waiters(&syncpt->wait_head, threshold, completed);

        empty = list_empty(&syncpt->wait_head);
        if (!empty)
                reset_threshold_interrupt(intr, &syncpt->wait_head,
                                          syncpt->id);

        spin_unlock(&syncpt->lock);

        run_handlers(completed);

        return empty;
}
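
/*
 * Note (hedged reading): completed waiters are detached from the wait
 * list under syncpt->lock by remove_completed_waiters(), but their
 * handlers only run in run_handlers() after the lock is dropped, so an
 * action (e.g. a wakeup or a CDMA update) may itself sleep, take locks,
 * or queue new waiters without deadlocking against this spinlock.
 */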

/*** host syncpt interrupt service functions ***/
/**
 * Sync point threshold interrupt service thread function
 * Handles sync point threshold triggers, in thread context
 */
irqreturn_t nvhost_syncpt_thresh_fn(int irq, void *dev_id)
{
        struct nvhost_intr_syncpt *syncpt = dev_id;
        unsigned int id = syncpt->id;
        struct nvhost_intr *intr = intr_syncpt_to_intr(syncpt);
        struct nvhost_master *dev = intr_to_dev(intr);

        (void)process_wait_list(intr, syncpt,
                                nvhost_syncpt_update_min(&dev->syncpt, id));

        return IRQ_HANDLED;
}

/**
 * Free a sync point's IRQ. The sync point's interrupt must be
 * disabled before calling this.
 */
static void free_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
{
        if (syncpt->irq_requested) {
                free_irq(syncpt->irq, syncpt);
                syncpt->irq_requested = 0;
        }
}


/*** host general interrupt service functions ***/


/*** Main API ***/

int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
                        enum nvhost_intr_action action, void *data,
                        void *_waiter,
                        void **ref)
{
        struct nvhost_waitlist *waiter = _waiter;
        struct nvhost_intr_syncpt *syncpt;
        int queue_was_empty;
        int err;

        BUG_ON(waiter == NULL);

        BUG_ON(!(intr_op().set_syncpt_threshold &&
                 intr_op().enable_syncpt_intr));

        /* initialize a new waiter */
        INIT_LIST_HEAD(&waiter->list);
        kref_init(&waiter->refcount);
        if (ref)
                kref_get(&waiter->refcount);
        waiter->thresh = thresh;
        waiter->action = action;
        atomic_set(&waiter->state, WLS_PENDING);
        waiter->data = data;
        waiter->count = 1;

        BUG_ON(id >= intr_to_dev(intr)->syncpt.nb_pts);
        syncpt = intr->syncpt + id;

        spin_lock(&syncpt->lock);

        /* lazily request irq for this sync point */
        if (!syncpt->irq_requested) {
                spin_unlock(&syncpt->lock);

                mutex_lock(&intr->mutex);
                BUG_ON(!(intr_op().request_syncpt_irq));
                err = intr_op().request_syncpt_irq(syncpt);
                mutex_unlock(&intr->mutex);

                if (err) {
                        kfree(waiter);
                        return err;
                }

                spin_lock(&syncpt->lock);
        }

        queue_was_empty = list_empty(&syncpt->wait_head);

        if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
                /* added at head of list - new threshold value */
                intr_op().set_syncpt_threshold(intr, id, thresh);

                /* added as first waiter - enable interrupt */
                if (queue_was_empty)
                        intr_op().enable_syncpt_intr(intr, id);
        }

        spin_unlock(&syncpt->lock);

        if (ref)
                *ref = waiter;
        return 0;
}

void *nvhost_intr_alloc_waiter(void)
{
        return kzalloc(sizeof(struct nvhost_waitlist),
                        GFP_KERNEL|__GFP_REPEAT);
}

void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
{
        struct nvhost_waitlist *waiter = ref;

        /* cancel if still pending; if the waiter is mid-handling
         * (WLS_REMOVED), spin until run_handlers() marks it handled */
        while (atomic_cmpxchg(&waiter->state,
                                WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
                schedule();

        kref_put(&waiter->refcount, waiter_release);
}
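
/*
 * Usage sketch (illustrative only; the action enum name is inferred
 * from the handler table above, and error handling is abbreviated):
 *
 *	void *ref;
 *	void *waiter = nvhost_intr_alloc_waiter();
 *
 *	if (!waiter)
 *		return -ENOMEM;
 *	err = nvhost_intr_add_action(intr, id, thresh,
 *				NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *				&wq, waiter, &ref);
 *	if (err)
 *		return err;	// add_action freed the waiter on failure
 *	// ... sleep on wq, possibly with a timeout ...
 *	nvhost_intr_put_ref(intr, ref);
 *
 * Passing a non-NULL `ref` takes an extra reference so the waiter can
 * still be cancelled with nvhost_intr_put_ref(), e.g. after a timeout.
 */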


/*** Init & shutdown ***/

int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;
        struct nvhost_master *host =
                container_of(intr, struct nvhost_master, intr);
        u32 nb_pts = host->syncpt.nb_pts;

        mutex_init(&intr->mutex);
        intr_op().init_host_sync(intr);
        intr->host_general_irq = irq_gen;
        intr->host_general_irq_requested = false;

        for (id = 0, syncpt = intr->syncpt;
             id < nb_pts;
             ++id, ++syncpt) {
                syncpt->intr = &host->intr;
                syncpt->id = id;
                syncpt->irq = irq_sync + id;
                syncpt->irq_requested = 0;
                spin_lock_init(&syncpt->lock);
                INIT_LIST_HEAD(&syncpt->wait_head);
                snprintf(syncpt->thresh_irq_name,
                        sizeof(syncpt->thresh_irq_name),
                        "host_sp_%02d", id);
        }

        return 0;
}

void nvhost_intr_deinit(struct nvhost_intr *intr)
{
        nvhost_intr_stop(intr);
}

void nvhost_intr_start(struct nvhost_intr *intr, u32 hz)
{
        BUG_ON(!(intr_op().init_host_sync &&
                 intr_op().set_host_clocks_per_usec &&
                 intr_op().request_host_general_irq));

        mutex_lock(&intr->mutex);

        intr_op().init_host_sync(intr);
        /* round the host clock rate up to whole clocks per microsecond */
        intr_op().set_host_clocks_per_usec(intr,
                                               (hz + 1000000 - 1)/1000000);

        intr_op().request_host_general_irq(intr);

        mutex_unlock(&intr->mutex);
}
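
/*
 * Worked example (hedged, hypothetical clock rates): the expression
 * above is a ceiling division, i.e. DIV_ROUND_UP(hz, 1000000).
 * For a 216 MHz host clock: (216000000 + 999999) / 1000000 = 216
 * clocks per usec; for 19.2 MHz: (19200000 + 999999) / 1000000 = 20,
 * rounding up so a timeout is never programmed shorter than requested.
 */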

void nvhost_intr_stop(struct nvhost_intr *intr)
{
        unsigned int id;
        struct nvhost_intr_syncpt *syncpt;
        u32 nb_pts = intr_to_dev(intr)->syncpt.nb_pts;

        BUG_ON(!(intr_op().disable_all_syncpt_intrs &&
                 intr_op().free_host_general_irq));

        mutex_lock(&intr->mutex);

        intr_op().disable_all_syncpt_intrs(intr);

        for (id = 0, syncpt = intr->syncpt;
             id < nb_pts;
             ++id, ++syncpt) {
                struct nvhost_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next,
                                &syncpt->wait_head, list) {
                        if (atomic_cmpxchg(&waiter->state,
                                        WLS_CANCELLED, WLS_HANDLED)
                                        == WLS_CANCELLED) {
                                list_del(&waiter->list);
                                kref_put(&waiter->refcount, waiter_release);
                        }
                }

                if (!list_empty(&syncpt->wait_head)) {
                        /* output diagnostics */
                        printk(KERN_DEBUG "%s id=%d\n", __func__, id);
                        BUG();
                }

                free_syncpt_irq(syncpt);
        }

        intr_op().free_host_general_irq(intr);

        mutex_unlock(&intr->mutex);
}