/*
 * drivers/video/tegra/host/host1x/host1x_cdma.c
 *
 * Tegra Graphics Host Command DMA
 *
 * Copyright (c) 2010-2011, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/slab.h>
#include "nvhost_cdma.h"
#include "dev.h"

#include "host1x_hardware.h"
#include "host1x_syncpt.h"
#include "host1x_cdma.h"

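/*
 * Compose a value for the channel DMACTRL register. Inferred from the
 * call sites below: DMASTOP gates command fetch, DMAGETRST holds DMAGET
 * in reset, and DMAINITGET loads DMAGET from DMAPUT when the reset is
 * released (see cdma_start() and cdma_timeout_restart() for the
 * sequencing).
 */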
static inline u32 host1x_channel_dmactrl(int stop, int get_rst, int init_get)
{
	return HOST1X_CREATE(CHANNEL_DMACTRL, DMASTOP, stop)
			| HOST1X_CREATE(CHANNEL_DMACTRL, DMAGETRST, get_rst)
			| HOST1X_CREATE(CHANNEL_DMACTRL, DMAINITGET, init_get);
}

static void cdma_timeout_handler(struct work_struct *work);

/*
 * push_buffer
 *
 * The push buffer is a circular array of words to be fetched by command DMA.
 * Note that it works slightly differently to the sync queue; fence == cur
 * means that the push buffer is full, not empty.
 */
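/*
 * For example: after a reset, cur == 0 and fence == PUSH_BUFFER_SIZE - 8,
 * so every 8-byte slot but one is free. Keeping one slot permanently
 * unused lets fence == cur mean "full" unambiguously, and
 * push_buffer_space() below reads 0 in exactly that case (the mask
 * arithmetic assumes PUSH_BUFFER_SIZE is a power of two).
 */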


/**
 * Reset to empty push buffer
 */
static void push_buffer_reset(struct push_buffer *pb)
{
	pb->fence = PUSH_BUFFER_SIZE - 8;
	pb->cur = 0;
}

/**
 * Init push buffer resources
 */
static int push_buffer_init(struct push_buffer *pb)
{
	struct nvhost_cdma *cdma = pb_to_cdma(pb);
	struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
	pb->mem = NULL;
	pb->mapped = NULL;
	pb->phys = 0;
	pb->nvmap = NULL;

	BUG_ON(!cdma_pb_op(cdma).reset);
	cdma_pb_op(cdma).reset(pb);

	/* allocate and map pushbuffer memory; the extra word holds the
	 * RESTART opcode written below */
	pb->mem = nvmap_alloc(nvmap, PUSH_BUFFER_SIZE + 4, 32,
			      NVMAP_HANDLE_WRITE_COMBINE);
	if (IS_ERR_OR_NULL(pb->mem)) {
		pb->mem = NULL;
		goto fail;
	}
	pb->mapped = nvmap_mmap(pb->mem);
	if (pb->mapped == NULL)
		goto fail;

	/* pin pushbuffer and get physical address (values in the top
	 * page indicate a pin failure) */
	pb->phys = nvmap_pin(nvmap, pb->mem);
	if (pb->phys >= 0xfffff000) {
		pb->phys = 0;
		goto fail;
	}

	/* memory for storing nvmap client and handles for each opcode pair */
	pb->nvmap = kzalloc(NVHOST_GATHER_QUEUE_SIZE *
				sizeof(struct nvmap_client_handle),
			GFP_KERNEL);
	if (!pb->nvmap)
		goto fail;

	/* put the restart at the end of pushbuffer memory; command DMA
	 * wraps back to the buffer start by executing it */
	*(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) =
		nvhost_opcode_restart(pb->phys);

	return 0;

fail:
	cdma_pb_op(cdma).destroy(pb);
	return -ENOMEM;
}

/**
 * Clean up push buffer resources
 */
static void push_buffer_destroy(struct push_buffer *pb)
{
	struct nvhost_cdma *cdma = pb_to_cdma(pb);
	struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
	if (pb->mapped)
		nvmap_munmap(pb->mem, pb->mapped);

	if (pb->phys != 0)
		nvmap_unpin(nvmap, pb->mem);

	if (pb->mem)
		nvmap_free(nvmap, pb->mem);

	kfree(pb->nvmap);

	pb->mem = NULL;
	pb->mapped = NULL;
	pb->phys = 0;
	pb->nvmap = NULL;
}

/**
 * Push two words to the push buffer
 * Caller must ensure push buffer is not full
 */
static void push_buffer_push_to(struct push_buffer *pb,
		struct nvmap_client *client,
		struct nvmap_handle *handle, u32 op1, u32 op2)
{
	u32 cur = pb->cur;
	u32 *p = (u32 *)((u32)pb->mapped + cur);
	u32 cur_nvmap = (cur/8) & (NVHOST_GATHER_QUEUE_SIZE - 1);
	BUG_ON(cur == pb->fence);
	*(p++) = op1;
	*(p++) = op2;
	pb->nvmap[cur_nvmap].client = client;
	pb->nvmap[cur_nvmap].handle = handle;
	pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
}
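/*
 * Each push fills one 8-byte slot (two opcode words), so cur / 8 is the
 * slot index; taken modulo NVHOST_GATHER_QUEUE_SIZE it also indexes the
 * recorded nvmap client/handle pair that backs the slot's opcodes
 * (cleared again when the slot is popped below).
 */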

/**
 * Pop a number of two word slots from the push buffer
 * Caller must ensure push buffer is not empty
 */
static void push_buffer_pop_from(struct push_buffer *pb,
		unsigned int slots)
{
	/* Clear the nvmap references for old items from pb */
	unsigned int i;
	u32 fence_nvmap = pb->fence/8;
	for (i = 0; i < slots; i++) {
		int cur_fence_nvmap = (fence_nvmap+i)
				& (NVHOST_GATHER_QUEUE_SIZE - 1);
		struct nvmap_client_handle *h =
				&pb->nvmap[cur_fence_nvmap];
		h->client = NULL;
		h->handle = NULL;
	}
	/* Advance the next write position */
	pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
}

/**
 * Return the number of two word slots free in the push buffer
 */
static u32 push_buffer_space(struct push_buffer *pb)
{
	return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
}

/**
 * Return the DMA physical address of the current (PUT) position
 */
static u32 push_buffer_putptr(struct push_buffer *pb)
{
	return pb->phys + pb->cur;
}

/*
 * The syncpt incr buffer is filled with methods to increment syncpts;
 * it is later GATHERed into the mainline PB. It's used when a timed-out
 * context is interleaved with other work, and so needs to inline the
 * syncpt increments to keep the count correct (but otherwise does no
 * work).
 */
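/*
 * Per-increment word layout, as written by cdma_timeout_init() below
 * (5 words for the 3D syncpt, 3 otherwise):
 *
 *	SETCL		host1x class
 *	IMM_INCR	syncpt_id
 *	NONINCR		INCR_SYNCPT_BASE, count 1	(3D only)
 *	INCR_SYNCPT_BASE	NVWAITBASE_3D, +1	(3D only)
 *	SETCL		back to the channel's class
 */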

/**
 * Init timeout and syncpt incr buffer resources
 */
static int cdma_timeout_init(struct nvhost_cdma *cdma,
				 u32 syncpt_id)
{
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
	struct syncpt_buffer *sb = &cdma->syncpt_buffer;
	struct nvhost_channel *ch = cdma_to_channel(cdma);
	u32 i = 0;

	if (syncpt_id == NVSYNCPT_INVALID)
		return -EINVAL;

	/* allocate and map syncpt incr memory */
	sb->mem = nvmap_alloc(nvmap,
			(SYNCPT_INCR_BUFFER_SIZE_WORDS * sizeof(u32)), 32,
			NVMAP_HANDLE_WRITE_COMBINE);
	if (IS_ERR_OR_NULL(sb->mem)) {
		sb->mem = NULL;
		goto fail;
	}
	sb->mapped = nvmap_mmap(sb->mem);
	if (sb->mapped == NULL)
		goto fail;

	/* pin syncpt buffer and get physical address (values in the top
	 * page indicate a pin failure) */
	sb->phys = nvmap_pin(nvmap, sb->mem);
	if (sb->phys >= 0xfffff000) {
		sb->phys = 0;
		goto fail;
	}

	dev_dbg(&dev->pdev->dev, "%s: SYNCPT_INCR buffer at 0x%x\n",
		 __func__, sb->phys);

	sb->words_per_incr = (syncpt_id == NVSYNCPT_3D) ? 5 : 3;
	sb->incr_per_buffer = (SYNCPT_INCR_BUFFER_SIZE_WORDS /
				sb->words_per_incr);

	/* init buffer with SETCL and INCR_SYNCPT methods; i counts words,
	 * so fill all incr_per_buffer increment sequences */
	while (i < (sb->incr_per_buffer * sb->words_per_incr)) {
		sb->mapped[i++] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
						0, 0);
		sb->mapped[i++] = nvhost_opcode_imm_incr_syncpt(
						NV_SYNCPT_IMMEDIATE,
						syncpt_id);
		if (syncpt_id == NVSYNCPT_3D) {
			/* also contains base increments */
			sb->mapped[i++] = nvhost_opcode_nonincr(
						NV_CLASS_HOST_INCR_SYNCPT_BASE,
						1);
			sb->mapped[i++] = nvhost_class_host_incr_syncpt_base(
						NVWAITBASE_3D, 1);
		}
		sb->mapped[i++] = nvhost_opcode_setclass(ch->desc->class,
						0, 0);
	}

	/* flush the write-combined buffer before it can be fetched */
	wmb();

	INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
	cdma->timeout.initialized = true;

	return 0;
fail:
	cdma_op(cdma).timeout_destroy(cdma);
	return -ENOMEM;
}

/**
 * Clean up timeout syncpt buffer resources
 */
static void cdma_timeout_destroy(struct nvhost_cdma *cdma)
{
	struct nvmap_client *nvmap = cdma_to_nvmap(cdma);
	struct syncpt_buffer *sb = &cdma->syncpt_buffer;

	if (sb->mapped)
		nvmap_munmap(sb->mem, sb->mapped);

	if (sb->phys != 0)
		nvmap_unpin(nvmap, sb->mem);

	if (sb->mem)
		nvmap_free(nvmap, sb->mem);

	sb->mem = NULL;
	sb->mapped = NULL;
	sb->phys = 0;

	if (cdma->timeout.initialized)
		cancel_delayed_work(&cdma->timeout.wq);
	cdma->timeout.initialized = false;
}

/**
 * Increment a timed-out buffer's syncpt via CPU, then NOP out its
 * pushbuffer slots so command DMA does no further work for it.
 */
static void cdma_timeout_cpu_incr(struct nvhost_cdma *cdma, u32 getptr,
				u32 syncpt_incrs, u32 syncval, u32 nr_slots)
{
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct push_buffer *pb = &cdma->push_buffer;
	u32 i, getidx;

	for (i = 0; i < syncpt_incrs; i++)
		nvhost_syncpt_cpu_incr(&dev->syncpt, cdma->timeout.syncpt_id);

	/* after CPU incr, ensure shadow is up to date */
	nvhost_syncpt_update_min(&dev->syncpt, cdma->timeout.syncpt_id);

	/* update WAITBASE_3D by same number of incrs */
	if (cdma->timeout.syncpt_id == NVSYNCPT_3D) {
		void __iomem *p;
		p = dev->sync_aperture + HOST1X_SYNC_SYNCPT_BASE_0 +
				(NVWAITBASE_3D * sizeof(u32));
		writel(syncval, p);
	}

	/* NOP all the PB slots */
	getidx = getptr - pb->phys;
	while (nr_slots--) {
		u32 *p = (u32 *)((u32)pb->mapped + getidx);
		*(p++) = NVHOST_OPCODE_NOOP;
		*(p++) = NVHOST_OPCODE_NOOP;
		dev_dbg(&dev->pdev->dev, "%s: NOP at 0x%x\n",
			__func__, pb->phys + getidx);
		getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
	}
	wmb();
}

/**
 * This routine is called at the point we transition back into a timed
 * ctx. The syncpts are incremented via pushbuffer with a flag indicating
 * whether there's a CTXSAVE that should still be executed (for the
 * preceding HW ctx).
 */
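/*
 * Illustratively: each two-word slot rewritten here becomes a GATHER of
 * up to incr_per_buffer increments fetched from the syncpt incr buffer
 * (count = incrs * words_per_incr words); slots not needed for the
 * remaining increments are NOPed out afterwards.
 */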
static void cdma_timeout_pb_incr(struct nvhost_cdma *cdma, u32 getptr,
				u32 syncpt_incrs, u32 nr_slots,
				bool exec_ctxsave)
{
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct syncpt_buffer *sb = &cdma->syncpt_buffer;
	struct push_buffer *pb = &cdma->push_buffer;
	struct nvhost_hwctx *hwctx = cdma->timeout.ctx;
	u32 getidx, *p;

	/* should have enough slots to incr to desired count */
	BUG_ON(syncpt_incrs > (nr_slots * sb->incr_per_buffer));

	getidx = getptr - pb->phys;
	if (exec_ctxsave) {
		/* don't disrupt the CTXSAVE of a good/non-timed out ctx */
		nr_slots -= hwctx->save_slots;
		syncpt_incrs -= hwctx->save_incrs;

		getidx += (hwctx->save_slots * 8);
		getidx &= (PUSH_BUFFER_SIZE - 1);

		dev_dbg(&dev->pdev->dev,
			"%s: exec CTXSAVE of prev ctx (slots %d, incrs %d)\n",
			__func__, nr_slots, syncpt_incrs);
	}

	while (syncpt_incrs) {
		u32 incrs, count;

		/* GATHER count is incrs * number of words per incr */
		incrs = min(syncpt_incrs, sb->incr_per_buffer);
		count = incrs * sb->words_per_incr;

		p = (u32 *)((u32)pb->mapped + getidx);
		*(p++) = nvhost_opcode_gather(count);
		*(p++) = sb->phys;

		dev_dbg(&dev->pdev->dev,
			"%s: GATHER at 0x%x, from 0x%x, dcount = %d\n",
			__func__,
			pb->phys + getidx, sb->phys,
			(incrs * sb->words_per_incr));

		syncpt_incrs -= incrs;
		getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
		nr_slots--;
	}

	/* NOP remaining slots */
	while (nr_slots--) {
		p = (u32 *)((u32)pb->mapped + getidx);
		*(p++) = NVHOST_OPCODE_NOOP;
		*(p++) = NVHOST_OPCODE_NOOP;
		dev_dbg(&dev->pdev->dev, "%s: NOP at 0x%x\n",
			__func__, pb->phys + getidx);
		getidx = (getidx + 8) & (PUSH_BUFFER_SIZE - 1);
	}
	wmb();
}

/**
 * Start channel DMA
 */
static void cdma_start(struct nvhost_cdma *cdma)
{
	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;

	if (cdma->running)
		return;

	BUG_ON(!cdma_pb_op(cdma).putptr);
	cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);

	writel(host1x_channel_dmactrl(true, false, false),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	/* set base, put, end pointer (all of memory) */
	writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
	writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
	writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
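	/*
	 * With DMASTART/DMAEND spanning all of memory, wrap-around at the
	 * end of the pushbuffer is handled by the RESTART opcode written
	 * there in push_buffer_init(), not by the DMA engine itself.
	 */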

	/* reset GET */
	writel(host1x_channel_dmactrl(true, true, true),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	/* start the command DMA */
	writel(host1x_channel_dmactrl(false, false, false),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	cdma->running = true;
}

/**
 * Similar to cdma_start(), but rather than starting from an idle
 * state (where DMA GET is set to DMA PUT), on a timeout we restore
 * DMA GET from an explicit value (so DMA may again be pending).
 */
static void cdma_timeout_restart(struct nvhost_cdma *cdma, u32 getptr)
{
	struct nvhost_master *dev = cdma_to_dev(cdma);
	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;

	if (cdma->running)
		return;

	BUG_ON(!cdma_pb_op(cdma).putptr);
	cdma->last_put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);

	writel(host1x_channel_dmactrl(true, false, false),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	/* set base, end pointer (all of memory) */
	writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
	writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);

	/* set GET, by loading the value in PUT (then reset GET) */
	writel(getptr, chan_regs + HOST1X_CHANNEL_DMAPUT);
	writel(host1x_channel_dmactrl(true, true, true),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	dev_dbg(&dev->pdev->dev,
		"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
		__func__,
		readl(chan_regs + HOST1X_CHANNEL_DMAGET),
		readl(chan_regs + HOST1X_CHANNEL_DMAPUT),
		cdma->last_put);

	/* deassert GET reset and set PUT */
	writel(host1x_channel_dmactrl(true, false, false),
		chan_regs + HOST1X_CHANNEL_DMACTRL);
	writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);

	/* start the command DMA */
	writel(host1x_channel_dmactrl(false, false, false),
		chan_regs + HOST1X_CHANNEL_DMACTRL);

	cdma->running = true;
}

/**
 * Kick channel DMA into action by writing its PUT offset (if it has changed)
 */
static void cdma_kick(struct nvhost_cdma *cdma)
{
	u32 put;
	BUG_ON(!cdma_pb_op(cdma).putptr);

	put = cdma_pb_op(cdma).putptr(&cdma->push_buffer);

	if (put != cdma->last_put) {
		void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
		wmb();
		writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
		cdma->last_put = put;
	}
}

/**
 * Stop channel DMA once the sync queue has drained
 */
static void cdma_stop(struct nvhost_cdma *cdma)
{
	void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;

	mutex_lock(&cdma->lock);
	if (cdma->running) {
		nvhost_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
		writel(host1x_channel_dmactrl(true, false, false),
			chan_regs + HOST1X_CHANNEL_DMACTRL);
		cdma->running = false;
	}
	mutex_unlock(&cdma->lock);
}

/**
 * Retrieve the op pair at a slot offset from a DMA address
 */
void cdma_peek(struct nvhost_cdma *cdma,
			  u32 dmaget, int slot, u32 *out)
{
	u32 offset = dmaget - cdma->push_buffer.phys;
	u32 *p = cdma->push_buffer.mapped;

	offset = ((offset + slot * 8) & (PUSH_BUFFER_SIZE - 1)) >> 2;
	out[0] = p[offset];
	out[1] = p[offset + 1];
}
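/*
 * Illustrative use (hypothetical caller, not part of this file): a debug
 * dump might walk opcode pairs relative to the current fetch position:
 *
 *	u32 op[2];
 *	cdma_peek(cdma, readl(ch->aperture + HOST1X_CHANNEL_DMAGET),
 *		  slot, op);
 *
 * where slot 0 is the pair at DMAGET and slot 1 the pair after it.
 */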

/**
 * Stops both the channel's command processor (CMDPROC) and CDMA
 * immediately. Also tears down the channel and resets the corresponding
 * module.
 */
void cdma_timeout_teardown_begin(struct nvhost_cdma *cdma)
{
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct nvhost_channel *ch = cdma_to_channel(cdma);
	u32 cmdproc_stop;

	BUG_ON(cdma->torndown);

	dev_dbg(&dev->pdev->dev,
		"begin channel teardown (channel id %d)\n", ch->chid);

	cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
	cmdproc_stop |= BIT(ch->chid);
	writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);

	dev_dbg(&dev->pdev->dev,
		"%s: DMA GET 0x%x, PUT HW 0x%x / shadow 0x%x\n",
		__func__,
		readl(ch->aperture + HOST1X_CHANNEL_DMAGET),
		readl(ch->aperture + HOST1X_CHANNEL_DMAPUT),
		cdma->last_put);

	writel(host1x_channel_dmactrl(true, false, false),
		ch->aperture + HOST1X_CHANNEL_DMACTRL);

	writel(BIT(ch->chid), dev->sync_aperture + HOST1X_SYNC_CH_TEARDOWN);
	nvhost_module_reset(&dev->pdev->dev, &ch->mod);

	cdma->running = false;
	cdma->torndown = true;
}

/**
 * Re-enables the channel's command processor and restarts CDMA from the
 * given DMAGET, completing the teardown begun above.
 */
void cdma_timeout_teardown_end(struct nvhost_cdma *cdma, u32 getptr)
{
	struct nvhost_master *dev = cdma_to_dev(cdma);
	struct nvhost_channel *ch = cdma_to_channel(cdma);
	u32 cmdproc_stop;

	BUG_ON(!cdma->torndown || cdma->running);

	dev_dbg(&dev->pdev->dev,
		"end channel teardown (id %d, DMAGET restart = 0x%x)\n",
		ch->chid, getptr);

	cmdproc_stop = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
	cmdproc_stop &= ~(BIT(ch->chid));
	writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);

	cdma->torndown = false;
	cdma_timeout_restart(cdma, getptr);
}

/**
 * If this timeout fires, it indicates the current sync_queue entry has
 * exceeded its TTL: the userctx should be timed out, and the remaining
 * submits that were already issued should be cleaned up (future submits
 * return an error).
 */
static void cdma_timeout_handler(struct work_struct *work)
{
	struct nvhost_cdma *cdma;
	struct nvhost_master *dev;
	struct nvhost_syncpt *sp;
	struct nvhost_channel *ch;

	u32 syncpt_val;

	u32 prev_cmdproc, cmdproc_stop;

	cdma = container_of(to_delayed_work(work), struct nvhost_cdma,
			    timeout.wq);
	dev = cdma_to_dev(cdma);
	sp = &dev->syncpt;
	ch = cdma_to_channel(cdma);

	mutex_lock(&cdma->lock);

	if (!cdma->timeout.clientid) {
		dev_dbg(&dev->pdev->dev,
			 "cdma_timeout: expired, but has no clientid\n");
		mutex_unlock(&cdma->lock);
		return;
	}

	/* stop processing to get a clean snapshot */
	prev_cmdproc = readl(dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
	cmdproc_stop = prev_cmdproc | BIT(ch->chid);
	writel(cmdproc_stop, dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);

	dev_dbg(&dev->pdev->dev, "cdma_timeout: cmdproc was 0x%x is 0x%x\n",
		prev_cmdproc, cmdproc_stop);

	syncpt_val = nvhost_syncpt_update_min(&dev->syncpt,
			cdma->timeout.syncpt_id);

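	/*
	 * Signed difference makes the comparison safe across 32-bit
	 * wraparound: (s32)(a - b) >= 0 means a has reached b even if
	 * the raw counter values have wrapped.
	 */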
	/* has buffer actually completed? */
	if ((s32)(syncpt_val - cdma->timeout.syncpt_val) >= 0) {
		dev_dbg(&dev->pdev->dev,
			 "cdma_timeout: expired, but buffer had completed\n");
		/* restore */
		cmdproc_stop = prev_cmdproc & ~(BIT(ch->chid));
		writel(cmdproc_stop,
			dev->sync_aperture + HOST1X_SYNC_CMDPROC_STOP);
		mutex_unlock(&cdma->lock);
		return;
	}

	dev_warn(&dev->pdev->dev,
		"%s: timeout: %d (%s) ctx 0x%p, HW thresh %d, done %d\n",
		__func__,
		cdma->timeout.syncpt_id,
		syncpt_op(sp).name(sp, cdma->timeout.syncpt_id),
		cdma->timeout.ctx,
		syncpt_val, cdma->timeout.syncpt_val);

	/* stop HW, resetting channel/module */
	cdma_op(cdma).timeout_teardown_begin(cdma);

	nvhost_cdma_update_sync_queue(cdma, sp, &dev->pdev->dev);
	mutex_unlock(&cdma->lock);
}

/**
 * Install the host1x implementations of the cdma and push_buffer ops
 */
int host1x_init_cdma_support(struct nvhost_master *host)
{
	host->op.cdma.start = cdma_start;
	host->op.cdma.stop = cdma_stop;
	host->op.cdma.kick = cdma_kick;

	host->op.cdma.timeout_init = cdma_timeout_init;
	host->op.cdma.timeout_destroy = cdma_timeout_destroy;
	host->op.cdma.timeout_teardown_begin = cdma_timeout_teardown_begin;
	host->op.cdma.timeout_teardown_end = cdma_timeout_teardown_end;
	host->op.cdma.timeout_cpu_incr = cdma_timeout_cpu_incr;
	host->op.cdma.timeout_pb_incr = cdma_timeout_pb_incr;

	host->sync_queue_size = NVHOST_SYNC_QUEUE_SIZE;

	host->op.push_buffer.reset = push_buffer_reset;
	host->op.push_buffer.init = push_buffer_init;
	host->op.push_buffer.destroy = push_buffer_destroy;
	host->op.push_buffer.push_to = push_buffer_push_to;
	host->op.push_buffer.pop_from = push_buffer_pop_from;
	host->op.push_buffer.space = push_buffer_space;
	host->op.push_buffer.putptr = push_buffer_putptr;

	return 0;
}