ded6ae7fc85f1a21fa3d20304963da678d08f119
[linux-3.10.git] / drivers / video / tegra / host / gr3d / gr3d_t30.c
1 /*
2  * drivers/video/tegra/host/gr3d/gr3d_t30.c
3  *
4  * Tegra Graphics Host 3D for Tegra3
5  *
6  * Copyright (c) 2011-2013 NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include "nvhost_hwctx.h"
22 #include "nvhost_channel.h"
23 #include "nvhost_cdma.h"
24 #include "dev.h"
25 #include "host1x/host1x01_hardware.h"
26 #include "gr3d.h"
27 #include "chip_support.h"
28 #include "nvhost_memmgr.h"
29 #include "nvhost_job.h"
30 #include "nvhost_acm.h"
31 #include "class_ids.h"
32
33 #include <mach/gpufuse.h>
34 #include <mach/hardware.h>
35 #include <linux/slab.h>
36 #include <linux/scatterlist.h>
37
/*
 * 3D-class register ranges saved/restored for context switch, common to
 * both pixel-pipe register sets.  Each entry is (offset, word count,
 * access type): DIRECT ranges are read register-by-register; INDIRECT
 * ranges are streamed through an offset/data register pair at
 * offset/offset+1 (INDIRECT_4X uses offset+2 as the data register — see
 * setup_save_regs()).
 */
static const struct hwctx_reginfo ctxsave_regs_3d_global[] = {
	HWCTX_REGINFO(0xe00,    4, DIRECT),
	HWCTX_REGINFO(0xe05,   30, DIRECT),
	HWCTX_REGINFO(0xe25,    2, DIRECT),
	HWCTX_REGINFO(0xe28,    2, DIRECT),
	HWCTX_REGINFO(0xe30,   16, DIRECT),
	HWCTX_REGINFO(0x001,    2, DIRECT),
	HWCTX_REGINFO(0x00c,   10, DIRECT),
	HWCTX_REGINFO(0x100,   34, DIRECT),
	HWCTX_REGINFO(0x124,    2, DIRECT),
	HWCTX_REGINFO(0x200,    5, DIRECT),
	HWCTX_REGINFO(0x205, 1024, INDIRECT),
	HWCTX_REGINFO(0x207, 1024, INDIRECT),
	HWCTX_REGINFO(0x209,    1, DIRECT),
	HWCTX_REGINFO(0x300,   64, DIRECT),
	HWCTX_REGINFO(0x343,   25, DIRECT),
	HWCTX_REGINFO(0x363,    2, DIRECT),
	HWCTX_REGINFO(0x400,   16, DIRECT),
	HWCTX_REGINFO(0x411,    1, DIRECT),
	HWCTX_REGINFO(0x412,    1, DIRECT),
	HWCTX_REGINFO(0x500,    4, DIRECT),
	HWCTX_REGINFO(0x520,   32, DIRECT),
	HWCTX_REGINFO(0x540,   64, INDIRECT),
	HWCTX_REGINFO(0x600,   16, INDIRECT_4X),
	HWCTX_REGINFO(0x603,  128, INDIRECT),
	HWCTX_REGINFO(0x608,    4, DIRECT),
	HWCTX_REGINFO(0x60e,    1, DIRECT),
	HWCTX_REGINFO(0x700,   64, INDIRECT),
	HWCTX_REGINFO(0x710,   50, DIRECT),
	HWCTX_REGINFO(0x750,   16, DIRECT),
	HWCTX_REGINFO(0x800,   16, INDIRECT_4X),
	HWCTX_REGINFO(0x803,  512, INDIRECT),
	HWCTX_REGINFO(0x805,   64, INDIRECT),
	HWCTX_REGINFO(0x820,   32, DIRECT),
	HWCTX_REGINFO(0x900,   64, INDIRECT),
	HWCTX_REGINFO(0x902,    2, DIRECT),
	HWCTX_REGINFO(0x90a,    1, DIRECT),
	HWCTX_REGINFO(0xa02,   10, DIRECT),
	HWCTX_REGINFO(0xb04,    1, DIRECT),
	HWCTX_REGINFO(0xb06,   13, DIRECT),
};
79
/*
 * Registers that differ between pixel-pipe set 0 and set 1; this table
 * is saved once per set (see setup_save(), which emits it twice with
 * different GSHIM read/write-mask selections).
 */
static const struct hwctx_reginfo ctxsave_regs_3d_perset[] = {
	HWCTX_REGINFO(0xe04,    1, DIRECT),
	HWCTX_REGINFO(0xe2a,    1, DIRECT),
	HWCTX_REGINFO(0x413,    1, DIRECT),
	HWCTX_REGINFO(0x90b,    1, DIRECT),
	HWCTX_REGINFO(0xe41,    1, DIRECT),
};
87
/*
 * Word offset within the restore buffer at which the set-1 restore
 * commands begin; computed by setup_save() during the sizing pass and
 * used by save_push_v1() to program the set-1 memory-output address.
 */
static unsigned int restore_set1_offset;

/*
 * Sizes, in 32-bit words, of the fixed fragments of the save command
 * stream.  Each SAVE_*_V1_SIZE includes the RESTORE_*_SIZE words it
 * embeds, because the save stream writes the corresponding restore
 * commands into the restore buffer as it runs.
 */
#define SAVE_BEGIN_V1_SIZE (1 + RESTORE_BEGIN_SIZE)
#define SAVE_DIRECT_V1_SIZE (4 + RESTORE_DIRECT_SIZE)
#define SAVE_INDIRECT_V1_SIZE (6 + RESTORE_INDIRECT_SIZE)
#define SAVE_END_V1_SIZE (9 + RESTORE_END_SIZE)
/* syncpt increments performed by one run of the save stream */
#define SAVE_INCRS 3
#define SAVE_THRESH_OFFSET 0
/* restore-buffer fragment sizes, in words */
#define RESTORE_BEGIN_SIZE 4
#define RESTORE_DIRECT_SIZE 1
#define RESTORE_INDIRECT_SIZE 2
#define RESTORE_END_SIZE 1
100
/*
 * Accumulator passed through the save-stream builders.  When ptr is
 * NULL the builders only account sizes (sizing pass); when non-NULL
 * they also emit the commands and advance ptr.
 */
struct save_info {
	u32 *ptr;			/* next save-stream slot, or NULL */
	unsigned int save_count;	/* save stream length in words */
	unsigned int restore_count;	/* restore buffer length in words */
	unsigned int save_incrs;	/* syncpt increments during save */
	unsigned int restore_incrs;	/* syncpt increments during restore */
};
108
109 /*** v1 saver ***/
110
/*
 * Push the context-save sequence for @nctx onto @cdma: wait for the 3D
 * unit to idle, invalidate the FDC, point both register sets' memory
 * output at the context's restore buffer, then gather the prebuilt save
 * command stream.  Ordering of the pushes mirrors the hardware protocol
 * and must not be changed.
 */
static void save_push_v1(struct nvhost_hwctx *nctx, struct nvhost_cdma *cdma)
{
	struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
	struct host1x_hwctx_handler *p = host1x_hwctx_handler(ctx);

	/* wait for 3d idle */
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
			nvhost_opcode_imm_incr_syncpt(
				host1x_uclass_incr_syncpt_cond_op_done_v(),
				p->h.syncpt));
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_wait_syncpt_base_r(), 1),
			nvhost_class_host_wait_syncpt_base(p->h.syncpt,
							p->h.waitbase, 1));
	/* back to 3d */
	nvhost_cdma_push(cdma,
			nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
			NVHOST_OPCODE_NOOP);

	/* invalidate the FDC to prevent cache-coherency issues across GPUs
	   note that we assume FDC_CONTROL_0 is left in the reset state by all
	   contexts.  the invalidate bit will clear itself, so the register
	   should be unchanged after this */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_FDC_CONTROL_0,
			AR3D_FDC_CONTROL_0_RESET_VAL
				| AR3D_FDC_CONTROL_0_INVALIDATE),
		NVHOST_OPCODE_NOOP);

	/* set register set 0 and 1 register read memory output addresses,
	   and send their reads to memory */

	/* set 1 output lands at restore_set1_offset words into the buffer */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 2),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys + restore_set1_offset * 4);

	/* set 0 output starts at the beginning of the restore buffer */
	nvhost_cdma_push(cdma,
		nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK, 1),
		nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1));
	nvhost_cdma_push(cdma,
		nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
		ctx->restore_phys);
	/* gather the save buffer */
	nvhost_cdma_push_gather(cdma,
			nvhost_get_host(nctx->channel->dev)->memmgr,
			p->save_buf,
			0,
			nvhost_opcode_gather(p->save_size),
			p->save_phys);
}
166
167 static void save_begin_v1(struct host1x_hwctx_handler *p, u32 *ptr)
168 {
169         ptr[0] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
170                         RESTORE_BEGIN_SIZE);
171         nvhost_3dctx_restore_begin(p, ptr + 1);
172         ptr += RESTORE_BEGIN_SIZE;
173 }
174
/*
 * Emit a "save a direct register range" fragment: first write the
 * restore-time direct header into the restore buffer (via the memory
 * output data register), then issue a host indirect read of @count
 * registers starting at @start_reg, whose values will follow in the
 * stream.
 *
 * NOTE the index-base trick: after "ptr += RESTORE_DIRECT_SIZE" the
 * stores to ptr[1..3] land just past the restore words, so the total
 * emitted is SAVE_DIRECT_V1_SIZE (= 4 + RESTORE_DIRECT_SIZE) words.
 */
static void save_direct_v1(u32 *ptr, u32 start_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			AR3D_DW_MEMORY_OUTPUT_DATA, 1);
	nvhost_3dctx_restore_direct(ptr + 1, start_reg, count);
	ptr += RESTORE_DIRECT_SIZE;
	ptr[1] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_indoff_r(), 1);
	ptr[2] = nvhost_class_host_indoff_reg_read(
			host1x_uclass_indoff_indmodid_gr3d_v(),
			start_reg, true);
	/* TODO could do this in the setclass if count < 6 */
	ptr[3] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
}
189
/*
 * Emit a "save an indirect register range" fragment: write the
 * restore-time indirect header into the restore buffer, then program
 * @offset_reg with @offset and issue a host indirect read of @count
 * words through @data_reg (auto-incrementing on the 3D side, hence the
 * 'false' auto-increment flag on the host indoff).
 *
 * Same index-base trick as save_direct_v1(): after
 * "ptr += RESTORE_INDIRECT_SIZE" the stores to ptr[2..5] land past the
 * restore words; total emitted is SAVE_INDIRECT_V1_SIZE
 * (= 6 + RESTORE_INDIRECT_SIZE) words.
 */
static void save_indirect_v1(u32 *ptr, u32 offset_reg, u32 offset,
			u32 data_reg, u32 count)
{
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	ptr[1] = nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_DATA,
			RESTORE_INDIRECT_SIZE);
	nvhost_3dctx_restore_indirect(ptr + 2, offset_reg, offset, data_reg,
			count);
	ptr += RESTORE_INDIRECT_SIZE;
	ptr[2] = nvhost_opcode_imm(offset_reg, offset);
	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
					host1x_uclass_indoff_r(), 1);
	ptr[4] = nvhost_class_host_indoff_reg_read(
			host1x_uclass_indoff_indmodid_gr3d_v(),
			data_reg, false);
	ptr[5] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
}
207
/*
 * Emit the tail of the save command stream: finish the restore buffer,
 * reset the GSHIM write mask to both sets, flush the FDC with an
 * op_done syncpt increment, resynchronize the wait base, and release
 * any waiters with a final increment.
 *
 * Same index-base trick as the other emitters: after
 * "ptr += RESTORE_END_SIZE" the stores to ptr[1..8] land past the
 * restore word; total emitted is SAVE_END_V1_SIZE
 * (= 9 + RESTORE_END_SIZE) words.
 */
static void save_end_v1(struct host1x_hwctx_handler *p, u32 *ptr)
{
	/* write end of restore buffer */
	ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID,
			AR3D_DW_MEMORY_OUTPUT_DATA, 1);
	nvhost_3dctx_restore_end(p, ptr + 1);
	ptr += RESTORE_END_SIZE;
	/* reset to dual reg if necessary */
	ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
			(1 << 2) - 1);
	/* op_done syncpt incr to flush FDC */
	ptr[2] = nvhost_opcode_imm_incr_syncpt(
			host1x_uclass_incr_syncpt_cond_op_done_v(),
			p->h.syncpt);
	/* host wait for that syncpt incr, and advance the wait base */
	ptr[3] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
			host1x_uclass_wait_syncpt_base_r(),
			nvhost_mask2(
				host1x_uclass_wait_syncpt_base_r(),
				host1x_uclass_incr_syncpt_base_r()));
	ptr[4] = nvhost_class_host_wait_syncpt_base(p->h.syncpt,
				p->h.waitbase, p->save_incrs - 1);
	ptr[5] = nvhost_class_host_incr_syncpt_base(p->h.waitbase,
			p->save_incrs);
	/* set class back to 3d */
	ptr[6] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
	/* send reg reads back to host */
	ptr[7] = nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0);
	/* final syncpt increment to release waiters
	 * (imm to register 0 — presumably an immediate-condition syncpt
	 * increment; verify against the 3D class register spec) */
	ptr[8] = nvhost_opcode_imm(0, p->h.syncpt);
}
239
240 /*** save ***/
241
242
243
/*
 * Walk a reginfo table and account (and, when info->ptr is non-NULL,
 * also emit) the save-stream commands for each range.  With a NULL
 * info->ptr this is the sizing pass used to compute buffer sizes before
 * allocation; the two passes must stay in lockstep, so save_count /
 * restore_count are updated unconditionally.
 */
static void setup_save_regs(struct save_info *info,
			const struct hwctx_reginfo *regs,
			unsigned int nr_regs)
{
	const struct hwctx_reginfo *rend = regs + nr_regs;
	u32 *ptr = info->ptr;
	unsigned int save_count = info->save_count;
	unsigned int restore_count = info->restore_count;

	for ( ; regs != rend; ++regs) {
		u32 offset = regs->offset;
		u32 count = regs->count;
		/* data register for indirect access: offset+1, or offset+2
		 * for the 4X variant */
		u32 indoff = offset + 1;
		switch (regs->type) {
		case HWCTX_REGINFO_DIRECT:
			if (ptr) {
				save_direct_v1(ptr, offset, count);
				ptr += SAVE_DIRECT_V1_SIZE;
			}
			save_count += SAVE_DIRECT_V1_SIZE;
			restore_count += RESTORE_DIRECT_SIZE;
			break;
		case HWCTX_REGINFO_INDIRECT_4X:
			++indoff;
			/* fall through */
		case HWCTX_REGINFO_INDIRECT:
			if (ptr) {
				save_indirect_v1(ptr, offset, 0,
						indoff, count);
				ptr += SAVE_INDIRECT_V1_SIZE;
			}
			save_count += SAVE_INDIRECT_V1_SIZE;
			restore_count += RESTORE_INDIRECT_SIZE;
			break;
		}
		if (ptr) {
			/* SAVE cases only: reserve room for incoming data */
			u32 k = 0;
			/*
			 * Create a signature pattern for indirect data (which
			 * will be overwritten by true incoming data) for
			 * better deducing where we are in a long command
			 * sequence, when given only a FIFO snapshot for debug
			 * purposes.
			*/
			for (k = 0; k < count; k++)
				*(ptr + k) = 0xd000d000 | (offset << 16) | k;
			ptr += count;
		}
		/* the register values themselves occupy 'count' words in
		 * both the save stream and the restore buffer */
		save_count += count;
		restore_count += count;
	}

	info->ptr = ptr;
	info->save_count = save_count;
	info->restore_count = restore_count;
}
301
302 static void switch_gpu(struct save_info *info,
303                         unsigned int save_src_set,
304                         u32 save_dest_sets,
305                         u32 restore_dest_sets)
306 {
307         if (info->ptr) {
308                 info->ptr[0] = nvhost_opcode_setclass(
309                                 NV_GRAPHICS_3D_CLASS_ID,
310                                 AR3D_DW_MEMORY_OUTPUT_DATA, 1);
311                 info->ptr[1] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
312                                 restore_dest_sets);
313                 info->ptr[2] = nvhost_opcode_imm(AR3D_GSHIM_WRITE_MASK,
314                                 save_dest_sets);
315                 info->ptr[3] = nvhost_opcode_imm(AR3D_GSHIM_READ_SELECT,
316                                 save_src_set);
317                 info->ptr += 4;
318         }
319         info->save_count += 4;
320         info->restore_count += 1;
321 }
322
/*
 * Build the v1 save command stream (when @ptr is non-NULL) or compute
 * its size and the restore-buffer size (when @ptr is NULL).  Called
 * twice by nvhost_gr3d_t30_ctxhandler_init(): once to size, once to
 * emit into the allocated buffer.  The sequence of switch_gpu() /
 * setup_save_regs() calls must be identical in both passes.
 */
static void setup_save(struct host1x_hwctx_handler *p, u32 *ptr)
{
	struct save_info info = {
		ptr,
		SAVE_BEGIN_V1_SIZE,
		RESTORE_BEGIN_SIZE,
		SAVE_INCRS,
		1
	};
	int save_end_size = SAVE_END_V1_SIZE;

	if (info.ptr) {
		save_begin_v1(p, info.ptr);
		info.ptr += SAVE_BEGIN_V1_SIZE;
	}

	/* read from set0, write cmds through set0, restore to set0 and 1 */
	switch_gpu(&info, 0, 1, 3);

	/* save regs that are common to both sets */
	setup_save_regs(&info,
			ctxsave_regs_3d_global,
			ARRAY_SIZE(ctxsave_regs_3d_global));

	/* read from set 0, write cmds through set0, restore to set0 */
	switch_gpu(&info, 0, 1, 1);

	/* save set 0 specific regs */
	setup_save_regs(&info,
			ctxsave_regs_3d_perset,
			ARRAY_SIZE(ctxsave_regs_3d_perset));


	/* read from set1, write cmds through set1, restore to set1 */
	switch_gpu(&info, 1, 2, 2);
	/* note offset at which set 1 restore starts */
	restore_set1_offset = info.restore_count;
	/* save set 1 specific regs */
	setup_save_regs(&info,
			ctxsave_regs_3d_perset,
			ARRAY_SIZE(ctxsave_regs_3d_perset));


	/* read from set0, write cmds through set1, restore to set0 and 1 */
	switch_gpu(&info, 0, 2, 3);

	if (info.ptr) {
		save_end_v1(p, info.ptr);
		info.ptr += SAVE_END_V1_SIZE;
	}

	/* make the emitted stream visible before the buffer is handed off */
	wmb();

	p->save_size = info.save_count + save_end_size;
	p->restore_size = info.restore_count + RESTORE_END_SIZE;
	p->save_incrs = info.save_incrs;
	p->h.save_thresh = p->save_incrs - SAVE_THRESH_OFFSET;
	p->restore_incrs = info.restore_incrs;
}
382
383
384 /*** ctx3d ***/
385
386 static struct nvhost_hwctx *ctx3d_alloc_v1(struct nvhost_hwctx_handler *h,
387                 struct nvhost_channel *ch)
388 {
389         struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
390         struct host1x_hwctx *ctx = nvhost_3dctx_alloc_common(p, ch, false);
391
392         if (ctx)
393                 return &ctx->hwctx;
394         else
395                 return NULL;
396 }
397
/*
 * Create the Tegra3 3D context handler: size the save stream with a
 * dry-run setup_save(), allocate/map/pin the save buffer, emit the real
 * stream into it, and wire up the handler ops.  Returns the embedded
 * nvhost_hwctx_handler, or NULL on any failure (all partially acquired
 * resources are released via the goto-cleanup chain).
 */
struct nvhost_hwctx_handler *nvhost_gr3d_t30_ctxhandler_init(
		u32 syncpt, u32 waitbase,
		struct nvhost_channel *ch)
{
	struct mem_mgr *memmgr;
	u32 *save_ptr;
	struct host1x_hwctx_handler *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;

	memmgr = nvhost_get_host(ch->dev)->memmgr;

	p->h.syncpt = syncpt;
	p->h.waitbase = waitbase;

	/* sizing pass: computes p->save_size / p->restore_size */
	setup_save(p, NULL);

	p->save_buf = nvhost_memmgr_alloc(memmgr, p->save_size * 4, 32,
				mem_mgr_flag_write_combine);
	if (IS_ERR(p->save_buf))
		goto fail_alloc;

	save_ptr = nvhost_memmgr_mmap(p->save_buf);
	if (!save_ptr)
		goto fail_mmap;

	p->save_sgt = nvhost_memmgr_pin(memmgr, p->save_buf);
	if (IS_ERR(p->save_sgt))
		goto fail_pin;
	p->save_phys = sg_dma_address(p->save_sgt->sgl);

	/* emit pass: fills the buffer just allocated */
	setup_save(p, save_ptr);

	nvhost_memmgr_munmap(p->save_buf, save_ptr);

	p->save_slots = 8;
	p->h.alloc = ctx3d_alloc_v1;
	p->h.save_push = save_push_v1;
	p->h.restore_push = nvhost_3dctx_restore_push;
	p->h.save_service = NULL;
	p->h.get = nvhost_3dctx_get;
	p->h.put = nvhost_3dctx_put;

	return &p->h;

fail_pin:
	nvhost_memmgr_munmap(p->save_buf, save_ptr);
fail_mmap:
	nvhost_memmgr_put(memmgr, p->save_buf);
fail_alloc:
	kfree(p);
	return NULL;
}
453
454 int nvhost_gr3d_t30_read_reg(
455         struct platform_device *dev,
456         struct nvhost_channel *channel,
457         struct nvhost_hwctx *hwctx,
458         u32 offset,
459         u32 *value)
460 {
461         struct host1x_hwctx_handler *h = to_host1x_hwctx_handler(hwctx->h);
462         u32 syncpt_incrs = 1;
463         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
464         void *ref;
465         void *read_waiter = NULL;
466         struct nvhost_job *job;
467         int err;
468         struct mem_handle *mem = NULL;
469         u32 *mem_ptr = NULL;
470         u32 *cmdbuf_ptr = NULL;
471         struct sg_table *mem_sgt = NULL;
472         struct mem_mgr *memmgr = hwctx->memmgr;
473         u32 opcodes[] = {
474                 /* Switch to 3D - set up output to memory */
475                 nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
476                 nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 1),
477                 nvhost_opcode_nonincr(AR3D_DW_MEMORY_OUTPUT_ADDRESS, 1),
478                 0xdeadbeef,
479                 /* Get host1x to request a register read */
480                 nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
481                                 host1x_uclass_indoff_r(), 1),
482                 nvhost_class_host_indoff_reg_read(
483                                 host1x_uclass_indoff_indmodid_gr3d_v(),
484                                 offset, false),
485                 nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
486                 /* send reg reads back to host */
487                 nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0),
488                 nvhost_opcode_imm(AR3D_GLOBAL_MEMORY_OUTPUT_READS, 0),
489                 /* Finalize with syncpt increment */
490                 nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
491                                 host1x_uclass_incr_syncpt_base_r(), 1),
492                 nvhost_class_host_incr_syncpt_base(h->h.waitbase,
493                                 1),
494                 nvhost_opcode_imm_incr_syncpt(
495                                 host1x_uclass_incr_syncpt_cond_immediate_v(),
496                                 h->h.syncpt),
497         };
498
499         /* 12 slots for gather, and one slot for storing the result value */
500         mem = nvhost_memmgr_alloc(memmgr, sizeof(opcodes)+4,
501                         32, mem_mgr_flag_uncacheable);
502         if (IS_ERR(mem))
503                 return PTR_ERR(mem);
504
505         mem_ptr = nvhost_memmgr_mmap(mem);
506         if (!mem_ptr) {
507                 err = -ENOMEM;
508                 goto done;
509         }
510         cmdbuf_ptr = mem_ptr + 1;
511
512         mem_sgt = nvhost_memmgr_pin(memmgr, mem);
513         if (IS_ERR(mem_sgt)) {
514                 err = -ENOMEM;
515                 mem_sgt = NULL;
516                 goto done;
517         }
518         /* Set address of target memory slot to the stream */
519         opcodes[3] = sg_dma_address(mem_sgt->sgl);
520
521         read_waiter = nvhost_intr_alloc_waiter();
522         if (!read_waiter) {
523                 err = -ENOMEM;
524                 goto done;
525         }
526
527         job = nvhost_job_alloc(channel, hwctx, 1, 0, 0, 1, memmgr);
528         if (!job) {
529                 err = -ENOMEM;
530                 goto done;
531         }
532
533         job->hwctx_syncpt_idx = 0;
534         job->sp->id = h->h.syncpt;
535         job->sp->incrs = syncpt_incrs;
536         job->num_syncpts = 1;
537         job->serialize = 1;
538         memcpy(cmdbuf_ptr, opcodes, sizeof(opcodes));
539
540         /* Submit job */
541         nvhost_job_add_gather(job, nvhost_memmgr_handle_to_id(mem),
542                         ARRAY_SIZE(opcodes), 4);
543
544         err = nvhost_job_pin(job, &nvhost_get_host(dev)->syncpt);
545         if (err)
546                 goto done;
547
548         err = nvhost_channel_submit(job);
549         if (err)
550                 goto done;
551
552         /* Wait for read to be ready */
553         err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
554                         h->h.syncpt, job->sp->fence,
555                         NVHOST_INTR_ACTION_WAKEUP, &wq,
556                         read_waiter,
557                         &ref);
558         read_waiter = NULL;
559         WARN(err, "Failed to set wakeup interrupt");
560         wait_event(wq,
561                 nvhost_syncpt_is_expired(&nvhost_get_host(dev)->syncpt,
562                                 h->h.syncpt, job->sp->fence));
563         nvhost_job_put(job);
564         job = NULL;
565         nvhost_intr_put_ref(&nvhost_get_host(dev)->intr, h->h.syncpt,
566                         ref);
567
568         *value = *mem_ptr;
569
570 done:
571         kfree(read_waiter);
572         if (mem_ptr)
573                 nvhost_memmgr_munmap(mem, mem_ptr);
574         if (mem_sgt)
575                 nvhost_memmgr_unpin(memmgr, mem, mem_sgt);
576         if (mem)
577                 nvhost_memmgr_put(memmgr, mem);
578         return err;
579 }