1 /*
2  * drivers/video/tegra/host/mpe/mpe.c
3  *
4  * Tegra Graphics Host MPE
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/export.h>
23 #include <linux/resource.h>
24 #include <linux/module.h>
25 #include <linux/scatterlist.h>
26 #include <linux/pm_runtime.h>
27 #include <linux/of.h>
28 #include <linux/of_device.h>
29 #include <linux/of_platform.h>
30
31 #include <mach/hardware.h>
32 #include <mach/pm_domains.h>
33
34 #include "nvhost_hwctx.h"
35 #include "nvhost_channel.h"
36 #include "dev.h"
37 #include "host1x/host1x01_hardware.h"
38 #include "host1x/host1x_hwctx.h"
39 #include "t20/t20.h"
40 #include "t30/t30.h"
41 #include "t114/t114.h"
42 #include "chip_support.h"
43 #include "nvhost_memmgr.h"
44 #include "class_ids.h"
45 #include "nvhost_job.h"
46 #include "nvhost_acm.h"
47 #include "mpe.h"
48
49 #include "bus_client.h"
50
51 enum {
52         HWCTX_REGINFO_NORMAL = 0,
53         HWCTX_REGINFO_STASH,
54         HWCTX_REGINFO_CALCULATE,
55         HWCTX_REGINFO_WRITEBACK
56 };
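/*
 * How save_regs() treats each register type during a context save:
 *   NORMAL    - value drained from the FIFO straight into the restore buffer
 *   STASH     - value drained into the restore buffer and also kept in
 *               mpe_save_info.in[] as input for calculate_mpe()
 *   CALCULATE - value drained, then rewritten by calculate_mpe() before it
 *               is stored
 *   WRITEBACK - nothing is read from hardware; the slot is filled from the
 *               values calculate_mpe() left in mpe_save_info.out[]
 */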
57
58 const struct hwctx_reginfo ctxsave_regs_mpe[] = {
59         HWCTX_REGINFO(0x124,  1, STASH),
60         HWCTX_REGINFO(0x123,  1, STASH),
61         HWCTX_REGINFO(0x103,  1, STASH),
62         HWCTX_REGINFO(0x074,  1, STASH),
63         HWCTX_REGINFO(0x021,  1, NORMAL),
64         HWCTX_REGINFO(0x020,  1, STASH),
65         HWCTX_REGINFO(0x024,  2, NORMAL),
66         HWCTX_REGINFO(0x0e6,  1, NORMAL),
67         HWCTX_REGINFO(0x3fc,  1, NORMAL),
68         HWCTX_REGINFO(0x3d0,  1, NORMAL),
69         HWCTX_REGINFO(0x3d4,  1, NORMAL),
70         HWCTX_REGINFO(0x013,  1, NORMAL),
71         HWCTX_REGINFO(0x022,  1, NORMAL),
72         HWCTX_REGINFO(0x030,  4, NORMAL),
73         HWCTX_REGINFO(0x023,  1, NORMAL),
74         HWCTX_REGINFO(0x070,  1, NORMAL),
75         HWCTX_REGINFO(0x0a0,  9, NORMAL),
76         HWCTX_REGINFO(0x071,  1, NORMAL),
77         HWCTX_REGINFO(0x100,  4, NORMAL),
78         HWCTX_REGINFO(0x104,  2, NORMAL),
79         HWCTX_REGINFO(0x108,  9, NORMAL),
80         HWCTX_REGINFO(0x112,  2, NORMAL),
81         HWCTX_REGINFO(0x114,  1, STASH),
82         HWCTX_REGINFO(0x014,  1, NORMAL),
83         HWCTX_REGINFO(0x072,  1, NORMAL),
84         HWCTX_REGINFO(0x200,  1, NORMAL),
85         HWCTX_REGINFO(0x0d1,  1, NORMAL),
86         HWCTX_REGINFO(0x0d0,  1, NORMAL),
87         HWCTX_REGINFO(0x0c0,  1, NORMAL),
88         HWCTX_REGINFO(0x0c3,  2, NORMAL),
89         HWCTX_REGINFO(0x0d2,  1, NORMAL),
90         HWCTX_REGINFO(0x0d8,  1, NORMAL),
91         HWCTX_REGINFO(0x0e0,  2, NORMAL),
92         HWCTX_REGINFO(0x07f,  2, NORMAL),
93         HWCTX_REGINFO(0x084,  8, NORMAL),
94         HWCTX_REGINFO(0x0d3,  1, NORMAL),
95         HWCTX_REGINFO(0x040, 13, NORMAL),
96         HWCTX_REGINFO(0x050,  6, NORMAL),
97         HWCTX_REGINFO(0x058,  1, NORMAL),
98         HWCTX_REGINFO(0x057,  1, NORMAL),
99         HWCTX_REGINFO(0x111,  1, NORMAL),
100         HWCTX_REGINFO(0x130,  3, NORMAL),
101         HWCTX_REGINFO(0x201,  1, NORMAL),
102         HWCTX_REGINFO(0x068,  2, NORMAL),
103         HWCTX_REGINFO(0x08c,  1, NORMAL),
104         HWCTX_REGINFO(0x0cf,  1, NORMAL),
105         HWCTX_REGINFO(0x082,  2, NORMAL),
106         HWCTX_REGINFO(0x075,  1, NORMAL),
107         HWCTX_REGINFO(0x0e8,  1, NORMAL),
108         HWCTX_REGINFO(0x056,  1, NORMAL),
109         HWCTX_REGINFO(0x057,  1, NORMAL),
110         HWCTX_REGINFO(0x073,  1, CALCULATE),
111         HWCTX_REGINFO(0x074,  1, NORMAL),
112         HWCTX_REGINFO(0x075,  1, NORMAL),
113         HWCTX_REGINFO(0x076,  1, STASH),
114         HWCTX_REGINFO(0x11a,  9, NORMAL),
115         HWCTX_REGINFO(0x123,  1, NORMAL),
116         HWCTX_REGINFO(0x124,  1, NORMAL),
117         HWCTX_REGINFO(0x12a,  5, NORMAL),
118         HWCTX_REGINFO(0x12f,  1, STASH),
119         HWCTX_REGINFO(0x125,  2, NORMAL),
120         HWCTX_REGINFO(0x034,  1, NORMAL),
121         HWCTX_REGINFO(0x133,  2, NORMAL),
122         HWCTX_REGINFO(0x127,  1, NORMAL),
123         HWCTX_REGINFO(0x106,  1, WRITEBACK),
124         HWCTX_REGINFO(0x107,  1, WRITEBACK)
125 };
126
127 #define NR_STASHES 8
128 #define NR_WRITEBACKS 2
129
130 #define RC_RAM_LOAD_CMD 0x115
131 #define RC_RAM_LOAD_DATA 0x116
132 #define RC_RAM_READ_CMD 0x128
133 #define RC_RAM_READ_DATA 0x129
134 #define RC_RAM_SIZE 692
135
136 #define IRFR_RAM_LOAD_CMD 0xc5
137 #define IRFR_RAM_LOAD_DATA 0xc6
138 #define IRFR_RAM_READ_CMD 0xcd
139 #define IRFR_RAM_READ_DATA 0xce
140 #define IRFR_RAM_SIZE 408
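/*
 * The MPE block keeps additional state in two internal RAMs (RC and IRFR)
 * that are not plain registers: a word count is written to the *_CMD
 * register and the contents are then streamed through the matching *_DATA
 * register.  The *_SIZE values are in 32-bit words and size the save and
 * restore buffers accordingly.
 */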
141
142 struct mpe_save_info {
143         u32 in[NR_STASHES];
144         u32 out[NR_WRITEBACKS];
145         unsigned in_pos;
146         unsigned out_pos;
147         u32 h264_mode;
148 };
149
150 /*** restore ***/
151
152 static unsigned int restore_size;
153
154 static void restore_begin(struct host1x_hwctx_handler *h, u32 *ptr)
155 {
156         /* set class to host */
157         ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
158                                         host1x_uclass_incr_syncpt_base_r(), 1);
159         /* increment sync point base */
160         ptr[1] = nvhost_class_host_incr_syncpt_base(h->h.waitbase, 1);
161         /* set class to MPE */
162         ptr[2] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
163 }
164 #define RESTORE_BEGIN_SIZE 3
165
166 static void restore_ram(u32 *ptr, unsigned words,
167                         unsigned cmd_reg, unsigned data_reg)
168 {
169         ptr[0] = nvhost_opcode_imm(cmd_reg, words);
170         ptr[1] = nvhost_opcode_nonincr(data_reg, words);
171 }
172 #define RESTORE_RAM_SIZE 2
173
174 static void restore_end(struct host1x_hwctx_handler *h, u32 *ptr)
175 {
176         /* syncpt increment to track restore gather. */
177         ptr[0] = nvhost_opcode_imm_incr_syncpt(
178                         host1x_uclass_incr_syncpt_cond_op_done_v(),
179                         h->h.syncpt);
180 }
181 #define RESTORE_END_SIZE 1
182
183 static u32 *setup_restore_regs(u32 *ptr,
184                         const struct hwctx_reginfo *regs,
185                         unsigned int nr_regs)
186 {
187         const struct hwctx_reginfo *rend = regs + nr_regs;
188
189         for ( ; regs != rend; ++regs) {
190                 u32 offset = regs->offset;
191                 u32 count = regs->count;
192                 *ptr++ = nvhost_opcode_incr(offset, count);
193                 ptr += count;
194         }
195         return ptr;
196 }
197
198 static u32 *setup_restore_ram(u32 *ptr, unsigned words,
199                         unsigned cmd_reg, unsigned data_reg)
200 {
201         restore_ram(ptr, words, cmd_reg, data_reg);
202         return ptr + (RESTORE_RAM_SIZE + words);
203 }
204
205 static void setup_restore(struct host1x_hwctx_handler *h, u32 *ptr)
206 {
207         restore_begin(h, ptr);
208         ptr += RESTORE_BEGIN_SIZE;
209
210         ptr = setup_restore_regs(ptr, ctxsave_regs_mpe,
211                                 ARRAY_SIZE(ctxsave_regs_mpe));
212
213         ptr = setup_restore_ram(ptr, RC_RAM_SIZE,
214                         RC_RAM_LOAD_CMD, RC_RAM_LOAD_DATA);
215
216         ptr = setup_restore_ram(ptr, IRFR_RAM_SIZE,
217                         IRFR_RAM_LOAD_CMD, IRFR_RAM_LOAD_DATA);
218
219         restore_end(h, ptr);
220
221         wmb();
222 }
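/*
 * Resulting restore buffer layout (sizes in 32-bit words):
 *
 *   RESTORE_BEGIN_SIZE (3)             syncpt base increment + setclass MPE
 *   per ctxsave_regs_mpe entry         1 incr opcode + <count> data words
 *   RESTORE_RAM_SIZE + RC_RAM_SIZE     RC RAM load command + data
 *   RESTORE_RAM_SIZE + IRFR_RAM_SIZE   IRFR RAM load command + data
 *   RESTORE_END_SIZE (1)               syncpt increment
 *
 * setup_restore() only writes the opcode skeleton; the data words are
 * filled in later by ctxmpe_save_service() from the values read back
 * from hardware.
 */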
223
224 /*** save ***/
225
226 struct save_info {
227         u32 *ptr;
228         unsigned int save_count;
229         unsigned int restore_count;
230 };
231
232 static void save_begin(struct host1x_hwctx_handler *h, u32 *ptr)
233 {
234         /* MPE: when done, increment syncpt to base+1 */
235         ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
236         ptr[1] = nvhost_opcode_imm_incr_syncpt(
237                 host1x_uclass_incr_syncpt_cond_op_done_v(), h->h.syncpt);
238         /* host: wait for syncpt base+1 */
239         ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
240                                         host1x_uclass_wait_syncpt_base_r(), 1);
241         ptr[3] = nvhost_class_host_wait_syncpt_base(
242                         h->h.syncpt, h->h.waitbase, 1);
243         /* host: signal context read thread to start reading */
244         ptr[4] = nvhost_opcode_imm_incr_syncpt(
245                         host1x_uclass_incr_syncpt_cond_immediate_v(),
246                         h->h.syncpt);
247 }
248 #define SAVE_BEGIN_SIZE 5
249
250 static void save_direct(u32 *ptr, u32 start_reg, u32 count)
251 {
252         ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
253                                         host1x_uclass_indoff_r(), 1);
254         ptr[1] = nvhost_class_host_indoff_reg_read(
255                         host1x_uclass_indoff_indmodid_mpe_v(),
256                         start_reg, true);
257         ptr[2] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
258 }
259 #define SAVE_DIRECT_SIZE 3
260
261 static void save_set_ram_cmd(u32 *ptr, u32 cmd_reg, u32 count)
262 {
263         ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
264                                         cmd_reg, 1);
265         ptr[1] = count;
266 }
267 #define SAVE_SET_RAM_CMD_SIZE 2
268
269 static void save_read_ram_data_nasty(u32 *ptr, u32 data_reg)
270 {
271         ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
272                                         host1x_uclass_indoff_r(), 1);
273         ptr[1] = nvhost_class_host_indoff_reg_read(
274                         host1x_uclass_indoff_indmodid_mpe_v(),
275                         data_reg, false);
276         ptr[2] = nvhost_opcode_imm(host1x_uclass_inddata_r(), 0);
277         /* write junk data to avoid a caching problem with register memory */
278         ptr[3] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
279                                         data_reg, 1);
280         ptr[4] = 0x99;
281 }
282 #define SAVE_READ_RAM_DATA_NASTY_SIZE 5
283
284 static void save_end(struct host1x_hwctx_handler *h, u32 *ptr)
285 {
286         /* Wait for context read service to finish (cpu incr 3) */
287         ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
288                                         host1x_uclass_wait_syncpt_base_r(), 1);
289         ptr[1] = nvhost_class_host_wait_syncpt_base(
290                         h->h.syncpt, h->h.waitbase, 3);
291         /* Advance syncpoint base */
292         ptr[2] = nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1);
293         ptr[3] = nvhost_class_host_incr_syncpt_base(h->h.waitbase, 3);
294         /* set class back to the unit */
295         ptr[4] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
296 }
297 #define SAVE_END_SIZE 5
298
299 static void setup_save_regs(struct save_info *info,
300                         const struct hwctx_reginfo *regs,
301                         unsigned int nr_regs)
302 {
303         const struct hwctx_reginfo *rend = regs + nr_regs;
304         u32 *ptr = info->ptr;
305         unsigned int save_count = info->save_count;
306         unsigned int restore_count = info->restore_count;
307
308         for ( ; regs != rend; ++regs) {
309                 u32 offset = regs->offset;
310                 u32 count = regs->count;
311                 if (regs->type != HWCTX_REGINFO_WRITEBACK) {
312                         if (ptr) {
313                                 save_direct(ptr, offset, count);
314                                 ptr += SAVE_DIRECT_SIZE;
315                                 memset(ptr, 0, count * 4);
316                                 ptr += count;
317                         }
318                         save_count += (SAVE_DIRECT_SIZE + count);
319                 }
320                 restore_count += (1 + count);
321         }
322
323         info->ptr = ptr;
324         info->save_count = save_count;
325         info->restore_count = restore_count;
326 }
327
328 static void setup_save_ram_nasty(struct save_info *info, unsigned words,
329                                         unsigned cmd_reg, unsigned data_reg)
330 {
331         u32 *ptr = info->ptr;
332         unsigned int save_count = info->save_count;
333         unsigned int restore_count = info->restore_count;
334         unsigned i;
335
336         if (ptr) {
337                 save_set_ram_cmd(ptr, cmd_reg, words);
338                 ptr += SAVE_SET_RAM_CMD_SIZE;
339                 for (i = words; i; --i) {
340                         save_read_ram_data_nasty(ptr, data_reg);
341                         ptr += SAVE_READ_RAM_DATA_NASTY_SIZE;
342                 }
343         }
344
345         save_count += SAVE_SET_RAM_CMD_SIZE;
346         save_count += words * SAVE_READ_RAM_DATA_NASTY_SIZE;
347         restore_count += (RESTORE_RAM_SIZE + words);
348
349         info->ptr = ptr;
350         info->save_count = save_count;
351         info->restore_count = restore_count;
352 }
353
354 static void setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
355 {
356         struct save_info info = {
357                 ptr,
358                 SAVE_BEGIN_SIZE,
359                 RESTORE_BEGIN_SIZE
360         };
361
362         if (info.ptr) {
363                 save_begin(h, info.ptr);
364                 info.ptr += SAVE_BEGIN_SIZE;
365         }
366
367         setup_save_regs(&info, ctxsave_regs_mpe,
368                         ARRAY_SIZE(ctxsave_regs_mpe));
369
370         setup_save_ram_nasty(&info, RC_RAM_SIZE,
371                         RC_RAM_READ_CMD, RC_RAM_READ_DATA);
372
373         setup_save_ram_nasty(&info, IRFR_RAM_SIZE,
374                         IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);
375
376         if (info.ptr) {
377                 save_end(h, info.ptr);
378                 info.ptr += SAVE_END_SIZE;
379         }
380
381         wmb();
382
383         h->save_size = info.save_count + SAVE_END_SIZE;
384         restore_size = info.restore_count + RESTORE_END_SIZE;
385 }
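/*
 * setup_save() is run twice by nvhost_mpe_ctxhandler_init(): first with
 * ptr == NULL purely to compute save_size and restore_size, then again
 * with the mapped save buffer to emit the actual opcodes.  The NULL checks
 * in the helpers above exist to support that sizing pass.
 */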
386
387 static u32 calculate_mpe(u32 word, struct mpe_save_info *msi)
388 {
389         u32 buffer_full_read = msi->in[0] & 0x01ffffff;
390         u32 byte_len = msi->in[1];
391         u32 drain = (msi->in[2] >> 2) & 0x007fffff;
392         u32 rep_frame = msi->in[3] & 0x0000ffff;
393         u32 h264_mode = (msi->in[4] >> 11) & 1;
394         int new_buffer_full;
395
396         if (h264_mode)
397                 byte_len >>= 3;
398         new_buffer_full = buffer_full_read + byte_len - (drain * 4);
399         msi->out[0] = max(0, new_buffer_full);
400         msi->out[1] = rep_frame;
401         if (rep_frame == 0)
402                 word &= 0xffff0000;
403         return word;
404 }
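/*
 * calculate_mpe() derives the two WRITEBACK values (out[0], out[1]) from the
 * stashed inputs and adjusts the CALCULATE register's saved word.  As a
 * purely illustrative example with made-up numbers: buffer_full_read = 1000,
 * byte_len = 800, drain = 100 and h264_mode = 0 give
 * out[0] = 1000 + 800 - 100 * 4 = 1400 (clamped at 0 if negative) and
 * out[1] = rep_frame; when rep_frame is 0, the low 16 bits of the saved
 * word are cleared as well.
 */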
405
406 static u32 *save_regs(u32 *ptr, unsigned int *pending,
407                 struct nvhost_channel *channel,
408                 const struct hwctx_reginfo *regs,
409                 unsigned int nr_regs,
410                 struct mpe_save_info *msi)
411 {
412         const struct hwctx_reginfo *rend = regs + nr_regs;
413
414         for ( ; regs != rend; ++regs) {
415                 u32 count = regs->count;
416                 ++ptr; /* restore incr */
417                 if (regs->type == HWCTX_REGINFO_NORMAL) {
418                         nvhost_channel_drain_read_fifo(channel,
419                                                 ptr, count, pending);
420                         ptr += count;
421                 } else {
422                         u32 word;
423                         if (regs->type == HWCTX_REGINFO_WRITEBACK) {
424                                 WARN_ON(msi->out_pos >= NR_WRITEBACKS);
425                                 word = msi->out[msi->out_pos++];
426                         } else {
427                                 nvhost_channel_drain_read_fifo(channel,
428                                                 &word, 1, pending);
429                                 if (regs->type == HWCTX_REGINFO_STASH) {
430                                         WARN_ON(msi->in_pos >= NR_STASHES);
431                                         msi->in[msi->in_pos++] = word;
432                                 } else {
433                                         word = calculate_mpe(word, msi);
434                                 }
435                         }
436                         *ptr++ = word;
437                 }
438         }
439         return ptr;
440 }
441
442 static u32 *save_ram(u32 *ptr, unsigned int *pending,
443                 struct nvhost_channel *channel,
444                 unsigned words, unsigned cmd_reg, unsigned data_reg)
445 {
446         int err = 0;
447         ptr += RESTORE_RAM_SIZE;
448         err = nvhost_channel_drain_read_fifo(channel, ptr, words, pending);
449         WARN_ON(err);
450         return ptr + words;
451 }
452
453 /*** ctxmpe ***/
454
455 static struct nvhost_hwctx *ctxmpe_alloc(struct nvhost_hwctx_handler *h,
456                 struct nvhost_channel *ch)
457 {
458         struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
459         struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
460         struct host1x_hwctx *ctx;
461
462         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
463         if (!ctx)
464                 return NULL;
465
466         ctx->restore = nvhost_memmgr_alloc(memmgr, restore_size * 4, 32,
467                                 mem_mgr_flag_write_combine);
468         if (IS_ERR(ctx->restore))
469                 goto fail_alloc;
470
471         ctx->restore_virt = nvhost_memmgr_mmap(ctx->restore);
472         if (!ctx->restore_virt)
473                 goto fail_mmap;
474
475         ctx->restore_sgt = nvhost_memmgr_pin(memmgr, ctx->restore);
476         if (IS_ERR(ctx->restore_sgt))
477                 goto fail_pin;
478         ctx->restore_phys = sg_dma_address(ctx->restore_sgt->sgl);
479
480         kref_init(&ctx->hwctx.ref);
481         ctx->hwctx.h = &p->h;
482         ctx->hwctx.channel = ch;
483         ctx->hwctx.valid = false;
484         ctx->hwctx.save_incrs = 3;
485         ctx->hwctx.save_thresh = 2;
486         ctx->hwctx.save_slots = p->save_slots;
487         ctx->restore_size = restore_size;
488         ctx->hwctx.restore_incrs = 1;
489
490         setup_restore(p, ctx->restore_virt);
491
492         return &ctx->hwctx;
493
494 fail_pin:
495         nvhost_memmgr_munmap(ctx->restore, ctx->restore_virt);
496 fail_mmap:
497         nvhost_memmgr_put(memmgr, ctx->restore);
498 fail_alloc:
499         kfree(ctx);
500         return NULL;
501 }
502
503 static void ctxmpe_get(struct nvhost_hwctx *ctx)
504 {
505         kref_get(&ctx->ref);
506 }
507
508 static void ctxmpe_free(struct kref *ref)
509 {
510         struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
511         struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
512         struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr;
513
514         if (ctx->restore_virt)
515                 nvhost_memmgr_munmap(ctx->restore, ctx->restore_virt);
516         nvhost_memmgr_unpin(memmgr, ctx->restore, ctx->restore_sgt);
517         nvhost_memmgr_put(memmgr, ctx->restore);
518         kfree(ctx);
519 }
520
521 static void ctxmpe_put(struct nvhost_hwctx *ctx)
522 {
523         kref_put(&ctx->ref, ctxmpe_free);
524 }
525
526 static void ctxmpe_save_push(struct nvhost_hwctx *nctx,
527                 struct nvhost_cdma *cdma)
528 {
529         struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
530         struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
531         nvhost_cdma_push_gather(cdma,
532                         nvhost_get_host(nctx->channel->dev)->memmgr,
533                         h->save_buf,
534                         0,
535                         nvhost_opcode_gather(h->save_size),
536                         h->save_phys);
537 }
538
539 static void ctxmpe_restore_push(struct nvhost_hwctx *nctx,
540                 struct nvhost_cdma *cdma)
541 {
542         struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
543         nvhost_cdma_push_gather(cdma,
544                 ctx->hwctx.memmgr,
545                 ctx->restore,
546                 0,
547                 nvhost_opcode_gather(ctx->restore_size),
548                 ctx->restore_phys);
549 }
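/*
 * Both push helpers queue pre-built command buffers as gathers on the
 * channel's CDMA: the handler-wide save buffer (h->save_buf / h->save_phys)
 * for a save, and the per-context restore buffer for a restore.
 */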
550
551 static void ctxmpe_save_service(struct nvhost_hwctx *nctx)
552 {
553         struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
554         struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
555
556         u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
557         unsigned int pending = 0;
558         struct mpe_save_info msi;
559
560         msi.in_pos = 0;
561         msi.out_pos = 0;
562
563         ptr = save_regs(ptr, &pending, nctx->channel,
564                         ctxsave_regs_mpe, ARRAY_SIZE(ctxsave_regs_mpe), &msi);
565
566         ptr = save_ram(ptr, &pending, nctx->channel,
567                 RC_RAM_SIZE, RC_RAM_READ_CMD, RC_RAM_READ_DATA);
568
569         ptr = save_ram(ptr, &pending, nctx->channel,
570                 IRFR_RAM_SIZE, IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);
571
572         wmb();
573         nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
574                         h->h.syncpt);
575 }
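/*
 * ctxmpe_save_service() is the CPU-side half of a context save (save_thresh
 * is 2, so it runs after the second of the three save increments).  It
 * drains the register and RAM values that the save gather pushed into the
 * channel's output FIFO, stores them in the restore buffer right after the
 * RESTORE_BEGIN preamble, and finishes with the CPU syncpt increment that
 * releases the wait on base + 3 in save_end().
 */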
576
577 struct nvhost_hwctx_handler *nvhost_mpe_ctxhandler_init(u32 syncpt,
578         u32 waitbase, struct nvhost_channel *ch)
579 {
580         struct mem_mgr *memmgr;
581         u32 *save_ptr;
582         struct host1x_hwctx_handler *p;
583
584         p = kmalloc(sizeof(*p), GFP_KERNEL);
585         if (!p)
586                 return NULL;
587
588         memmgr = nvhost_get_host(ch->dev)->memmgr;
589
590         p->h.syncpt = syncpt;
591         p->h.waitbase = waitbase;
592
593         setup_save(p, NULL);
594
595         p->save_buf = nvhost_memmgr_alloc(memmgr, p->save_size * 4, 32,
596                                 mem_mgr_flag_write_combine);
597         if (IS_ERR(p->save_buf))
598                 goto fail_alloc;
599
600         save_ptr = nvhost_memmgr_mmap(p->save_buf);
601         if (!save_ptr)
602                 goto fail_mmap;
603
604         p->save_sgt = nvhost_memmgr_pin(memmgr, p->save_buf);
605         if (IS_ERR(p->save_sgt))
606                 goto fail_pin;
607         p->save_phys = sg_dma_address(p->save_sgt->sgl);
608
609         setup_save(p, save_ptr);
610
611         nvhost_memmgr_munmap(p->save_buf, save_ptr);
612
613         p->save_slots = 1;
614         p->h.alloc = ctxmpe_alloc;
615         p->h.save_push = ctxmpe_save_push;
616         p->h.restore_push = ctxmpe_restore_push;
617         p->h.save_service = ctxmpe_save_service;
618         p->h.get = ctxmpe_get;
619         p->h.put = ctxmpe_put;
620
621         return &p->h;
622
623 fail_pin:
624         nvhost_memmgr_munmap(p->save_buf, save_ptr);
625 fail_mmap:
626         nvhost_memmgr_put(memmgr, p->save_buf);
627 fail_alloc:
628         kfree(p);
629         return NULL;
630 }
631
632 int nvhost_mpe_prepare_power_off(struct platform_device *dev)
633 {
634         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
635         return nvhost_channel_save_context(pdata->channel);
636 }
637
638 static struct of_device_id tegra_mpe_of_match[] = {
639         { .compatible = "nvidia,tegra20-mpe",
640                 .data = (struct nvhost_device_data *)&t20_mpe_info },
641         { .compatible = "nvidia,tegra30-mpe",
642                 .data = (struct nvhost_device_data *)&t30_mpe_info },
643         { },
644 };
645
646 static int mpe_probe(struct platform_device *dev)
647 {
648         int err = 0;
649         struct nvhost_device_data *pdata = NULL;
650
651         if (dev->dev.of_node) {
652                 const struct of_device_id *match;
653
654                 match = of_match_device(tegra_mpe_of_match, &dev->dev);
655                 if (match)
656                         pdata = (struct nvhost_device_data *)match->data;
657         } else
658                 pdata = (struct nvhost_device_data *)dev->dev.platform_data;
659
660         WARN_ON(!pdata);
661         if (!pdata) {
662                 dev_info(&dev->dev, "no platform data\n");
663                 return -ENODATA;
664         }
665
666         pdata->pdev = dev;
667         platform_set_drvdata(dev, pdata);
668
669         err = nvhost_client_device_get_resources(dev);
670         if (err)
671                 return err;
672
673         err = nvhost_client_device_init(dev);
674         if (err)
675                 return err;
676
677         tegra_pd_add_device(&tegra_mc_chain_a, &dev->dev);
678         pm_runtime_use_autosuspend(&dev->dev);
679         pm_runtime_set_autosuspend_delay(&dev->dev, 100);
680         pm_runtime_enable(&dev->dev);
681
682         return 0;
683 }
684
685 static int __exit mpe_remove(struct platform_device *dev)
686 {
687         /* TODO: add clean-up */
688         return 0;
689 }
690
691 #ifdef CONFIG_PM
692 static int mpe_suspend(struct device *dev)
693 {
694         return nvhost_client_device_suspend(to_platform_device(dev));
695 }
696
697 static int mpe_resume(struct device *dev)
698 {
699         dev_info(dev, "resuming\n");
700         return 0;
701 }
702
703 static const struct dev_pm_ops mpe_pm_ops = {
704         .suspend = mpe_suspend,
705         .resume = mpe_resume,
706 };
707
708 #define MPE_PM_OPS      (&mpe_pm_ops)
709
710 #else
711
712 #define MPE_PM_OPS      NULL
713
714 #endif
715
716 static struct platform_driver mpe_driver = {
717         .probe = mpe_probe,
718         .remove = __exit_p(mpe_remove),
719         .driver = {
720                 .owner = THIS_MODULE,
721                 .name = "mpe",
722                 .pm = MPE_PM_OPS,
723 #ifdef CONFIG_OF
724                 .of_match_table = tegra_mpe_of_match,
725 #endif
726         },
727 };
728
729 static int __init mpe_init(void)
730 {
731         return platform_driver_register(&mpe_driver);
732 }
733
734 static void __exit mpe_exit(void)
735 {
736         platform_driver_unregister(&mpe_driver);
737 }
738
739 module_init(mpe_init);
740 module_exit(mpe_exit);
741
742 int nvhost_mpe_read_reg(struct platform_device *dev,
743         struct nvhost_channel *channel,
744         struct nvhost_hwctx *hwctx,
745         u32 offset,
746         u32 *value)
747 {
748         struct host1x_hwctx_handler *h = to_host1x_hwctx_handler(hwctx->h);
749         u32 syncpt_incrs = 4;
750         unsigned int pending = 0;
751         DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
752         void *ref;
753         void *read_waiter = NULL;
754         struct nvhost_job *job = NULL;
755         int err;
756         struct mem_handle *mem = NULL;
757         u32 *cmdbuf_ptr = NULL;
758         struct mem_mgr *memmgr = hwctx->memmgr;
759         u32 opcodes[] = {
760                 /* Switch to MPE - wait for it to complete what it was doing */
761                 nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0),
762                 nvhost_opcode_imm_incr_syncpt(
763                                 host1x_uclass_incr_syncpt_cond_op_done_v(),
764                                 h->h.syncpt),
765                 nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
766                                 host1x_uclass_wait_syncpt_base_r(), 1),
767                 nvhost_class_host_wait_syncpt_base(h->h.syncpt,
768                                 h->h.waitbase, 1),
769                 /*  Tell MPE to send register value to FIFO */
770                 nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1),
771                 nvhost_class_host_indoff_reg_read(
772                                 host1x_uclass_indoff_indmodid_mpe_v(),
773                                 offset, false),
774                 nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
775                 /*  Increment syncpt to indicate that FIFO can be read */
776                 nvhost_opcode_imm_incr_syncpt(
777                                 host1x_uclass_incr_syncpt_cond_immediate_v(),
778                                 h->h.syncpt),
779                 /*  Wait for value to be read from FIFO */
780                 nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1),
781                 nvhost_class_host_wait_syncpt_base(h->h.syncpt,
782                                 h->h.waitbase, 3),
783                 /*  Indicate submit complete */
784                 nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1),
785                 nvhost_class_host_incr_syncpt_base(h->h.waitbase, 4),
786                 nvhost_opcode_imm_incr_syncpt(
787                                 host1x_uclass_incr_syncpt_cond_immediate_v(),
788                                 h->h.syncpt),
789         };
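        /*
         * The stream above performs three of the four increments declared
         * in syncpt_incrs: an OP_DONE increment once MPE is idle, an
         * immediate increment when the register value has been pushed into
         * the FIFO, and a final immediate increment after the wait on
         * base + 3 is released.  The remaining increment is done by the CPU
         * below, after draining the value from the FIFO; that CPU increment
         * is what releases the base + 3 wait.  The syncpt base is advanced
         * by 4 so it stays consistent for the next submit.
         */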
790
791         mem = nvhost_memmgr_alloc(memmgr, sizeof(opcodes),
792                         32, mem_mgr_flag_uncacheable);
793         if (IS_ERR(mem))
794                 return PTR_ERR(mem);
795
796         cmdbuf_ptr = nvhost_memmgr_mmap(mem);
797         if (!cmdbuf_ptr) {
798                 err = -ENOMEM;
799                 goto done;
800         }
801
802         read_waiter = nvhost_intr_alloc_waiter();
803         if (!read_waiter) {
804                 err = -ENOMEM;
805                 goto done;
806         }
807
808         job = nvhost_job_alloc(channel, hwctx, 1, 0, 0, memmgr);
809         if (!job) {
810                 err = -ENOMEM;
811                 goto done;
812         }
813
814         job->syncpt_id = h->h.syncpt;
815         job->syncpt_incrs = syncpt_incrs;
816         job->serialize = 1;
817         memcpy(cmdbuf_ptr, opcodes, sizeof(opcodes));
818
819         /* Submit job */
820         nvhost_job_add_gather(job, nvhost_memmgr_handle_to_id(mem),
821                         ARRAY_SIZE(opcodes), 0);
822
823         err = nvhost_job_pin(job, &nvhost_get_host(dev)->syncpt);
824         if (err)
825                 goto done;
826
827         err = nvhost_channel_submit(job);
828         if (err)
829                 goto done;
830
831         /* Wait for FIFO to be ready */
832         err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
833                         h->h.syncpt, job->syncpt_end - 2,
834                         NVHOST_INTR_ACTION_WAKEUP, &wq,
835                         read_waiter,
836                         &ref);
837         read_waiter = NULL;
838         WARN(err, "Failed to set wakeup interrupt");
839         wait_event(wq,
840                 nvhost_syncpt_is_expired(&nvhost_get_host(dev)->syncpt,
841                                 h->h.syncpt, job->syncpt_end - 2));
842         nvhost_intr_put_ref(&nvhost_get_host(dev)->intr, h->h.syncpt,
843                         ref);
844
845         /* Read the register value from FIFO */
846         err = nvhost_channel_drain_read_fifo(channel, value, 1, &pending);
847
848         /* Indicate we've read the value */
849         nvhost_syncpt_cpu_incr(&nvhost_get_host(dev)->syncpt,
850                         h->h.syncpt);
851
852         nvhost_job_put(job);
853         job = NULL;
854
855 done:
        /* error paths may reach here with an unreleased job reference */
        if (job)
                nvhost_job_put(job);
856         kfree(read_waiter);
857         if (cmdbuf_ptr)
858                 nvhost_memmgr_munmap(mem, cmdbuf_ptr);
859         if (mem)
860                 nvhost_memmgr_put(memmgr, mem);
861         return err;
862 }