/*
 * drivers/video/tegra/host/mpe/mpe.c
 *
 * Tegra Graphics Host MPE
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/resource.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <mach/pm_domains.h>

#include "nvhost_hwctx.h"
#include "nvhost_channel.h"
#include "dev.h"
#include "class_ids.h"
#include "host1x/host1x01_hardware.h"
#include "host1x/host1x_hwctx.h"
#include "t20/t20.h"
#include "t30/t30.h"
#include "t114/t114.h"
#include "chip_support.h"
#include "nvhost_memmgr.h"
#include "nvhost_job.h"
#include "nvhost_acm.h"
#include "mpe.h"

#include "bus_client.h"

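/*
 * How the context read-back service treats each saved register:
 * NORMAL words are copied from the output FIFO straight into the restore
 * buffer, STASH words are additionally kept in struct mpe_save_info,
 * CALCULATE words are rewritten by calculate_mpe() from the stashed
 * values, and WRITEBACK words are not read from hardware at all but
 * produced by calculate_mpe().
 */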
enum {
        HWCTX_REGINFO_NORMAL = 0,
        HWCTX_REGINFO_STASH,
        HWCTX_REGINFO_CALCULATE,
        HWCTX_REGINFO_WRITEBACK
};

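/*
 * Register ranges captured on context save and re-loaded on restore.
 * Each HWCTX_REGINFO() entry gives the MPE register offset, the number
 * of consecutive registers, and how the read-back service handles the
 * values (see the enum above).
 */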
const struct hwctx_reginfo ctxsave_regs_mpe[] = {
        HWCTX_REGINFO(0x124,  1, STASH),
        HWCTX_REGINFO(0x123,  1, STASH),
        HWCTX_REGINFO(0x103,  1, STASH),
        HWCTX_REGINFO(0x074,  1, STASH),
        HWCTX_REGINFO(0x021,  1, NORMAL),
        HWCTX_REGINFO(0x020,  1, STASH),
        HWCTX_REGINFO(0x024,  2, NORMAL),
        HWCTX_REGINFO(0x0e6,  1, NORMAL),
        HWCTX_REGINFO(0x3fc,  1, NORMAL),
        HWCTX_REGINFO(0x3d0,  1, NORMAL),
        HWCTX_REGINFO(0x3d4,  1, NORMAL),
        HWCTX_REGINFO(0x013,  1, NORMAL),
        HWCTX_REGINFO(0x022,  1, NORMAL),
        HWCTX_REGINFO(0x030,  4, NORMAL),
        HWCTX_REGINFO(0x023,  1, NORMAL),
        HWCTX_REGINFO(0x070,  1, NORMAL),
        HWCTX_REGINFO(0x0a0,  9, NORMAL),
        HWCTX_REGINFO(0x071,  1, NORMAL),
        HWCTX_REGINFO(0x100,  4, NORMAL),
        HWCTX_REGINFO(0x104,  2, NORMAL),
        HWCTX_REGINFO(0x108,  9, NORMAL),
        HWCTX_REGINFO(0x112,  2, NORMAL),
        HWCTX_REGINFO(0x114,  1, STASH),
        HWCTX_REGINFO(0x014,  1, NORMAL),
        HWCTX_REGINFO(0x072,  1, NORMAL),
        HWCTX_REGINFO(0x200,  1, NORMAL),
        HWCTX_REGINFO(0x0d1,  1, NORMAL),
        HWCTX_REGINFO(0x0d0,  1, NORMAL),
        HWCTX_REGINFO(0x0c0,  1, NORMAL),
        HWCTX_REGINFO(0x0c3,  2, NORMAL),
        HWCTX_REGINFO(0x0d2,  1, NORMAL),
        HWCTX_REGINFO(0x0d8,  1, NORMAL),
        HWCTX_REGINFO(0x0e0,  2, NORMAL),
        HWCTX_REGINFO(0x07f,  2, NORMAL),
        HWCTX_REGINFO(0x084,  8, NORMAL),
        HWCTX_REGINFO(0x0d3,  1, NORMAL),
        HWCTX_REGINFO(0x040, 13, NORMAL),
        HWCTX_REGINFO(0x050,  6, NORMAL),
        HWCTX_REGINFO(0x058,  1, NORMAL),
        HWCTX_REGINFO(0x057,  1, NORMAL),
        HWCTX_REGINFO(0x111,  1, NORMAL),
        HWCTX_REGINFO(0x130,  3, NORMAL),
        HWCTX_REGINFO(0x201,  1, NORMAL),
        HWCTX_REGINFO(0x068,  2, NORMAL),
        HWCTX_REGINFO(0x08c,  1, NORMAL),
        HWCTX_REGINFO(0x0cf,  1, NORMAL),
        HWCTX_REGINFO(0x082,  2, NORMAL),
        HWCTX_REGINFO(0x075,  1, NORMAL),
        HWCTX_REGINFO(0x0e8,  1, NORMAL),
        HWCTX_REGINFO(0x056,  1, NORMAL),
        HWCTX_REGINFO(0x057,  1, NORMAL),
        HWCTX_REGINFO(0x073,  1, CALCULATE),
        HWCTX_REGINFO(0x074,  1, NORMAL),
        HWCTX_REGINFO(0x075,  1, NORMAL),
        HWCTX_REGINFO(0x076,  1, STASH),
        HWCTX_REGINFO(0x11a,  9, NORMAL),
        HWCTX_REGINFO(0x123,  1, NORMAL),
        HWCTX_REGINFO(0x124,  1, NORMAL),
        HWCTX_REGINFO(0x12a,  5, NORMAL),
        HWCTX_REGINFO(0x12f,  1, STASH),
        HWCTX_REGINFO(0x125,  2, NORMAL),
        HWCTX_REGINFO(0x034,  1, NORMAL),
        HWCTX_REGINFO(0x133,  2, NORMAL),
        HWCTX_REGINFO(0x127,  1, NORMAL),
        HWCTX_REGINFO(0x106,  1, WRITEBACK),
        HWCTX_REGINFO(0x107,  1, WRITEBACK)
};

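/*
 * NR_STASHES and NR_WRITEBACKS match the number of STASH and WRITEBACK
 * entries in the table above. The MPE unit also has two internal RAMs
 * (RC and IRFR) that are read out word by word through the *_READ_CMD/
 * *_READ_DATA register pairs on save and re-loaded through the
 * *_LOAD_CMD/*_LOAD_DATA pairs on restore; the *_SIZE values are their
 * lengths in 32-bit words.
 */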
#define NR_STASHES 8
#define NR_WRITEBACKS 2

#define RC_RAM_LOAD_CMD 0x115
#define RC_RAM_LOAD_DATA 0x116
#define RC_RAM_READ_CMD 0x128
#define RC_RAM_READ_DATA 0x129
#define RC_RAM_SIZE 692

#define IRFR_RAM_LOAD_CMD 0xc5
#define IRFR_RAM_LOAD_DATA 0xc6
#define IRFR_RAM_READ_CMD 0xcd
#define IRFR_RAM_READ_DATA 0xce
#define IRFR_RAM_SIZE 408

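/*
 * Scratch state used while draining the save FIFO: 'in' collects the
 * STASH'd register values, 'out' holds the two words derived from them
 * by calculate_mpe() that replace the WRITEBACK entries in the restore
 * buffer.
 */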
struct mpe_save_info {
        u32 in[NR_STASHES];
        u32 out[NR_WRITEBACKS];
        unsigned in_pos;
        unsigned out_pos;
        u32 h264_mode;
};

/*** restore ***/

static unsigned int restore_size;

static void restore_begin(struct host1x_hwctx_handler *h, u32 *ptr)
{
        /* set class to host */
        ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
                                        host1x_uclass_incr_syncpt_base_r(), 1);
        /* increment sync point base */
        ptr[1] = nvhost_class_host_incr_syncpt_base(h->h.waitbase, 1);
        /* set class to MPE */
        ptr[2] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
}
#define RESTORE_BEGIN_SIZE 3

static void restore_ram(u32 *ptr, unsigned words,
                        unsigned cmd_reg, unsigned data_reg)
{
        ptr[0] = nvhost_opcode_imm(cmd_reg, words);
        ptr[1] = nvhost_opcode_nonincr(data_reg, words);
}
#define RESTORE_RAM_SIZE 2

static void restore_end(struct host1x_hwctx_handler *h, u32 *ptr)
{
        /* syncpt increment to track restore gather. */
        ptr[0] = nvhost_opcode_imm_incr_syncpt(
                        host1x_uclass_incr_syncpt_cond_op_done_v(),
                        h->h.syncpt);
}
#define RESTORE_END_SIZE 1

static u32 *setup_restore_regs(u32 *ptr,
                        const struct hwctx_reginfo *regs,
                        unsigned int nr_regs)
{
        const struct hwctx_reginfo *rend = regs + nr_regs;

        for ( ; regs != rend; ++regs) {
                u32 offset = regs->offset;
                u32 count = regs->count;
                *ptr++ = nvhost_opcode_incr(offset, count);
                ptr += count;
        }
        return ptr;
}

static u32 *setup_restore_ram(u32 *ptr, unsigned words,
                        unsigned cmd_reg, unsigned data_reg)
{
        restore_ram(ptr, words, cmd_reg, data_reg);
        return ptr + (RESTORE_RAM_SIZE + words);
}

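/*
 * Lay out the restore gather: bump the syncpt base and switch to the MPE
 * class, emit an INCR header for every range in ctxsave_regs_mpe (the
 * data words behind each header are filled in later by the save
 * service), re-load both RAMs, and finish with a syncpt increment so the
 * restore can be tracked.
 */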
static void setup_restore(struct host1x_hwctx_handler *h, u32 *ptr)
{
        restore_begin(h, ptr);
        ptr += RESTORE_BEGIN_SIZE;

        ptr = setup_restore_regs(ptr, ctxsave_regs_mpe,
                                ARRAY_SIZE(ctxsave_regs_mpe));

        ptr = setup_restore_ram(ptr, RC_RAM_SIZE,
                        RC_RAM_LOAD_CMD, RC_RAM_LOAD_DATA);

        ptr = setup_restore_ram(ptr, IRFR_RAM_SIZE,
                        IRFR_RAM_LOAD_CMD, IRFR_RAM_LOAD_DATA);

        restore_end(h, ptr);

        wmb();
}

/*** save ***/

struct save_info {
        u32 *ptr;
        unsigned int save_count;
        unsigned int restore_count;
};

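/*
 * The save gather and the CPU-side context read service hand off through
 * the unit syncpt and its wait base: MPE increments the syncpt once it
 * has finished its current work (base+1), the gather signals the read
 * service (base+2) and then streams all register and RAM contents into
 * the output FIFO, and save_end() waits for the read service's own
 * increment (base+3) before advancing the base and switching back to the
 * MPE class.
 */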
static void save_begin(struct host1x_hwctx_handler *h, u32 *ptr)
{
        /* MPE: when done, increment syncpt to base+1 */
        ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
        ptr[1] = nvhost_opcode_imm_incr_syncpt(
                host1x_uclass_incr_syncpt_cond_op_done_v(), h->h.syncpt);
        /* host: wait for syncpt base+1 */
        ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
                                        host1x_uclass_wait_syncpt_base_r(), 1);
        ptr[3] = nvhost_class_host_wait_syncpt_base(
                        h->h.syncpt, h->h.waitbase, 1);
        /* host: signal context read thread to start reading */
        ptr[4] = nvhost_opcode_imm_incr_syncpt(
                        host1x_uclass_incr_syncpt_cond_immediate_v(),
                        h->h.syncpt);
}
#define SAVE_BEGIN_SIZE 5

static void save_direct(u32 *ptr, u32 start_reg, u32 count)
{
        ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
                                        host1x_uclass_indoff_r(), 1);
        ptr[1] = nvhost_class_host_indoff_reg_read(
                        host1x_uclass_indoff_indmodid_mpe_v(),
                        start_reg, true);
        ptr[2] = nvhost_opcode_nonincr(host1x_uclass_inddata_r(), count);
}
#define SAVE_DIRECT_SIZE 3

static void save_set_ram_cmd(u32 *ptr, u32 cmd_reg, u32 count)
{
        ptr[0] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
                                        cmd_reg, 1);
        ptr[1] = count;
}
#define SAVE_SET_RAM_CMD_SIZE 2

static void save_read_ram_data_nasty(u32 *ptr, u32 data_reg)
{
        ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
                                        host1x_uclass_indoff_r(), 1);
        ptr[1] = nvhost_class_host_indoff_reg_read(
                        host1x_uclass_indoff_indmodid_mpe_v(),
                        data_reg, false);
        ptr[2] = nvhost_opcode_imm(host1x_uclass_inddata_r(), 0);
        /* write junk data to avoid 'cached problem with register memory' */
        ptr[3] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID,
                                        data_reg, 1);
        ptr[4] = 0x99;
}
#define SAVE_READ_RAM_DATA_NASTY_SIZE 5

static void save_end(struct host1x_hwctx_handler *h, u32 *ptr)
{
        /* Wait for context read service to finish (cpu incr 3) */
        ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
                                        host1x_uclass_wait_syncpt_base_r(), 1);
        ptr[1] = nvhost_class_host_wait_syncpt_base(
                        h->h.syncpt, h->h.waitbase, 3);
        /* Advance syncpoint base */
        ptr[2] = nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1);
        ptr[3] = nvhost_class_host_incr_syncpt_base(h->h.waitbase, 3);
        /* set class back to the unit */
        ptr[4] = nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0);
}
#define SAVE_END_SIZE 5

static void setup_save_regs(struct save_info *info,
                        const struct hwctx_reginfo *regs,
                        unsigned int nr_regs)
{
        const struct hwctx_reginfo *rend = regs + nr_regs;
        u32 *ptr = info->ptr;
        unsigned int save_count = info->save_count;
        unsigned int restore_count = info->restore_count;

        for ( ; regs != rend; ++regs) {
                u32 offset = regs->offset;
                u32 count = regs->count;
                if (regs->type != HWCTX_REGINFO_WRITEBACK) {
                        if (ptr) {
                                save_direct(ptr, offset, count);
                                ptr += SAVE_DIRECT_SIZE;
                                memset(ptr, 0, count * 4);
                                ptr += count;
                        }
                        save_count += (SAVE_DIRECT_SIZE + count);
                }
                restore_count += (1 + count);
        }

        info->ptr = ptr;
        info->save_count = save_count;
        info->restore_count = restore_count;
}

static void setup_save_ram_nasty(struct save_info *info, unsigned words,
                                        unsigned cmd_reg, unsigned data_reg)
{
        u32 *ptr = info->ptr;
        unsigned int save_count = info->save_count;
        unsigned int restore_count = info->restore_count;
        unsigned i;

        if (ptr) {
                save_set_ram_cmd(ptr, cmd_reg, words);
                ptr += SAVE_SET_RAM_CMD_SIZE;
                for (i = words; i; --i) {
                        save_read_ram_data_nasty(ptr, data_reg);
                        ptr += SAVE_READ_RAM_DATA_NASTY_SIZE;
                }
        }

        save_count += SAVE_SET_RAM_CMD_SIZE;
        save_count += words * SAVE_READ_RAM_DATA_NASTY_SIZE;
        restore_count += (RESTORE_RAM_SIZE + words);

        info->ptr = ptr;
        info->save_count = save_count;
        info->restore_count = restore_count;
}

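/*
 * Build the save command stream. Called once with a NULL pointer purely
 * to compute save_size and restore_size, and a second time with the
 * mapped save buffer to actually emit the opcodes.
 */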
static void setup_save(struct host1x_hwctx_handler *h, u32 *ptr)
{
        struct save_info info = {
                ptr,
                SAVE_BEGIN_SIZE,
                RESTORE_BEGIN_SIZE
        };

        if (info.ptr) {
                save_begin(h, info.ptr);
                info.ptr += SAVE_BEGIN_SIZE;
        }

        setup_save_regs(&info, ctxsave_regs_mpe,
                        ARRAY_SIZE(ctxsave_regs_mpe));

        setup_save_ram_nasty(&info, RC_RAM_SIZE,
                        RC_RAM_READ_CMD, RC_RAM_READ_DATA);

        setup_save_ram_nasty(&info, IRFR_RAM_SIZE,
                        IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);

        if (info.ptr) {
                save_end(h, info.ptr);
                info.ptr += SAVE_END_SIZE;
        }

        wmb();

        h->save_size = info.save_count + SAVE_END_SIZE;
        restore_size = info.restore_count + RESTORE_END_SIZE;
}

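/*
 * Derive the two WRITEBACK words from the stashed registers: recompute
 * the buffer fullness from the stashed read count, byte length (scaled
 * down in H.264 mode) and drain rate, clamp it at zero, and propagate
 * the repeat-frame count, clearing the low half of the word being saved
 * when no repeat frames are pending.
 */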
static u32 calculate_mpe(u32 word, struct mpe_save_info *msi)
{
        u32 buffer_full_read = msi->in[0] & 0x01ffffff;
        u32 byte_len = msi->in[1];
        u32 drain = (msi->in[2] >> 2) & 0x007fffff;
        u32 rep_frame = msi->in[3] & 0x0000ffff;
        u32 h264_mode = (msi->in[4] >> 11) & 1;
        int new_buffer_full;

        if (h264_mode)
                byte_len >>= 3;
        new_buffer_full = buffer_full_read + byte_len - (drain * 4);
        msi->out[0] = max(0, new_buffer_full);
        msi->out[1] = rep_frame;
        if (rep_frame == 0)
                word &= 0xffff0000;
        return word;
}

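/*
 * Drain the register portion of the save FIFO into the restore buffer,
 * skipping over the INCR headers already written by setup_restore():
 * NORMAL words go straight in, STASH words are also recorded in msi,
 * CALCULATE words are transformed by calculate_mpe(), and WRITEBACK
 * slots are filled from msi->out instead of the FIFO.
 */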
static u32 *save_regs(u32 *ptr, unsigned int *pending,
                struct nvhost_channel *channel,
                const struct hwctx_reginfo *regs,
                unsigned int nr_regs,
                struct mpe_save_info *msi)
{
        const struct hwctx_reginfo *rend = regs + nr_regs;

        for ( ; regs != rend; ++regs) {
                u32 count = regs->count;
                ++ptr; /* restore incr */
                if (regs->type == HWCTX_REGINFO_NORMAL) {
                        nvhost_channel_drain_read_fifo(channel,
                                                ptr, count, pending);
                        ptr += count;
                } else {
                        u32 word;
                        if (regs->type == HWCTX_REGINFO_WRITEBACK) {
                                WARN_ON(msi->out_pos >= NR_WRITEBACKS);
                                word = msi->out[msi->out_pos++];
                        } else {
                                nvhost_channel_drain_read_fifo(channel,
                                                &word, 1, pending);
                                if (regs->type == HWCTX_REGINFO_STASH) {
                                        WARN_ON(msi->in_pos >= NR_STASHES);
                                        msi->in[msi->in_pos++] = word;
                                } else {
                                        word = calculate_mpe(word, msi);
                                }
                        }
                        *ptr++ = word;
                }
        }
        return ptr;
}

static u32 *save_ram(u32 *ptr, unsigned int *pending,
                struct nvhost_channel *channel,
                unsigned words, unsigned cmd_reg, unsigned data_reg)
{
        int err = 0;
        ptr += RESTORE_RAM_SIZE;
        err = nvhost_channel_drain_read_fifo(channel, ptr, words, pending);
        WARN_ON(err);
        return ptr + words;
}

/*** ctxmpe ***/

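/*
 * Allocate a per-channel context: the restore buffer (sized by the dry
 * run of setup_save()) is allocated, mapped and pinned, pre-filled with
 * the restore command stream, and returned as an invalid context so that
 * the first context save populates it.
 */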
static struct nvhost_hwctx *ctxmpe_alloc(struct nvhost_hwctx_handler *h,
                struct nvhost_channel *ch)
{
        struct mem_mgr *memmgr = nvhost_get_host(ch->dev)->memmgr;
        struct host1x_hwctx_handler *p = to_host1x_hwctx_handler(h);
        struct host1x_hwctx *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        ctx->restore = nvhost_memmgr_alloc(memmgr, restore_size * 4, 32,
                                mem_mgr_flag_write_combine, 0);
        if (IS_ERR(ctx->restore))
                goto fail_alloc;

        ctx->restore_virt = nvhost_memmgr_mmap(ctx->restore);
        if (!ctx->restore_virt)
                goto fail_mmap;

        ctx->restore_sgt = nvhost_memmgr_pin(memmgr, ctx->restore,
                        &ch->dev->dev);
        if (IS_ERR(ctx->restore_sgt))
                goto fail_pin;
        ctx->restore_phys = sg_dma_address(ctx->restore_sgt->sgl);

        kref_init(&ctx->hwctx.ref);
        ctx->hwctx.h = &p->h;
        ctx->hwctx.channel = ch;
        ctx->hwctx.valid = false;
        ctx->hwctx.save_incrs = 3;
        ctx->hwctx.save_thresh = 2;
        ctx->hwctx.save_slots = p->save_slots;
        ctx->restore_size = restore_size;
        ctx->hwctx.restore_incrs = 1;

        setup_restore(p, ctx->restore_virt);

        return &ctx->hwctx;

fail_pin:
        nvhost_memmgr_munmap(ctx->restore, ctx->restore_virt);
fail_mmap:
        nvhost_memmgr_put(memmgr, ctx->restore);
fail_alloc:
        kfree(ctx);
        return NULL;
}

static void ctxmpe_get(struct nvhost_hwctx *ctx)
{
        kref_get(&ctx->ref);
}

static void ctxmpe_free(struct kref *ref)
{
        struct nvhost_hwctx *nctx = container_of(ref, struct nvhost_hwctx, ref);
        struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
        struct mem_mgr *memmgr = nvhost_get_host(nctx->channel->dev)->memmgr;

        if (ctx->restore_virt)
                nvhost_memmgr_munmap(ctx->restore, ctx->restore_virt);
        nvhost_memmgr_unpin(memmgr, ctx->restore,
                        &nctx->channel->dev->dev, ctx->restore_sgt);
        nvhost_memmgr_put(memmgr, ctx->restore);
        kfree(ctx);
}

static void ctxmpe_put(struct nvhost_hwctx *ctx)
{
        kref_put(&ctx->ref, ctxmpe_free);
}

static void ctxmpe_save_push(struct nvhost_hwctx *nctx,
                struct nvhost_cdma *cdma)
{
        struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
        struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);
        nvhost_cdma_push_gather(cdma,
                        nvhost_get_host(nctx->channel->dev)->memmgr,
                        h->save_buf,
                        0,
                        nvhost_opcode_gather(h->save_size),
                        h->save_phys);
}

static void ctxmpe_restore_push(struct nvhost_hwctx *nctx,
                struct nvhost_cdma *cdma)
{
        struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
        nvhost_cdma_push_gather(cdma,
                ctx->hwctx.memmgr,
                ctx->restore,
                0,
                nvhost_opcode_gather(ctx->restore_size),
                ctx->restore_phys);
}

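/*
 * Context read service, run from the CPU once the save gather has
 * signalled (its second syncpt increment): drain the registers and both
 * RAMs from the channel FIFO into the restore buffer, then perform the
 * CPU syncpt increment that lets the tail of the save gather proceed.
 */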
static void ctxmpe_save_service(struct nvhost_hwctx *nctx)
{
        struct host1x_hwctx *ctx = to_host1x_hwctx(nctx);
        struct host1x_hwctx_handler *h = host1x_hwctx_handler(ctx);

        u32 *ptr = (u32 *)ctx->restore_virt + RESTORE_BEGIN_SIZE;
        unsigned int pending = 0;
        struct mpe_save_info msi;

        msi.in_pos = 0;
        msi.out_pos = 0;

        ptr = save_regs(ptr, &pending, nctx->channel,
                        ctxsave_regs_mpe, ARRAY_SIZE(ctxsave_regs_mpe), &msi);

        ptr = save_ram(ptr, &pending, nctx->channel,
                RC_RAM_SIZE, RC_RAM_READ_CMD, RC_RAM_READ_DATA);

        ptr = save_ram(ptr, &pending, nctx->channel,
                IRFR_RAM_SIZE, IRFR_RAM_READ_CMD, IRFR_RAM_READ_DATA);

        wmb();
        nvhost_syncpt_cpu_incr(&nvhost_get_host(nctx->channel->dev)->syncpt,
                        h->h.syncpt);
}

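/*
 * Set up the MPE context handler, presumably called from the per-SoC
 * channel init code when the MPE channel is brought up: size the
 * save/restore streams with a dry run of setup_save(), allocate and pin
 * the shared save buffer, write the save opcodes into it, and hook up
 * the hwctx operations.
 */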
struct nvhost_hwctx_handler *nvhost_mpe_ctxhandler_init(u32 syncpt,
        u32 waitbase, struct nvhost_channel *ch)
{
        struct mem_mgr *memmgr;
        u32 *save_ptr;
        struct host1x_hwctx_handler *p;

        p = kmalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return NULL;

        memmgr = nvhost_get_host(ch->dev)->memmgr;

        p->h.syncpt = syncpt;
        p->h.waitbase = waitbase;

        setup_save(p, NULL);

        p->save_buf = nvhost_memmgr_alloc(memmgr, p->save_size * 4, 32,
                                mem_mgr_flag_write_combine, 0);
        if (IS_ERR(p->save_buf))
                goto fail_alloc;

        save_ptr = nvhost_memmgr_mmap(p->save_buf);
        if (!save_ptr)
                goto fail_mmap;

        p->save_sgt = nvhost_memmgr_pin(memmgr, p->save_buf, &ch->dev->dev);
        if (IS_ERR(p->save_sgt))
                goto fail_pin;
        p->save_phys = sg_dma_address(p->save_sgt->sgl);

        setup_save(p, save_ptr);

        nvhost_memmgr_munmap(p->save_buf, save_ptr);

        p->save_slots = 1;
        p->h.alloc = ctxmpe_alloc;
        p->h.save_push = ctxmpe_save_push;
        p->h.restore_push = ctxmpe_restore_push;
        p->h.save_service = ctxmpe_save_service;
        p->h.get = ctxmpe_get;
        p->h.put = ctxmpe_put;

        return &p->h;

fail_pin:
        nvhost_memmgr_munmap(p->save_buf, save_ptr);
fail_mmap:
        nvhost_memmgr_put(memmgr, p->save_buf);
fail_alloc:
        kfree(p);
        return NULL;
}

int nvhost_mpe_prepare_power_off(struct platform_device *dev)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
        return nvhost_channel_save_context(pdata->channel);
}

static struct of_device_id tegra_mpe_of_match[] = {
        { .compatible = "nvidia,tegra20-mpe",
                .data = (struct nvhost_device_data *)&t20_mpe_info },
        { .compatible = "nvidia,tegra30-mpe",
                .data = (struct nvhost_device_data *)&t30_mpe_info },
        { },
};

#ifdef CONFIG_PM_GENERIC_DOMAINS
static int mpe_unpowergate(struct generic_pm_domain *domain)
{
        struct nvhost_device_data *pdata;

        pdata = container_of(domain, struct nvhost_device_data, pd);
        return nvhost_module_power_on(pdata->pdev);
}

static int mpe_powergate(struct generic_pm_domain *domain)
{
        struct nvhost_device_data *pdata;

        pdata = container_of(domain, struct nvhost_device_data, pd);
        return nvhost_module_power_off(pdata->pdev);
}
#endif

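/*
 * Standard nvhost client probe: pick up platform data from the device
 * tree match (or legacy platform data), register the "mpe" power domain
 * with nvhost's clock and powergate callbacks, enable runtime PM with
 * the optional autosuspend delay, and initialize the client device.
 */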
static int mpe_probe(struct platform_device *dev)
{
        int err = 0;
        struct nvhost_device_data *pdata = NULL;

        if (dev->dev.of_node) {
                const struct of_device_id *match;

                match = of_match_device(tegra_mpe_of_match, &dev->dev);
                if (match)
                        pdata = (struct nvhost_device_data *)match->data;
        } else
                pdata = (struct nvhost_device_data *)dev->dev.platform_data;

        WARN_ON(!pdata);
        if (!pdata) {
                dev_info(&dev->dev, "no platform data\n");
                return -ENODATA;
        }

        pdata->pdev = dev;
        mutex_init(&pdata->lock);
        platform_set_drvdata(dev, pdata);
        nvhost_module_init(dev);

        err = nvhost_client_device_get_resources(dev);
        if (err)
                return err;

#ifdef CONFIG_PM_GENERIC_DOMAINS
        pdata->pd.name = "mpe";
        pdata->pd.power_off = mpe_powergate;
        pdata->pd.power_on = mpe_unpowergate;
        pdata->pd.dev_ops.start = nvhost_module_enable_clk;
        pdata->pd.dev_ops.stop = nvhost_module_disable_clk;

        err = nvhost_module_add_domain(&pdata->pd, dev);

        /* overwrite save/restore fptrs set by pm_genpd_init */
        pdata->pd.dev_ops.save_state = nvhost_module_prepare_poweroff;
        pdata->pd.dev_ops.restore_state = nvhost_module_finalize_poweron;
        pdata->pd.domain.ops.suspend = nvhost_client_device_suspend;
        pdata->pd.domain.ops.resume = nvhost_client_device_resume;
#endif

        if (pdata->clockgate_delay) {
                pm_runtime_set_autosuspend_delay(&dev->dev,
                        pdata->clockgate_delay);
                pm_runtime_use_autosuspend(&dev->dev);
        }
        pm_runtime_enable(&dev->dev);

        pm_runtime_get_sync(&dev->dev);
        err = nvhost_client_device_init(dev);
        if (pdata->clockgate_delay)
                pm_runtime_put_sync_autosuspend(&dev->dev);
        else
                pm_runtime_put(&dev->dev);
        if (err)
                return err;

        return 0;
}

static int __exit mpe_remove(struct platform_device *dev)
{
        /* Add clean-up */
        return 0;
}

static struct platform_driver mpe_driver = {
        .probe = mpe_probe,
        .remove = __exit_p(mpe_remove),
        .driver = {
                .owner = THIS_MODULE,
                .name = "mpe",
#ifdef CONFIG_OF
                .of_match_table = tegra_mpe_of_match,
#endif
        },
};

static int __init mpe_init(void)
{
        return platform_driver_register(&mpe_driver);
}

static void __exit mpe_exit(void)
{
        platform_driver_unregister(&mpe_driver);
}

module_init(mpe_init);
module_exit(mpe_exit);

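/*
 * Read a single MPE register via the channel: submit a small gather that
 * waits for the engine to go idle, pushes the register value into the
 * output FIFO through the host1x indirect interface, then wait for the
 * matching syncpt threshold on the CPU, drain the one word from the FIFO
 * and acknowledge with a CPU syncpt increment.
 */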
int nvhost_mpe_read_reg(struct platform_device *dev,
        struct nvhost_channel *channel,
        struct nvhost_hwctx *hwctx,
        u32 offset,
        u32 *value)
{
        struct host1x_hwctx_handler *h = to_host1x_hwctx_handler(hwctx->h);
        u32 syncpt_incrs = 4;
        unsigned int pending = 0;
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        void *read_waiter = NULL;
        struct nvhost_job *job;
        int err;
        struct mem_handle *mem = NULL;
        u32 *cmdbuf_ptr = NULL;
        struct mem_mgr *memmgr = hwctx->memmgr;
        u32 opcodes[] = {
                /* Switch to MPE - wait for it to complete what it was doing */
                nvhost_opcode_setclass(NV_VIDEO_ENCODE_MPEG_CLASS_ID, 0, 0),
                nvhost_opcode_imm_incr_syncpt(
                                host1x_uclass_incr_syncpt_cond_op_done_v(),
                                h->h.syncpt),
                nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
                                host1x_uclass_wait_syncpt_base_r(), 1),
                nvhost_class_host_wait_syncpt_base(h->h.syncpt,
                                h->h.waitbase, 1),
                /*  Tell MPE to send register value to FIFO */
                nvhost_opcode_nonincr(host1x_uclass_indoff_r(), 1),
                nvhost_class_host_indoff_reg_read(
                                host1x_uclass_indoff_indmodid_mpe_v(),
                                offset, false),
                nvhost_opcode_imm(host1x_uclass_inddata_r(), 0),
                /*  Increment syncpt to indicate that FIFO can be read */
                nvhost_opcode_imm_incr_syncpt(
                                host1x_uclass_incr_syncpt_cond_immediate_v(),
                                h->h.syncpt),
                /*  Wait for value to be read from FIFO */
                nvhost_opcode_nonincr(host1x_uclass_wait_syncpt_base_r(), 1),
                nvhost_class_host_wait_syncpt_base(h->h.syncpt,
                                h->h.waitbase, 3),
                /*  Indicate submit complete */
                nvhost_opcode_nonincr(host1x_uclass_incr_syncpt_base_r(), 1),
                nvhost_class_host_incr_syncpt_base(h->h.waitbase, 4),
                nvhost_opcode_imm_incr_syncpt(
                                host1x_uclass_incr_syncpt_cond_immediate_v(),
                                h->h.syncpt),
        };

        mem = nvhost_memmgr_alloc(memmgr, sizeof(opcodes),
                        32, mem_mgr_flag_uncacheable, 0);
        if (IS_ERR(mem))
                return PTR_ERR(mem);

        cmdbuf_ptr = nvhost_memmgr_mmap(mem);
        if (!cmdbuf_ptr) {
                err = -ENOMEM;
                goto done;
        }

        read_waiter = nvhost_intr_alloc_waiter();
        if (!read_waiter) {
                err = -ENOMEM;
                goto done;
        }

        job = nvhost_job_alloc(channel, hwctx, 1, 0, 0, 1, memmgr);
        if (!job) {
                err = -ENOMEM;
                goto done;
        }

        job->hwctx_syncpt_idx = 0;
        job->sp->id = h->h.syncpt;
        job->sp->waitbase = h->h.waitbase;
        job->sp->incrs = syncpt_incrs;
        job->num_syncpts = 1;
        job->serialize = 1;
        memcpy(cmdbuf_ptr, opcodes, sizeof(opcodes));

        /* Submit job */
        nvhost_job_add_gather(job, nvhost_memmgr_handle_to_id(mem),
                        ARRAY_SIZE(opcodes), 0);

        err = nvhost_job_pin(job, &nvhost_get_host(dev)->syncpt);
        if (err)
                goto done;

        err = nvhost_channel_submit(job);
        if (err)
                goto done;

        /* Wait for FIFO to be ready */
        err = nvhost_intr_add_action(&nvhost_get_host(dev)->intr,
                        h->h.syncpt, job->sp->fence - 2,
                        NVHOST_INTR_ACTION_WAKEUP, &wq,
                        read_waiter,
                        &ref);
        read_waiter = NULL;
        WARN(err, "Failed to set wakeup interrupt");
        wait_event(wq,
                nvhost_syncpt_is_expired(&nvhost_get_host(dev)->syncpt,
                                h->h.syncpt, job->sp->fence - 2));
        nvhost_intr_put_ref(&nvhost_get_host(dev)->intr, h->h.syncpt,
                        ref);

        /* Read the register value from FIFO */
        err = nvhost_channel_drain_read_fifo(channel, value, 1, &pending);

        /* Indicate we've read the value */
        nvhost_syncpt_cpu_incr(&nvhost_get_host(dev)->syncpt,
                        h->h.syncpt);

        nvhost_job_put(job);
        job = NULL;

done:
        kfree(read_waiter);
        if (cmdbuf_ptr)
                nvhost_memmgr_munmap(mem, cmdbuf_ptr);
        if (mem)
                nvhost_memmgr_put(memmgr, mem);
        return err;
}