/*
 * drivers/media/video/tegra/nvavp/nvavp_dev.c
 *
 * Copyright (C) 2011 NVIDIA Corp.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/uaccess.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioctl.h>
#include <linux/irq.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/nvhost.h>
#include <linux/platform_device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tegra_nvavp.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <mach/clk.h>
#include <mach/hardware.h>
#include <mach/io.h>
#include <mach/iomap.h>
#include <mach/legacy_irq.h>
#include <mach/nvmap.h>

#include "../../../../video/tegra/nvmap/nvmap.h"
#include "../../../../video/tegra/host/host1x/host1x_syncpt.h"
#include "../../../../video/tegra/host/dev.h"
#include "../../../../video/tegra/host/nvhost_acm.h"

#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
#include "../avp/headavp.h"
#endif
#include "nvavp_os.h"

#define TEGRA_NVAVP_NAME                        "nvavp"

#define NVAVP_PUSHBUFFER_SIZE                   4096

#define NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE       (sizeof(u32) * 3)

#define TEGRA_NVAVP_RESET_VECTOR_ADDR   \
                (IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + 0x200)

#define FLOW_CTRL_HALT_COP_EVENTS       IO_ADDRESS(TEGRA_FLOW_CTRL_BASE + 0x4)
#define FLOW_MODE_STOP                  (0x2 << 29)
#define FLOW_MODE_NONE                  0x0

#define NVAVP_OS_INBOX                  IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x10)
#define NVAVP_OS_OUTBOX                 IO_ADDRESS(TEGRA_RES_SEMA_BASE + 0x20)

#define NVAVP_INBOX_VALID               (1 << 29)

/* AVP behavior params */
#define NVAVP_OS_IDLE_TIMEOUT           100 /* milli-seconds */

struct nvavp_info {
        u32                             clk_enabled;
        struct clk                      *bsev_clk;
        struct clk                      *vde_clk;
        struct clk                      *cop_clk;

        /* used for dvfs */
        struct clk                      *sclk;
        struct clk                      *emc_clk;
        unsigned long                   sclk_rate;
        unsigned long                   emc_clk_rate;

        int                             mbox_from_avp_pend_irq;

        struct mutex                    open_lock;
        int                             refcount;
        int                             initialized;

        struct work_struct              clock_disable_work;

        /* os information */
        struct nvavp_os_info            os_info;

        /* ucode information */
        struct nvavp_ucode_info         ucode_info;

        /* client for driver allocations, persistent */
        struct nvmap_client             *nvmap;

        struct mutex                    pushbuffer_lock;
        struct nvmap_handle_ref         *pushbuf_handle;
        unsigned long                   pushbuf_phys;
        u8                              *pushbuf_data;
        u32                             pushbuf_index;
        u32                             pushbuf_fence;

        struct nv_e276_control          *os_control;

        struct nvhost_syncpt            *nvhost_syncpt;
        u32                             syncpt_id;
        u32                             syncpt_value;

        struct nvhost_device            *nvhost_dev;
        struct miscdevice               misc_dev;
};

struct nvavp_clientctx {
        struct nvmap_client *nvmap;
        struct nvavp_pushbuffer_submit_hdr submit_hdr;
        struct nvavp_reloc relocs[NVAVP_MAX_RELOCATION_COUNT];
        struct nvmap_handle_ref *gather_mem;
        int num_relocs;
        struct nvavp_info *nvavp;
};

static struct clk *nvavp_clk_get(struct nvavp_info *nvavp, int id)
{
        if (!nvavp)
                return NULL;

        if (id == NVAVP_MODULE_ID_AVP)
                return nvavp->sclk;
        if (id == NVAVP_MODULE_ID_VDE)
                return nvavp->vde_clk;
        if (id == NVAVP_MODULE_ID_EMC)
                return nvavp->emc_clk;

        return NULL;
}

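/*
 * Enable or disable the BSEV/VDE clocks and the sclk/emc rates used by the
 * AVP. An nvhost module reference is held while the clocks are enabled and
 * dropped again when they are turned off.
 */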
static void nvavp_clk_ctrl(struct nvavp_info *nvavp, u32 clk_en)
{
        if (clk_en && !nvavp->clk_enabled) {
                nvhost_module_busy(nvhost_get_host(nvavp->nvhost_dev)->dev);
                clk_enable(nvavp->bsev_clk);
                clk_enable(nvavp->vde_clk);
                clk_set_rate(nvavp->emc_clk, nvavp->emc_clk_rate);
                clk_set_rate(nvavp->sclk, nvavp->sclk_rate);
                nvavp->clk_enabled = 1;
                dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting sclk to %lu\n",
                                __func__, nvavp->sclk_rate);
                dev_dbg(&nvavp->nvhost_dev->dev, "%s: setting emc_clk to %lu\n",
                                __func__, nvavp->emc_clk_rate);
        } else if (!clk_en && nvavp->clk_enabled) {
                clk_disable(nvavp->bsev_clk);
                clk_disable(nvavp->vde_clk);
                clk_set_rate(nvavp->emc_clk, 0);
                clk_set_rate(nvavp->sclk, 0);
                nvhost_module_idle(nvhost_get_host(nvavp->nvhost_dev)->dev);
                nvavp->clk_enabled = 0;
                dev_dbg(&nvavp->nvhost_dev->dev, "%s: resetting emc_clk "
                                "and sclk\n", __func__);
        }
}

static u32 nvavp_check_idle(struct nvavp_info *nvavp)
{
        struct nv_e276_control *control = nvavp->os_control;
        return (control->put == control->get) ? 1 : 0;
}

static void clock_disable_handler(struct work_struct *work)
{
        struct nvavp_info *nvavp;

        nvavp = container_of(work, struct nvavp_info,
                            clock_disable_work);

        mutex_lock(&nvavp->pushbuffer_lock);
        nvavp_clk_ctrl(nvavp, !nvavp_check_idle(nvavp));
        mutex_unlock(&nvavp->pushbuffer_lock);
}

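/*
 * Decode and acknowledge a mailbox message from the AVP: schedule the clock
 * disable work on a video-idle interrupt, print AVP debug strings, and log
 * awaken, fatal-error, breakpoint and timeout events.
 */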
static int nvavp_service(struct nvavp_info *nvavp)
{
        struct nvavp_os_info *os = &nvavp->os_info;
        u8 *debug_print;
        u32 inbox;

        inbox = readl(NVAVP_OS_INBOX);
        if (!(inbox & NVAVP_INBOX_VALID))
                inbox = 0x00000000;

        writel(0x00000000, NVAVP_OS_INBOX);

        if (inbox & NVE276_OS_INTERRUPT_VIDEO_IDLE)
                schedule_work(&nvavp->clock_disable_work);

        if (inbox & NVE276_OS_INTERRUPT_DEBUG_STRING) {
                /* Should only occur with debug AVP OS builds */
                debug_print = os->data;
                debug_print += os->debug_offset;
                dev_info(&nvavp->nvhost_dev->dev, "%s\n", debug_print);
        }
        if (inbox & (NVE276_OS_INTERRUPT_SEMAPHORE_AWAKEN |
                     NVE276_OS_INTERRUPT_EXECUTE_AWAKEN)) {
                dev_info(&nvavp->nvhost_dev->dev,
                        "AVP awaken event (0x%x)\n", inbox);
        }
        if (inbox & NVE276_OS_INTERRUPT_AVP_FATAL_ERROR) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "fatal AVP error (0x%08X)\n", inbox);
        }
        if (inbox & NVE276_OS_INTERRUPT_AVP_BREAKPOINT)
                dev_err(&nvavp->nvhost_dev->dev, "AVP breakpoint hit\n");
        if (inbox & NVE276_OS_INTERRUPT_TIMEOUT)
                dev_err(&nvavp->nvhost_dev->dev, "AVP timeout\n");

        return 0;
}

static irqreturn_t nvavp_mbox_pending_isr(int irq, void *data)
{
        struct nvavp_info *nvavp = data;

        nvavp_service(nvavp);

        return IRQ_HANDLED;
}

static void nvavp_halt_avp(struct nvavp_info *nvavp)
{
        /* ensure the AVP is halted */
        writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);
        tegra_periph_reset_assert(nvavp->cop_clk);

        writel(0, NVAVP_OS_OUTBOX);
        writel(0, NVAVP_OS_INBOX);
}

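/*
 * Program the COP reset vector and pulse the COP reset so the AVP starts
 * executing at reset_addr. The sclk/emc rates default to the maximum until
 * user space sets them through the clock ioctl.
 */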
static int nvavp_reset_avp(struct nvavp_info *nvavp, unsigned long reset_addr)
{
#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
        unsigned long stub_code_phys = virt_to_phys(_tegra_avp_boot_stub);
        dma_addr_t stub_data_phys;

        _tegra_avp_boot_stub_data.map_phys_addr = avp->kernel_phys;
        _tegra_avp_boot_stub_data.jump_addr = reset_addr;
        wmb();
        stub_data_phys = dma_map_single(NULL, &_tegra_avp_boot_stub_data,
                                        sizeof(_tegra_avp_boot_stub_data),
                                        DMA_TO_DEVICE);
        rmb();
        reset_addr = (unsigned long)stub_data_phys;
#endif
        writel(FLOW_MODE_STOP, FLOW_CTRL_HALT_COP_EVENTS);

        writel(reset_addr, TEGRA_NVAVP_RESET_VECTOR_ADDR);

        clk_enable(nvavp->sclk);
        clk_enable(nvavp->emc_clk);

        /* If sclk_rate and emc_clk_rate are not set by user space, the max
         * clock in the dvfs table is used to get the best performance.
         */
        nvavp->sclk_rate = ULONG_MAX;
        nvavp->emc_clk_rate = ULONG_MAX;

        tegra_periph_reset_assert(nvavp->cop_clk);
        udelay(2);
        tegra_periph_reset_deassert(nvavp->cop_clk);

        writel(FLOW_MODE_NONE, FLOW_CTRL_HALT_COP_EVENTS);

#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
        dma_unmap_single(NULL, stub_data_phys,
                         sizeof(_tegra_avp_boot_stub_data),
                         DMA_TO_DEVICE);
#endif
        return 0;
}

static void nvavp_halt_vde(struct nvavp_info *nvavp)
{
        if (nvavp->clk_enabled) {
                tegra_periph_reset_assert(nvavp->bsev_clk);
                clk_disable(nvavp->bsev_clk);
                tegra_periph_reset_assert(nvavp->vde_clk);
                clk_disable(nvavp->vde_clk);
                nvhost_module_idle(nvhost_get_host(nvavp->nvhost_dev)->dev);
                nvavp->clk_enabled = 0;
        }
}

static int nvavp_reset_vde(struct nvavp_info *nvavp)
{
        if (!nvavp->clk_enabled)
                nvhost_module_busy(nvhost_get_host(nvavp->nvhost_dev)->dev);

        clk_enable(nvavp->bsev_clk);
        tegra_periph_reset_assert(nvavp->bsev_clk);
        udelay(2);
        tegra_periph_reset_deassert(nvavp->bsev_clk);

        clk_enable(nvavp->vde_clk);
        tegra_periph_reset_assert(nvavp->vde_clk);
        udelay(2);
        tegra_periph_reset_deassert(nvavp->vde_clk);

        /*
         * The VDE clock is set to its max frequency by default. It can be
         * changed later through the clock ioctl if needed.
         */
        clk_set_rate(nvavp->vde_clk, ULONG_MAX);

        nvavp->clk_enabled = 1;
        return 0;
}

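/*
 * Allocate, map and pin the 4KB pushbuffer used to feed commands to the AVP,
 * unwinding each step on failure.
 */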
static int nvavp_pushbuffer_alloc(struct nvavp_info *nvavp)
{
        int ret = 0;

        nvavp->pushbuf_handle = nvmap_alloc(nvavp->nvmap, NVAVP_PUSHBUFFER_SIZE,
                                SZ_1M, NVMAP_HANDLE_UNCACHEABLE, 0);
        if (IS_ERR(nvavp->pushbuf_handle)) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "cannot create pushbuffer handle\n");
                ret = PTR_ERR(nvavp->pushbuf_handle);
                goto err_pushbuf_alloc;
        }
        nvavp->pushbuf_data = (u8 *)nvmap_mmap(nvavp->pushbuf_handle);
        if (!nvavp->pushbuf_data) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "cannot map pushbuffer handle\n");
                ret = -ENOMEM;
                goto err_pushbuf_mmap;
        }
        nvavp->pushbuf_phys = nvmap_pin(nvavp->nvmap, nvavp->pushbuf_handle);
        if (IS_ERR((void *)nvavp->pushbuf_phys)) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "cannot pin pushbuffer handle\n");
                ret = PTR_ERR((void *)nvavp->pushbuf_phys);
                goto err_pushbuf_pin;
        }

        memset(nvavp->pushbuf_data, 0, NVAVP_PUSHBUFFER_SIZE);

        return 0;

err_pushbuf_pin:
        nvmap_munmap(nvavp->pushbuf_handle, nvavp->pushbuf_data);
err_pushbuf_mmap:
        nvmap_free(nvavp->nvmap, nvavp->pushbuf_handle);
err_pushbuf_alloc:
        return ret;
}

static void nvavp_pushbuffer_free(struct nvavp_info *nvavp)
{
        nvmap_unpin(nvavp->nvmap, nvavp->pushbuf_handle);
        nvmap_munmap(nvavp->pushbuf_handle, nvavp->pushbuf_data);
        nvmap_free(nvavp->nvmap, nvavp->pushbuf_handle);
}

static int nvavp_pushbuffer_init(struct nvavp_info *nvavp)
{
        void *ptr;
        struct nvavp_os_info *os = &nvavp->os_info;
        struct nv_e276_control *control;
        u32 temp;
        int ret;

        ret = nvavp_pushbuffer_alloc(nvavp);
        if (ret) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "unable to alloc pushbuffer\n");
                return ret;
        }

        ptr = os->data;
        ptr += os->control_offset;
        nvavp->os_control = (struct nv_e276_control *)ptr;

        control = nvavp->os_control;
        memset(control, 0, sizeof(struct nvavp_os_info));

        /* init get and put pointers */
        writel(0x0, &control->put);
        writel(0x0, &control->get);

        /* enable avp VDE clock control and disable iram clock gating */
        writel(0x0, &control->idle_clk_enable);
        writel(0x0, &control->iram_clk_gating);

        /* enable avp idle timeout interrupt */
        writel(0x1, &control->idle_notify_enable);
        writel(NVAVP_OS_IDLE_TIMEOUT, &control->idle_notify_delay);

        /* init dma start and end pointers */
        writel(nvavp->pushbuf_phys, &control->dma_start);
        writel((nvavp->pushbuf_phys + NVAVP_PUSHBUFFER_SIZE),
                                        &control->dma_end);

        writel(0x00, &nvavp->pushbuf_index);
        temp = NVAVP_PUSHBUFFER_SIZE - NVAVP_PUSHBUFFER_MIN_UPDATE_SPACE;
        writel(temp, &nvavp->pushbuf_fence);

        nvavp->syncpt_id = NVSYNCPT_AVP_0;
        nvavp->syncpt_value = nvhost_syncpt_read(nvavp->nvhost_syncpt,
                                                 nvavp->syncpt_id);

        return 0;
}

static void nvavp_pushbuffer_deinit(struct nvavp_info *nvavp)
{
        nvavp_pushbuffer_free(nvavp);
}

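/*
 * Append a command sequence to the pushbuffer under pushbuffer_lock:
 * optionally a SET_MICROCODE sequence, then a GATHER pointing at the caller's
 * command buffer and an optional syncpoint increment. The control put pointer
 * is advanced and the AVP is woken through the outbox register.
 */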
static int nvavp_pushbuffer_update(struct nvavp_info *nvavp, u32 phys_addr,
                        u32 gather_count, struct nvavp_syncpt *syncpt,
                        u32 ext_ucode_flag)
{
        struct nv_e276_control *control = nvavp->os_control;
        u32 gather_cmd, setucode_cmd, sync = 0;
        u32 wordcount = 0;
        u32 index, value = -1;

        mutex_lock(&nvavp->pushbuffer_lock);

        /* check for pushbuffer wrapping */
        if (nvavp->pushbuf_index >= nvavp->pushbuf_fence)
                nvavp->pushbuf_index = 0;

        if (!ext_ucode_flag) {
                setucode_cmd =
                        NVE26E_CH_OPCODE_INCR(NVE276_SET_MICROCODE_A, 3);

                index = wordcount + nvavp->pushbuf_index;
                writel(setucode_cmd, (nvavp->pushbuf_data + index));
                wordcount += sizeof(u32);

                index = wordcount + nvavp->pushbuf_index;
                writel(0, (nvavp->pushbuf_data + index));
                wordcount += sizeof(u32);

                index = wordcount + nvavp->pushbuf_index;
                writel(nvavp->ucode_info.phys, (nvavp->pushbuf_data + index));
                wordcount += sizeof(u32);

                index = wordcount + nvavp->pushbuf_index;
                writel(nvavp->ucode_info.size, (nvavp->pushbuf_data + index));
                wordcount += sizeof(u32);
        }

        gather_cmd = NVE26E_CH_OPCODE_GATHER(0, 0, 0, gather_count);

        if (syncpt) {
                value = ++nvavp->syncpt_value;
                /* XXX: NvSchedValueWrappingComparison */
                sync = NVE26E_CH_OPCODE_IMM(NVE26E_HOST1X_INCR_SYNCPT,
                        (NVE26E_HOST1X_INCR_SYNCPT_COND_OP_DONE << 8) |
                        (nvavp->syncpt_id & 0xFF));
        }

        /* write commands out */
        index = wordcount + nvavp->pushbuf_index;
        writel(gather_cmd, (nvavp->pushbuf_data + index));
        wordcount += sizeof(u32);

        index = wordcount + nvavp->pushbuf_index;
        writel(phys_addr, (nvavp->pushbuf_data + index));
        wordcount += sizeof(u32);

        if (syncpt) {
                index = wordcount + nvavp->pushbuf_index;
                writel(sync, (nvavp->pushbuf_data + index));
                wordcount += sizeof(u32);
        }

        /* enable clocks to VDE/BSEV */
        nvavp_clk_ctrl(nvavp, 1);

        /* update put pointer */
        nvavp->pushbuf_index = (nvavp->pushbuf_index + wordcount) &
                                        (NVAVP_PUSHBUFFER_SIZE - 1);
        writel(nvavp->pushbuf_index, &control->put);
        wmb();

        /* wake up avp */
        writel(0xA0000001, NVAVP_OS_OUTBOX);

        /* Fill out fence struct */
        if (syncpt) {
                syncpt->id = nvavp->syncpt_id;
                syncpt->value = value;
        }

        mutex_unlock(&nvavp->pushbuffer_lock);

        return 0;
}

static void nvavp_unload_ucode(struct nvavp_info *nvavp)
{
        nvmap_unpin(nvavp->nvmap, nvavp->ucode_info.handle);
        nvmap_munmap(nvavp->ucode_info.handle, nvavp->ucode_info.data);
        nvmap_free(nvavp->nvmap, nvavp->ucode_info.handle);
        kfree(nvavp->ucode_info.ucode_bin);
}

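/*
 * Load the video ucode firmware (falling back to the alternative image name),
 * validate its "NVAVPAPP" header, cache the payload in kernel memory and copy
 * it into an uncached, pinned nvmap buffer for the AVP.
 */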
static int nvavp_load_ucode(struct nvavp_info *nvavp)
{
        struct nvavp_ucode_info *ucode_info = &nvavp->ucode_info;
        const struct firmware *nvavp_ucode_fw;
        char fw_ucode_file[32];
        void *ptr;
        int ret = 0;

        if (!ucode_info->ucode_bin) {
                sprintf(fw_ucode_file, "nvavp_vid_ucode.bin");

                ret = request_firmware(&nvavp_ucode_fw, fw_ucode_file,
                                        nvavp->misc_dev.this_device);
                if (ret) {
                        /* Try alternative version */
                        sprintf(fw_ucode_file, "nvavp_vid_ucode_alt.bin");

                        ret = request_firmware(&nvavp_ucode_fw,
                                                fw_ucode_file,
                                                nvavp->misc_dev.this_device);

                        if (ret) {
                                dev_err(&nvavp->nvhost_dev->dev,
                                        "cannot read ucode firmware '%s'\n",
                                        fw_ucode_file);
                                goto err_req_ucode;
                        }
                }

                dev_info(&nvavp->nvhost_dev->dev,
                        "read ucode firmware from '%s' (%d bytes)\n",
                        fw_ucode_file, nvavp_ucode_fw->size);

                ptr = (void *)nvavp_ucode_fw->data;

                if (strncmp((const char *)ptr, "NVAVPAPP", 8)) {
                        dev_info(&nvavp->nvhost_dev->dev,
                                "ucode hdr string mismatch\n");
                        ret = -EINVAL;
                        goto err_req_ucode;
                }
                ptr += 8;
                ucode_info->size = nvavp_ucode_fw->size - 8;

                ucode_info->ucode_bin = kzalloc(ucode_info->size,
                                                GFP_KERNEL);
                if (!ucode_info->ucode_bin) {
                        dev_err(&nvavp->nvhost_dev->dev,
                                "cannot allocate ucode bin\n");
                        ret = -ENOMEM;
                        goto err_ubin_alloc;
                }

                ucode_info->handle = nvmap_alloc(nvavp->nvmap,
                                                nvavp->ucode_info.size,
                                        SZ_1M, NVMAP_HANDLE_UNCACHEABLE, 0);
                if (IS_ERR(ucode_info->handle)) {
                        dev_err(&nvavp->nvhost_dev->dev,
                                "cannot create ucode handle\n");
                        ret = PTR_ERR(ucode_info->handle);
                        goto err_ucode_alloc;
                }
                ucode_info->data = (u8 *)nvmap_mmap(ucode_info->handle);
                if (!ucode_info->data) {
                        dev_err(&nvavp->nvhost_dev->dev,
                                "cannot map ucode handle\n");
                        ret = -ENOMEM;
                        goto err_ucode_mmap;
                }
                ucode_info->phys = nvmap_pin(nvavp->nvmap, ucode_info->handle);
                if (IS_ERR((void *)ucode_info->phys)) {
                        dev_err(&nvavp->nvhost_dev->dev,
                                "cannot pin ucode handle\n");
                        ret = PTR_ERR((void *)ucode_info->phys);
                        goto err_ucode_pin;
                }
                memcpy(ucode_info->ucode_bin, ptr, ucode_info->size);
                release_firmware(nvavp_ucode_fw);
        }

        memcpy(ucode_info->data, ucode_info->ucode_bin, ucode_info->size);
        return 0;

err_ucode_pin:
        nvmap_munmap(ucode_info->handle, ucode_info->data);
err_ucode_mmap:
        nvmap_free(nvavp->nvmap, ucode_info->handle);
err_ucode_alloc:
        kfree(nvavp->ucode_info.ucode_bin);
err_ubin_alloc:
        release_firmware(nvavp_ucode_fw);
err_req_ucode:
        return ret;
}

static void nvavp_unload_os(struct nvavp_info *nvavp)
{
        nvmap_unpin(nvavp->nvmap, nvavp->os_info.handle);
        nvmap_munmap(nvavp->os_info.handle, nvavp->os_info.data);
#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
        nvmap_free(nvavp->nvmap, nvavp->os_info.handle);
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU)
        nvmap_free_iovm(nvavp->nvmap, nvavp->os_info.handle);
#endif
        kfree(nvavp->os_info.os_bin);
}

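/*
 * Load the AVP OS image, check its "NVAVP-OS" header, read the entry,
 * control and debug offsets and the image size from the header, cache the
 * payload and copy it into the memory set aside for the AVP OS.
 */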
static int nvavp_load_os(struct nvavp_info *nvavp, char *fw_os_file)
{
        struct nvavp_os_info *os_info = &nvavp->os_info;
        const struct firmware *nvavp_os_fw;
        void *ptr;
        u32 size;
        int ret = 0;

        if (!os_info->os_bin) {
                ret = request_firmware(&nvavp_os_fw, fw_os_file,
                                        nvavp->misc_dev.this_device);
                if (ret) {
                        dev_err(&nvavp->nvhost_dev->dev,
                                "cannot read os firmware '%s'\n", fw_os_file);
                        goto err_req_fw;
                }

                dev_info(&nvavp->nvhost_dev->dev,
                        "read firmware from '%s' (%d bytes)\n",
                        fw_os_file, nvavp_os_fw->size);

                ptr = (void *)nvavp_os_fw->data;

                if (strncmp((const char *)ptr, "NVAVP-OS", 8)) {
                        dev_info(&nvavp->nvhost_dev->dev,
                                "os hdr string mismatch\n");
                        ret = -EINVAL;
                        goto err_os_bin;
                }

                ptr += 8;
                os_info->entry_offset = *((u32 *)ptr);
                ptr += sizeof(u32);
                os_info->control_offset = *((u32 *)ptr);
                ptr += sizeof(u32);
                os_info->debug_offset = *((u32 *)ptr);
                ptr += sizeof(u32);

                size = *((u32 *)ptr);
                ptr += sizeof(u32);

                os_info->size = size;
                os_info->os_bin = kzalloc(os_info->size,
                                                GFP_KERNEL);
                if (!os_info->os_bin) {
                        dev_err(&nvavp->nvhost_dev->dev,
                                "cannot allocate os bin\n");
                        ret = -ENOMEM;
                        goto err_os_bin;
                }

                memcpy(os_info->os_bin, ptr, os_info->size);
                memset(os_info->data + os_info->size, 0, SZ_1M - os_info->size);

                dev_info(&nvavp->nvhost_dev->dev,
                        "entry=%08x control=%08x debug=%08x size=%d\n",
                        os_info->entry_offset, os_info->control_offset,
                        os_info->debug_offset, os_info->size);
                release_firmware(nvavp_os_fw);
        }

        memcpy(os_info->data, os_info->os_bin, os_info->size);
        os_info->reset_addr = os_info->phys + os_info->entry_offset;

        dev_info(&nvavp->nvhost_dev->dev,
                "AVP os at vaddr=%p paddr=%lx reset_addr=%p\n",
                os_info->data, (unsigned long)(os_info->phys),
                                (void *)os_info->reset_addr);
        return 0;

err_os_bin:
        release_firmware(nvavp_os_fw);
err_req_fw:
        return ret;
}

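/*
 * One-time bring-up of the AVP: pick the OS firmware image and load address
 * for the current configuration (AVP MMU, SMMU or nvmem= carveout), load the
 * OS and ucode, set up the pushbuffer, reset VDE and the AVP, and enable the
 * mailbox interrupt.
 */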
static int nvavp_init(struct nvavp_info *nvavp)
{
        char fw_os_file[32];
        int ret = 0;

        if (nvavp->initialized)
                return ret;

#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
        /* paddr is any address returned from nvmap_pin */
        /* vaddr is AVP_KERNEL_VIRT_BASE */
        dev_info(&nvavp->nvhost_dev->dev,
                "using AVP MMU to relocate AVP os\n");
        sprintf(fw_os_file, "nvavp_os.bin");
        nvavp->os_info.reset_addr = AVP_KERNEL_VIRT_BASE;
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
        /* paddr is any address behind SMMU */
        /* vaddr is TEGRA_SMMU_BASE */
        dev_info(&nvavp->nvhost_dev->dev,
                "using SMMU at %lx to load AVP kernel\n",
                (unsigned long)nvavp->os_info.phys);
        BUG_ON(nvavp->os_info.phys != 0xeff00000
                && nvavp->os_info.phys != 0x0ff00000);
        sprintf(fw_os_file, "nvavp_os_%08lx.bin",
                (unsigned long)nvavp->os_info.phys);
        nvavp->os_info.reset_addr = nvavp->os_info.phys;
#else /* nvmem= carveout */
        /* paddr is found in nvmem= carveout */
        /* vaddr is same as paddr */
        /* Find nvmem carveout */
        if (!pfn_valid(__phys_to_pfn(0x8e000000))) {
                nvavp->os_info.phys = 0x8e000000;
        } else if (!pfn_valid(__phys_to_pfn(0x9e000000))) {
                nvavp->os_info.phys = 0x9e000000;
        } else if (!pfn_valid(__phys_to_pfn(0xbe000000))) {
                nvavp->os_info.phys = 0xbe000000;
        } else {
                dev_err(&nvavp->nvhost_dev->dev,
                        "cannot find nvmem= carveout to load AVP os\n");
                dev_err(&nvavp->nvhost_dev->dev,
                        "check kernel command line "
                        "to see if nvmem= is defined\n");
                BUG();
        }
        dev_info(&nvavp->nvhost_dev->dev,
                "using nvmem= carveout at %lx to load AVP os\n",
                nvavp->os_info.phys);
        sprintf(fw_os_file, "nvavp_os_%08lx.bin", nvavp->os_info.phys);
        nvavp->os_info.reset_addr = nvavp->os_info.phys;
        nvavp->os_info.data = ioremap(nvavp->os_info.phys, SZ_1M);
#endif

        ret = nvavp_load_os(nvavp, fw_os_file);
        if (ret) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "unable to load os firmware '%s'\n", fw_os_file);
                goto err_exit;
        }

        ret = nvavp_pushbuffer_init(nvavp);
        if (ret) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "unable to init pushbuffer\n");
                goto err_exit;
        }

        ret = nvavp_load_ucode(nvavp);
        if (ret) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "unable to load ucode\n");
                goto err_exit;
        }

        tegra_init_legacy_irq_cop();

        nvavp_reset_vde(nvavp);
        nvavp_reset_avp(nvavp, nvavp->os_info.reset_addr);
        enable_irq(nvavp->mbox_from_avp_pend_irq);

        nvavp->initialized = 1;

err_exit:
        return ret;
}

static void nvavp_uninit(struct nvavp_info *nvavp)
{
        if (!nvavp->initialized)
                return;

        disable_irq(nvavp->mbox_from_avp_pend_irq);

        cancel_work_sync(&nvavp->clock_disable_work);

        nvavp_pushbuffer_deinit(nvavp);

        nvavp_halt_vde(nvavp);
        nvavp_halt_avp(nvavp);

        clk_disable(nvavp->sclk);
        clk_disable(nvavp->emc_clk);

        nvavp->initialized = 0;
}

static int nvavp_set_clock_ioctl(struct file *filp, unsigned int cmd,
                                                        unsigned long arg)
{
        struct nvavp_clientctx *clientctx = filp->private_data;
        struct nvavp_info *nvavp = clientctx->nvavp;
        struct clk *c;
        struct nvavp_clock_args config;

        if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
                return -EFAULT;

        dev_dbg(&nvavp->nvhost_dev->dev, "%s: clk_id=%d, clk_rate=%u\n",
                        __func__, config.id, config.rate);

        if (config.id == NVAVP_MODULE_ID_AVP)
                nvavp->sclk_rate = config.rate;
        else if (config.id == NVAVP_MODULE_ID_EMC)
                nvavp->emc_clk_rate = config.rate;

        c = nvavp_clk_get(nvavp, config.id);
        if (IS_ERR_OR_NULL(c))
                return -EINVAL;

        clk_enable(c);
        clk_set_rate(c, config.rate);

        config.rate = clk_get_rate(c);
        clk_disable(c);
        if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
                return -EFAULT;

        return 0;
}

static int nvavp_get_clock_ioctl(struct file *filp, unsigned int cmd,
                                                        unsigned long arg)
{
        struct nvavp_clientctx *clientctx = filp->private_data;
        struct nvavp_info *nvavp = clientctx->nvavp;
        struct clk *c;
        struct nvavp_clock_args config;

        if (copy_from_user(&config, (void __user *)arg, sizeof(struct nvavp_clock_args)))
                return -EFAULT;

        c = nvavp_clk_get(nvavp, config.id);
        if (IS_ERR_OR_NULL(c))
                return -EINVAL;

        clk_enable(c);
        config.rate = clk_get_rate(c);
        clk_disable(c);

        if (copy_to_user((void __user *)arg, &config, sizeof(struct nvavp_clock_args)))
                return -EFAULT;

        return 0;
}

static int nvavp_get_syncpointid_ioctl(struct file *filp, unsigned int cmd,
                                                        unsigned long arg)
{
        struct nvavp_clientctx *clientctx = filp->private_data;
        struct nvavp_info *nvavp = clientctx->nvavp;
        u32 id = nvavp->syncpt_id;

        if (_IOC_DIR(cmd) & _IOC_READ) {
                if (copy_to_user((void __user *)arg, &id, sizeof(u32)))
                        return -EFAULT;
                else
                        return 0;
        }
        return -EFAULT;
}

static int nvavp_set_nvmapfd_ioctl(struct file *filp, unsigned int cmd,
                                                        unsigned long arg)
{
        struct nvavp_clientctx *clientctx = filp->private_data;
        struct nvavp_set_nvmap_fd_args buf;
        struct nvmap_client *new_client;
        int fd;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(&buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        fd = buf.fd;
        new_client = nvmap_client_get_file(fd);
        if (IS_ERR(new_client))
                return PTR_ERR(new_client);

        clientctx->nvmap = new_client;
        return 0;
}

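/*
 * Handle NVAVP_IOCTL_PUSH_BUFFER_SUBMIT: duplicate and pin the user's command
 * buffer handle, map it, patch in the relocation addresses, and submit it to
 * the AVP via nvavp_pushbuffer_update(), optionally returning a syncpoint
 * fence to user space.
 */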
static int nvavp_pushbuffer_submit_ioctl(struct file *filp, unsigned int cmd,
                                                        unsigned long arg)
{
        struct nvavp_clientctx *clientctx = filp->private_data;
        struct nvavp_info *nvavp = clientctx->nvavp;
        struct nvavp_pushbuffer_submit_hdr hdr;
        u32 *cmdbuf_data;
        struct nvmap_handle *cmdbuf_handle = NULL;
        struct nvmap_handle_ref *cmdbuf_dupe;
        int ret = 0, i;
        unsigned long phys_addr;
        unsigned long virt_addr;
        struct nvavp_pushbuffer_submit_hdr *user_hdr =
                        (struct nvavp_pushbuffer_submit_hdr *) arg;
        struct nvavp_syncpt syncpt;

        syncpt.id = NVSYNCPT_INVALID;
        syncpt.value = 0;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(&hdr, (void __user *)arg,
                        sizeof(struct nvavp_pushbuffer_submit_hdr)))
                        return -EFAULT;
        }

        if (!hdr.cmdbuf.mem)
                return 0;

        if (copy_from_user(clientctx->relocs, (void __user *)hdr.relocs,
                        sizeof(struct nvavp_reloc) * hdr.num_relocs)) {
                return -EFAULT;
        }

        cmdbuf_handle = nvmap_get_handle_id(clientctx->nvmap, hdr.cmdbuf.mem);
        if (cmdbuf_handle == NULL) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "invalid cmd buffer handle %08x\n", hdr.cmdbuf.mem);
                return -EPERM;
        }

        /* duplicate the new pushbuffer's handle into the nvavp driver's
         * nvmap context, to ensure that the handle won't be freed as
         * long as it is in use by this driver */
        cmdbuf_dupe = nvmap_duplicate_handle_id(nvavp->nvmap, hdr.cmdbuf.mem);
        nvmap_handle_put(cmdbuf_handle);

        if (IS_ERR(cmdbuf_dupe)) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "could not duplicate handle\n");
                return PTR_ERR(cmdbuf_dupe);
        }

        phys_addr = nvmap_pin(nvavp->nvmap, cmdbuf_dupe);
        if (IS_ERR((void *)phys_addr)) {
                dev_err(&nvavp->nvhost_dev->dev, "could not pin handle\n");
                nvmap_free(nvavp->nvmap, cmdbuf_dupe);
                return PTR_ERR((void *)phys_addr);
        }

        virt_addr = (unsigned long)nvmap_mmap(cmdbuf_dupe);
        if (!virt_addr) {
                dev_err(&nvavp->nvhost_dev->dev, "cannot map cmdbuf handle\n");
                ret = -ENOMEM;
                goto err_cmdbuf_mmap;
        }

        cmdbuf_data = (u32 *)(virt_addr + hdr.cmdbuf.offset);

        for (i = 0; i < hdr.num_relocs; i++) {
                u32 *reloc_addr, target_phys_addr;

                if (clientctx->relocs[i].cmdbuf_mem != hdr.cmdbuf.mem) {
                        dev_err(&nvavp->nvhost_dev->dev,
                                "reloc info does not match target bufferID\n");
                        ret = -EPERM;
                        goto err_reloc_info;
                }

                reloc_addr = cmdbuf_data +
                             (clientctx->relocs[i].cmdbuf_offset >> 2);

                target_phys_addr = nvmap_handle_address(clientctx->nvmap,
                                            clientctx->relocs[i].target);
                target_phys_addr += clientctx->relocs[i].target_offset;
                writel(target_phys_addr, reloc_addr);
        }

        if (hdr.syncpt) {
                ret = nvavp_pushbuffer_update(nvavp,
                                             (phys_addr + hdr.cmdbuf.offset),
                                              hdr.cmdbuf.words, &syncpt,
                                              (hdr.flags & NVAVP_UCODE_EXT));

                if (copy_to_user((void __user *)user_hdr->syncpt, &syncpt,
                                sizeof(struct nvavp_syncpt))) {
                        ret = -EFAULT;
                        goto err_reloc_info;
                }
        } else {
                ret = nvavp_pushbuffer_update(nvavp,
                                             (phys_addr + hdr.cmdbuf.offset),
                                              hdr.cmdbuf.words, NULL,
                                              (hdr.flags & NVAVP_UCODE_EXT));
        }

err_reloc_info:
        nvmap_munmap(cmdbuf_dupe, (void *)virt_addr);
err_cmdbuf_mmap:
        nvmap_unpin(nvavp->nvmap, cmdbuf_dupe);
        nvmap_free(nvavp->nvmap, cmdbuf_dupe);
        return ret;
}

static int tegra_nvavp_open(struct inode *inode, struct file *filp)
{
        struct miscdevice *miscdev = filp->private_data;
        struct nvavp_info *nvavp = dev_get_drvdata(miscdev->parent);
        int ret = 0;
        struct nvavp_clientctx *clientctx;

        dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);

        nonseekable_open(inode, filp);

        clientctx = kzalloc(sizeof(*clientctx), GFP_KERNEL);
        if (!clientctx)
                return -ENOMEM;

        mutex_lock(&nvavp->open_lock);

        if (!nvavp->refcount)
                ret = nvavp_init(nvavp);

        if (!ret)
                nvavp->refcount++;

        clientctx->nvmap = nvavp->nvmap;
        clientctx->nvavp = nvavp;

        filp->private_data = clientctx;

        mutex_unlock(&nvavp->open_lock);

        return ret;
}

static int tegra_nvavp_release(struct inode *inode, struct file *filp)
{
        struct nvavp_clientctx *clientctx = filp->private_data;
        struct nvavp_info *nvavp = clientctx->nvavp;
        int ret = 0;

        dev_dbg(&nvavp->nvhost_dev->dev, "%s: ++\n", __func__);

        filp->private_data = NULL;

        mutex_lock(&nvavp->open_lock);

        if (!nvavp->refcount) {
                dev_err(&nvavp->nvhost_dev->dev,
                        "releasing while in invalid state\n");
                ret = -EINVAL;
                goto out;
        }

        if (nvavp->refcount > 0)
                nvavp->refcount--;
        if (!nvavp->refcount)
                nvavp_uninit(nvavp);

out:
        nvmap_client_put(clientctx->nvmap);
        mutex_unlock(&nvavp->open_lock);
        kfree(clientctx);
        return ret;
}

static long tegra_nvavp_ioctl(struct file *filp, unsigned int cmd,
                            unsigned long arg)
{
        int ret = 0;

        if (_IOC_TYPE(cmd) != NVAVP_IOCTL_MAGIC ||
            _IOC_NR(cmd) < NVAVP_IOCTL_MIN_NR ||
            _IOC_NR(cmd) > NVAVP_IOCTL_MAX_NR)
                return -EFAULT;

        switch (cmd) {
        case NVAVP_IOCTL_SET_NVMAP_FD:
                ret = nvavp_set_nvmapfd_ioctl(filp, cmd, arg);
                break;
        case NVAVP_IOCTL_GET_SYNCPOINT_ID:
                ret = nvavp_get_syncpointid_ioctl(filp, cmd, arg);
                break;
        case NVAVP_IOCTL_PUSH_BUFFER_SUBMIT:
                ret = nvavp_pushbuffer_submit_ioctl(filp, cmd, arg);
                break;
        case NVAVP_IOCTL_SET_CLOCK:
                ret = nvavp_set_clock_ioctl(filp, cmd, arg);
                break;
        case NVAVP_IOCTL_GET_CLOCK:
                ret = nvavp_get_clock_ioctl(filp, cmd, arg);
                break;
        default:
                ret = -EINVAL;
                break;
        }
        return ret;
}

static const struct file_operations tegra_nvavp_fops = {
        .owner          = THIS_MODULE,
        .open           = tegra_nvavp_open,
        .release        = tegra_nvavp_release,
        .unlocked_ioctl = tegra_nvavp_ioctl,
};

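/*
 * Probe: allocate the AVP OS memory from the heap that matches the kernel
 * configuration (generic carveout, IOVMM behind the SMMU, or the nvmem=
 * carveout), grab the clocks, register the "tegra_avpchannel" misc device and
 * hook up the mailbox interrupt (left disabled until nvavp_init()).
 */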
static int tegra_nvavp_probe(struct nvhost_device *ndev)
{
        struct nvavp_info *nvavp;
        int irq;
        unsigned int heap_mask;
        u32 iovmm_addr;
        int ret = 0;

        irq = nvhost_get_irq_byname(ndev, "mbox_from_nvavp_pending");
        if (irq < 0) {
                dev_err(&ndev->dev, "invalid nvhost data\n");
                return -EINVAL;
        }

        nvavp = kzalloc(sizeof(struct nvavp_info), GFP_KERNEL);
        if (!nvavp) {
                dev_err(&ndev->dev, "cannot allocate avp_info\n");
                return -ENOMEM;
        }

        memset(nvavp, 0, sizeof(*nvavp));

        nvavp->nvhost_syncpt = &nvhost_get_host(ndev)->syncpt;
        if (!nvavp->nvhost_syncpt) {
                dev_err(&ndev->dev, "cannot get syncpt handle\n");
                ret = -ENOENT;
                goto err_get_syncpt;
        }

        nvavp->nvmap = nvmap_create_client(nvmap_dev, "nvavp_drv");
        if (IS_ERR_OR_NULL(nvavp->nvmap)) {
                dev_err(&ndev->dev, "cannot create nvmap client\n");
                ret = PTR_ERR(nvavp->nvmap);
                goto err_nvmap_create_drv_client;
        }

#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU) /* Tegra2 with AVP MMU */
        heap_mask = NVMAP_HEAP_CARVEOUT_GENERIC;
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU) /* Tegra3 with SMMU */
        heap_mask = NVMAP_HEAP_IOVMM;
#else /* nvmem= carveout */
        heap_mask = 0;
#endif
        switch (heap_mask) {
        case NVMAP_HEAP_IOVMM:
                iovmm_addr = 0x0ff00000;

                /* Tegra3 A01 has different SMMU address */
                if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA3
                        && tegra_get_revision() == TEGRA_REVISION_A01) {
                        iovmm_addr = 0xeff00000;
                }

                nvavp->os_info.handle = nvmap_alloc_iovm(nvavp->nvmap, SZ_1M,
                                                L1_CACHE_BYTES,
                                                NVMAP_HANDLE_UNCACHEABLE,
                                                iovmm_addr);
                if (IS_ERR_OR_NULL(nvavp->os_info.handle)) {
                        dev_err(&ndev->dev,
                                "cannot create os handle\n");
                        ret = PTR_ERR(nvavp->os_info.handle);
                        goto err_nvmap_alloc;
                }

                nvavp->os_info.data = nvmap_mmap(nvavp->os_info.handle);
                if (!nvavp->os_info.data) {
                        dev_err(&ndev->dev,
                                "cannot map os handle\n");
                        ret = -ENOMEM;
                        goto err_nvmap_mmap;
                }

                nvavp->os_info.phys =
                        nvmap_pin(nvavp->nvmap, nvavp->os_info.handle);
                if (IS_ERR_OR_NULL((void *)nvavp->os_info.phys)) {
                        dev_err(&ndev->dev,
                                "cannot pin os handle\n");
                        ret = PTR_ERR((void *)nvavp->os_info.phys);
                        goto err_nvmap_pin;
                }

                dev_info(&ndev->dev,
                        "allocated IOVM at %lx for AVP os\n",
                        (unsigned long)nvavp->os_info.phys);
                break;
        case NVMAP_HEAP_CARVEOUT_GENERIC:
                nvavp->os_info.handle = nvmap_alloc(nvavp->nvmap, SZ_1M, SZ_1M,
                                                NVMAP_HANDLE_UNCACHEABLE, 0);
                if (IS_ERR_OR_NULL(nvavp->os_info.handle)) {
                        dev_err(&ndev->dev, "cannot create AVP os handle\n");
                        ret = PTR_ERR(nvavp->os_info.handle);
                        goto err_nvmap_alloc;
                }

                nvavp->os_info.data = nvmap_mmap(nvavp->os_info.handle);
                if (!nvavp->os_info.data) {
                        dev_err(&ndev->dev, "cannot map AVP os handle\n");
                        ret = -ENOMEM;
                        goto err_nvmap_mmap;
                }

                nvavp->os_info.phys = nvmap_pin(nvavp->nvmap,
                                        nvavp->os_info.handle);
                if (IS_ERR_OR_NULL((void *)nvavp->os_info.phys)) {
                        dev_err(&ndev->dev, "cannot pin AVP os handle\n");
                        ret = PTR_ERR((void *)nvavp->os_info.phys);
                        goto err_nvmap_pin;
                }

                dev_info(&ndev->dev,
                        "allocated carveout memory at %lx for AVP os\n",
                        (unsigned long)nvavp->os_info.phys);
                break;
        default:
                dev_err(&ndev->dev, "invalid/non-supported heap for AVP os\n");
                ret = -EINVAL;
                goto err_get_syncpt;
        }

        nvavp->mbox_from_avp_pend_irq = irq;
        mutex_init(&nvavp->open_lock);
        mutex_init(&nvavp->pushbuffer_lock);

        /* TODO DO NOT USE NVAVP DEVICE */
        nvavp->cop_clk = clk_get(&ndev->dev, "cop");
        if (IS_ERR(nvavp->cop_clk)) {
                dev_err(&ndev->dev, "cannot get cop clock\n");
                ret = -ENOENT;
                goto err_get_cop_clk;
        }

        nvavp->vde_clk = clk_get(&ndev->dev, "vde");
        if (IS_ERR(nvavp->vde_clk)) {
                dev_err(&ndev->dev, "cannot get vde clock\n");
                ret = -ENOENT;
                goto err_get_vde_clk;
        }

        nvavp->bsev_clk = clk_get(&ndev->dev, "bsev");
        if (IS_ERR(nvavp->bsev_clk)) {
                dev_err(&ndev->dev, "cannot get bsev clock\n");
                ret = -ENOENT;
                goto err_get_bsev_clk;
        }

        nvavp->sclk = clk_get(&ndev->dev, "sclk");
        if (IS_ERR(nvavp->sclk)) {
                dev_err(&ndev->dev, "cannot get avp.sclk clock\n");
                ret = -ENOENT;
                goto err_get_sclk;
        }

        nvavp->emc_clk = clk_get(&ndev->dev, "emc");
        if (IS_ERR(nvavp->emc_clk)) {
                dev_err(&ndev->dev, "cannot get emc clock\n");
                ret = -ENOENT;
                goto err_get_emc_clk;
        }

        nvavp->clk_enabled = 0;
        nvavp_halt_avp(nvavp);

        INIT_WORK(&nvavp->clock_disable_work, clock_disable_handler);

        nvavp->misc_dev.minor = MISC_DYNAMIC_MINOR;
        nvavp->misc_dev.name = "tegra_avpchannel";
        nvavp->misc_dev.fops = &tegra_nvavp_fops;
        nvavp->misc_dev.mode = S_IRWXUGO;
        nvavp->misc_dev.parent = &ndev->dev;

        ret = misc_register(&nvavp->misc_dev);
        if (ret) {
                dev_err(&ndev->dev, "unable to register misc device!\n");
                goto err_misc_reg;
        }

        ret = request_irq(irq, nvavp_mbox_pending_isr, 0,
                          TEGRA_NVAVP_NAME, nvavp);
        if (ret) {
                dev_err(&ndev->dev, "cannot register irq handler\n");
                goto err_req_irq_pend;
        }
        disable_irq(nvavp->mbox_from_avp_pend_irq);

        nvhost_set_drvdata(ndev, nvavp);
        nvavp->nvhost_dev = ndev;

        return 0;

err_req_irq_pend:
        misc_deregister(&nvavp->misc_dev);
err_misc_reg:
        clk_put(nvavp->emc_clk);
err_get_emc_clk:
        clk_put(nvavp->sclk);
err_get_sclk:
        clk_put(nvavp->bsev_clk);
err_get_bsev_clk:
        clk_put(nvavp->vde_clk);
err_get_vde_clk:
        clk_put(nvavp->cop_clk);
err_get_cop_clk:
        nvmap_unpin(nvavp->nvmap, nvavp->os_info.handle);
err_nvmap_pin:
        nvmap_munmap(nvavp->os_info.handle, nvavp->os_info.data);
err_nvmap_mmap:
#if defined(CONFIG_TEGRA_AVP_KERNEL_ON_MMU)
        nvmap_free(nvavp->nvmap, nvavp->os_info.handle);
#elif defined(CONFIG_TEGRA_AVP_KERNEL_ON_SMMU)
        nvmap_free_iovm(nvavp->nvmap, nvavp->os_info.handle);
#endif
err_nvmap_alloc:
        nvmap_client_put(nvavp->nvmap);
err_nvmap_create_drv_client:
err_get_syncpt:
        kfree(nvavp);
        return ret;
}

static int tegra_nvavp_remove(struct nvhost_device *ndev)
{
        struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);

        if (!nvavp)
                return 0;

        mutex_lock(&nvavp->open_lock);
        if (nvavp->refcount) {
                mutex_unlock(&nvavp->open_lock);
                return -EBUSY;
        }
        mutex_unlock(&nvavp->open_lock);

        nvavp_unload_ucode(nvavp);
        nvavp_unload_os(nvavp);

        misc_deregister(&nvavp->misc_dev);

        clk_put(nvavp->bsev_clk);
        clk_put(nvavp->vde_clk);
        clk_put(nvavp->cop_clk);

        clk_put(nvavp->emc_clk);
        clk_put(nvavp->sclk);

        nvmap_client_put(nvavp->nvmap);

        kfree(nvavp);
        return 0;
}

#ifdef CONFIG_PM
static int tegra_nvavp_suspend(struct nvhost_device *ndev, pm_message_t state)
{
        struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);
        int ret = 0;

        mutex_lock(&nvavp->open_lock);

        if (nvavp->refcount) {
                if (nvavp_check_idle(nvavp))
                        nvavp_uninit(nvavp);
                else
                        ret = -EBUSY;
        }

        mutex_unlock(&nvavp->open_lock);

        return ret;
}

static int tegra_nvavp_resume(struct nvhost_device *ndev)
{
        struct nvavp_info *nvavp = nvhost_get_drvdata(ndev);

        mutex_lock(&nvavp->open_lock);

        if (nvavp->refcount)
                nvavp_init(nvavp);

        mutex_unlock(&nvavp->open_lock);

        return 0;
}
#endif

static struct nvhost_driver tegra_nvavp_driver = {
        .driver = {
                .name   = TEGRA_NVAVP_NAME,
                .owner  = THIS_MODULE,
        },
        .probe          = tegra_nvavp_probe,
        .remove         = tegra_nvavp_remove,
#ifdef CONFIG_PM
        .suspend        = tegra_nvavp_suspend,
        .resume         = tegra_nvavp_resume,
#endif
};

static int __init tegra_nvavp_init(void)
{
        return nvhost_driver_register(&tegra_nvavp_driver);
}

static void __exit tegra_nvavp_exit(void)
{
        nvhost_driver_unregister(&tegra_nvavp_driver);
}

module_init(tegra_nvavp_init);
module_exit(tegra_nvavp_exit);

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Channel based AVP driver for Tegra");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");