video: tegra: host: Tegra12 updates to host
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * drivers/video/tegra/host/bus_client.c
3  *
4  * Tegra Graphics Host Client Module
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/spinlock.h>
24 #include <linux/fs.h>
25 #include <linux/cdev.h>
26 #include <linux/uaccess.h>
27 #include <linux/file.h>
28 #include <linux/clk.h>
29 #include <linux/hrtimer.h>
30 #include <linux/export.h>
31 #include <linux/firmware.h>
32
33 #include <trace/events/nvhost.h>
34
35 #include <linux/io.h>
36 #include <linux/string.h>
37
38 #include <linux/nvhost.h>
39 #include <linux/nvhost_ioctl.h>
40
41 #include <mach/gpufuse.h>
42 #include <mach/hardware.h>
43
44 #include "debug.h"
45 #include "bus_client.h"
46 #include "dev.h"
47 #include "class_ids.h"
48 #include "nvhost_as.h"
49 #include "nvhost_memmgr.h"
50 #include "chip_support.h"
51 #include "nvhost_acm.h"
52
53 #include "nvhost_syncpt.h"
54 #include "nvhost_channel.h"
55 #include "nvhost_job.h"
56 #include "nvhost_hwctx.h"
57 #include "user_hwctx.h"
58
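/*
 * validate_reg - check that a window of 'count' 32-bit registers starting
 * at 'offset' lies within the device's first MEM resource.  The second
 * comparison rejects ranges whose end wraps around the address space.
 */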
59 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
60 {
61         struct resource *r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
62         int err = 0;
63
64         if (!r || offset + 4 * count > resource_size(r)
65                         || (offset + 4 * count < offset))
66                 err = -EPERM;
67
68         return err;
69 }
70
71 int nvhost_read_module_regs(struct platform_device *ndev,
72                         u32 offset, int count, u32 *values)
73 {
74         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
75         void __iomem *p = pdata->aperture[0] + offset;
76         int err;
77
78         if (!pdata->aperture[0])
79                 return -ENODEV;
80
81         /* verify offset */
82         err = validate_reg(ndev, offset, count);
83         if (err)
84                 return err;
85
86         nvhost_module_busy(ndev);
87         while (count--) {
88                 *(values++) = readl(p);
89                 p += 4;
90         }
91         rmb();
92         nvhost_module_idle(ndev);
93
94         return 0;
95 }
96
97 int nvhost_write_module_regs(struct platform_device *ndev,
98                         u32 offset, int count, const u32 *values)
99 {
100         void __iomem *p;
101         int err;
102         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
103
104         if (!pdata->aperture[0])
105                 return -ENODEV;
106
107         p = pdata->aperture[0] + offset;
108
109         /* verify offset */
110         err = validate_reg(ndev, offset, count);
111         if (err)
112                 return err;
113
114         nvhost_module_busy(ndev);
115         while (count--) {
116                 writel(*(values++), p);
117                 p += 4;
118         }
119         wmb();
120         nvhost_module_idle(ndev);
121
122         return 0;
123 }
124
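/*
 * Per-open-file state for a channel node: the channel and hardware
 * context, the submit header currently being streamed in via write(),
 * the job under construction, the memory manager handle, and the
 * per-client timeout, priority and id.
 */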
125 struct nvhost_channel_userctx {
126         struct nvhost_channel *ch;
127         struct nvhost_hwctx *hwctx;
128         struct nvhost_submit_hdr_ext hdr;
129         int num_relocshifts;
130         struct nvhost_job *job;
131         struct mem_mgr *memmgr;
132         u32 timeout;
133         u32 priority;
134         int clientid;
135         bool timeout_debug_dump;
136 };
137
138 static int nvhost_channelrelease(struct inode *inode, struct file *filp)
139 {
140         struct nvhost_channel_userctx *priv = filp->private_data;
141
142         trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));
143
144         filp->private_data = NULL;
145
146         nvhost_module_remove_client(priv->ch->dev, priv);
147         nvhost_putchannel(priv->ch, priv->hwctx);
148
149         if (priv->hwctx)
150                 priv->hwctx->h->put(priv->hwctx);
151
152         if (priv->job)
153                 nvhost_job_put(priv->job);
154
155         nvhost_memmgr_put_mgr(priv->memmgr);
156         kfree(priv);
157         return 0;
158 }
159
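/*
 * open(): take a reference on the channel, allocate the per-descriptor
 * context and, if the engine provides a context handler, a hardware
 * context as well.  Timeout and priority start from module defaults.
 */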
160 static int nvhost_channelopen(struct inode *inode, struct file *filp)
161 {
162         struct nvhost_channel_userctx *priv;
163         struct nvhost_channel *ch;
164
165         ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
166         ch = nvhost_getchannel(ch);
167         if (!ch)
168                 return -ENOMEM;
169         trace_nvhost_channel_open(dev_name(&ch->dev->dev));
170
171         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
172         if (!priv) {
173                 nvhost_putchannel(ch, NULL);
174                 return -ENOMEM;
175         }
176         filp->private_data = priv;
177         priv->ch = ch;
178         if (nvhost_module_add_client(ch->dev, priv))
179                 goto fail;
180
181         if (ch->ctxhandler && ch->ctxhandler->alloc) {
182                 nvhost_module_busy(ch->dev);
183                 priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
184                 nvhost_module_idle(ch->dev);
185                 if (!priv->hwctx)
186                         goto fail;
187         }
188         priv->priority = NVHOST_PRIORITY_MEDIUM;
189         priv->clientid = atomic_add_return(1,
190                         &nvhost_get_host(ch->dev)->clientid);
191         priv->timeout = CONFIG_TEGRA_GRHOST_DEFAULT_TIMEOUT;
192         priv->timeout_debug_dump = true;
193         if (tegra_platform_is_linsim())
194                 priv->timeout = 0;
195
196         return 0;
197 fail:
198         nvhost_channelrelease(inode, filp);
199         return -ENOMEM;
200 }
201
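/*
 * Validate the submit header received from userspace and allocate a job
 * sized for the advertised cmdbufs, relocs and wait checks.  For V2 and
 * later submits one reloc shift per reloc is expected as well.
 */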
202 static int set_submit(struct nvhost_channel_userctx *ctx)
203 {
204         struct platform_device *ndev = ctx->ch->dev;
205         struct nvhost_master *host = nvhost_get_host(ndev);
206
207         /* submit should have at least 1 cmdbuf */
208         if (!ctx->hdr.num_cmdbufs ||
209                         !nvhost_syncpt_is_valid(&host->syncpt,
210                                 ctx->hdr.syncpt_id))
211                 return -EIO;
212
213         if (!ctx->memmgr) {
214                 dev_err(&ndev->dev, "no nvmap context set\n");
215                 return -EFAULT;
216         }
217
218         if (ctx->job) {
219                 dev_warn(&ndev->dev, "performing channel submit when a job already exists\n");
220                 nvhost_job_put(ctx->job);
221         }
222         ctx->job = nvhost_job_alloc(ctx->ch,
223                         ctx->hwctx,
224                         ctx->hdr.num_cmdbufs,
225                         ctx->hdr.num_relocs,
226                         ctx->hdr.num_waitchks,
227                         1,
228                         ctx->memmgr);
229         if (!ctx->job)
230                 return -ENOMEM;
231         ctx->job->timeout = ctx->timeout;
232         ctx->job->sp->id = ctx->hdr.syncpt_id;
233         ctx->job->sp->incrs = ctx->hdr.syncpt_incrs;
234         ctx->job->hwctx_syncpt_idx = 0;
235         ctx->job->num_syncpts = 1;
236         ctx->job->priority = ctx->priority;
237         ctx->job->clientid = ctx->clientid;
238         ctx->job->timeout_debug_dump = ctx->timeout_debug_dump;
239
240         if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
241                 ctx->num_relocshifts = ctx->hdr.num_relocs;
242
243         return 0;
244 }
245
246 static void reset_submit(struct nvhost_channel_userctx *ctx)
247 {
248         ctx->hdr.num_cmdbufs = 0;
249         ctx->hdr.num_relocs = 0;
250         ctx->num_relocshifts = 0;
251         ctx->hdr.num_waitchks = 0;
252
253         if (ctx->job) {
254                 nvhost_job_put(ctx->job);
255                 ctx->job = NULL;
256         }
257 }
258
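/*
 * write() implements the streamed submit ABI.  Userspace either writes a
 * plain nvhost_submit_hdr first (treated as a V0 submit) or primes the
 * header through the SUBMIT_EXT ioctl, then streams the advertised
 * number of cmdbufs, relocs, wait checks and (V2 onwards) reloc shifts.
 * Only whole structs are consumed; a trailing partial struct ends the
 * write early.
 */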
259 static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
260                                 size_t count, loff_t *offp)
261 {
262         struct nvhost_channel_userctx *priv = filp->private_data;
263         size_t remaining = count;
264         int err = 0;
265         struct nvhost_job *job = priv->job;
266         struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
267         const char *chname = priv->ch->dev->name;
268
269         if (!job)
270                 return -EIO;
271
272         while (remaining) {
273                 size_t consumed;
274                 if (!hdr->num_relocs &&
275                     !priv->num_relocshifts &&
276                     !hdr->num_cmdbufs &&
277                     !hdr->num_waitchks) {
278                         consumed = sizeof(struct nvhost_submit_hdr);
279                         if (remaining < consumed)
280                                 break;
281                         if (copy_from_user(hdr, buf, consumed)) {
282                                 err = -EFAULT;
283                                 break;
284                         }
285                         hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
286                         err = set_submit(priv);
287                         if (err)
288                                 break;
289                         trace_nvhost_channel_write_submit(chname,
290                           count, hdr->num_cmdbufs, hdr->num_relocs,
291                           hdr->syncpt_id, hdr->syncpt_incrs);
292                 } else if (hdr->num_cmdbufs) {
293                         struct nvhost_cmdbuf cmdbuf;
294                         consumed = sizeof(cmdbuf);
295                         if (remaining < consumed)
296                                 break;
297                         if (copy_from_user(&cmdbuf, buf, consumed)) {
298                                 err = -EFAULT;
299                                 break;
300                         }
301                         trace_nvhost_channel_write_cmdbuf(chname,
302                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
303                         nvhost_job_add_gather(job,
304                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
305                         hdr->num_cmdbufs--;
306                 } else if (hdr->num_relocs) {
307                         int numrelocs = remaining / sizeof(struct nvhost_reloc);
308                         if (!numrelocs)
309                                 break;
310                         numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
311                         consumed = numrelocs * sizeof(struct nvhost_reloc);
312                         if (copy_from_user(&job->relocarray[job->num_relocs],
313                                         buf, consumed)) {
314                                 err = -EFAULT;
315                                 break;
316                         }
317                         while (numrelocs) {
318                                 struct nvhost_reloc *reloc =
319                                         &job->relocarray[job->num_relocs];
320                                 trace_nvhost_channel_write_reloc(chname,
321                                         reloc->cmdbuf_mem,
322                                         reloc->cmdbuf_offset,
323                                         reloc->target,
324                                         reloc->target_offset);
325                                 job->num_relocs++;
326                                 hdr->num_relocs--;
327                                 numrelocs--;
328                         }
329                 } else if (hdr->num_waitchks) {
330                         int numwaitchks =
331                                 (remaining / sizeof(struct nvhost_waitchk));
332                         if (!numwaitchks)
333                                 break;
334                         numwaitchks = min_t(int,
335                                 numwaitchks, hdr->num_waitchks);
336                         consumed = numwaitchks * sizeof(struct nvhost_waitchk);
337                         if (copy_from_user(&job->waitchk[job->num_waitchk],
338                                         buf, consumed)) {
339                                 err = -EFAULT;
340                                 break;
341                         }
342                         trace_nvhost_channel_write_waitchks(
343                           chname, numwaitchks);
344                         job->num_waitchk += numwaitchks;
345                         hdr->num_waitchks -= numwaitchks;
346                 } else if (priv->num_relocshifts) {
347                         int next_shift =
348                                 job->num_relocs - priv->num_relocshifts;
349                         int num =
350                                 (remaining / sizeof(struct nvhost_reloc_shift));
351                         if (!num)
352                                 break;
353                         num = min_t(int, num, priv->num_relocshifts);
354                         consumed = num * sizeof(struct nvhost_reloc_shift);
355                         if (copy_from_user(&job->relocshiftarray[next_shift],
356                                         buf, consumed)) {
357                                 err = -EFAULT;
358                                 break;
359                         }
360                         priv->num_relocshifts -= num;
361                 } else {
362                         err = -EFAULT;
363                         break;
364                 }
365                 remaining -= consumed;
366                 buf += consumed;
367         }
368
369         if (err < 0) {
370                 dev_err(&priv->ch->dev->dev, "channel write error\n");
371                 reset_submit(priv);
372                 return err;
373         }
374
375         return count - remaining;
376 }
377
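/*
 * FLUSH/NULL_KICKOFF ioctl: verify the streamed submit is complete, pin
 * its buffers and hand the job to the channel.  The resulting syncpoint
 * fence is returned in args->value.
 */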
378 static int nvhost_ioctl_channel_flush(
379         struct nvhost_channel_userctx *ctx,
380         struct nvhost_get_param_args *args,
381         int null_kickoff)
382 {
383         struct platform_device *ndev = ctx->ch->dev;
384         int err;
385
386         trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);
387
388         if (!ctx->job ||
389             ctx->hdr.num_relocs ||
390             ctx->hdr.num_cmdbufs ||
391             ctx->hdr.num_waitchks) {
392                 reset_submit(ctx);
393                 dev_err(&ndev->dev, "channel submit out of sync\n");
394                 return -EFAULT;
395         }
396
397         err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
398         if (err) {
399                 dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
400                 goto fail;
401         }
402
403         if (nvhost_debug_null_kickoff_pid == current->tgid)
404                 null_kickoff = 1;
405         ctx->job->null_kickoff = null_kickoff;
406
407         if ((nvhost_debug_force_timeout_pid == current->tgid) &&
408             (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
409                 ctx->timeout = nvhost_debug_force_timeout_val;
410         }
411
412         /* context switch if needed, and submit user's gathers to the channel */
413         err = nvhost_channel_submit(ctx->job);
414         args->value = ctx->job->sp->fence;
415
416 fail:
417         if (err)
418                 nvhost_job_unpin(ctx->job);
419
420         nvhost_job_put(ctx->job);
421         ctx->job = NULL;
422
423         return err;
424 }
425
426 static int nvhost_ioctl_channel_alloc_obj_ctx(
427         struct nvhost_channel_userctx *ctx,
428         struct nvhost_alloc_obj_ctx_args *args)
429 {
430         int ret;
431
432         BUG_ON(!channel_op().alloc_obj);
433         ret = channel_op().alloc_obj(ctx->hwctx, args);
434         return ret;
435 }
436
437 static int nvhost_ioctl_channel_free_obj_ctx(
438         struct nvhost_channel_userctx *ctx,
439         struct nvhost_free_obj_ctx_args *args)
440 {
441         int ret;
442
443         BUG_ON(!channel_op().free_obj);
444         ret = channel_op().free_obj(ctx->hwctx, args);
445         return ret;
446 }
447
448 static int nvhost_ioctl_channel_alloc_gpfifo(
449         struct nvhost_channel_userctx *ctx,
450         struct nvhost_alloc_gpfifo_args *args)
451 {
452         int ret;
453
454         BUG_ON(!channel_op().alloc_gpfifo);
455         ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
456         return ret;
457 }
458
459 static int nvhost_ioctl_channel_submit_gpfifo(
460         struct nvhost_channel_userctx *ctx,
461         struct nvhost_submit_gpfifo_args *args)
462 {
463         struct nvhost_gpfifo *gpfifo;
464         u32 size = args->num_entries * sizeof(struct nvhost_gpfifo);
465         int ret = 0;
466
467         gpfifo = kcalloc(args->num_entries, sizeof(*gpfifo), GFP_KERNEL);
468         if (!gpfifo)
469                 return -ENOMEM;
470
471         if (copy_from_user(gpfifo, (void __user *)args->gpfifo, size)) {
472                 ret = -EINVAL;
473                 goto clean_up;
474         }
475
476         BUG_ON(!channel_op().submit_gpfifo);
477         ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
478                         args->num_entries, &args->fence, args->flags);
479 clean_up:
480         kfree(gpfifo);
481         return ret;
482 }
483
484 static int nvhost_ioctl_channel_map_buffer(
485         struct nvhost_channel_userctx *ctx,
486         struct nvhost_map_buffer_args *map_buffer_args)
487 {
488         int ret = 0;
489
490         BUG_ON(!channel_op().map_buffer);
491         ret = channel_op().map_buffer(ctx->hwctx, map_buffer_args);
492         return ret;
493 }
494
495 static int nvhost_ioctl_channel_unmap_buffer(
496         struct nvhost_channel_userctx *ctx,
497         struct nvhost_unmap_buffer_args *args)
498 {
499         int ret;
500
501         BUG_ON(!channel_op().unmap_buffer);
502         ret = channel_op().unmap_buffer(ctx->hwctx, args);
503         return ret;
504 }
505
506 static int nvhost_ioctl_channel_wait(
507         struct nvhost_channel_userctx *ctx,
508         struct nvhost_wait_args *args)
509 {
510         int ret;
511
512         BUG_ON(!channel_op().wait);
513         ret = channel_op().wait(ctx->hwctx, args);
514         return ret;
515 }
516
517 static int nvhost_ioctl_channel_zcull_get_size(
518         struct nvhost_channel_userctx *ctx,
519         struct nvhost_zcull_get_size_args *args)
520 {
521         int ret;
522
523         BUG_ON(!channel_zcull_op().get_size);
524         ret = channel_zcull_op().get_size(ctx->hwctx, args);
525         return ret;
526 }
527
528 static int nvhost_ioctl_channel_zcull_bind(
529         struct nvhost_channel_userctx *ctx,
530         struct nvhost_zcull_bind_args *args)
531 {
532         int ret;
533
534         BUG_ON(!channel_zcull_op().bind);
535         ret = channel_zcull_op().bind(ctx->hwctx, args);
536         return ret;
537 }
538
539 static int nvhost_ioctl_channel_zcull_get_info(
540         struct nvhost_channel_userctx *ctx,
541         struct nvhost_zcull_get_info_args *args)
542 {
543         int ret;
544
545         BUG_ON(!channel_zcull_op().get_info);
546         ret = channel_zcull_op().get_info(ctx->hwctx, args);
547         return ret;
548 }
549
550 static int nvhost_ioctl_channel_zbc_set_table(
551         struct nvhost_channel_userctx *ctx,
552         struct nvhost_zbc_set_table_args *args)
553 {
554         int ret;
555
556         BUG_ON(!channel_zbc_op().set_table);
557         ret = channel_zbc_op().set_table(ctx->hwctx, args);
558         return ret;
559 }
560
561 static int nvhost_ioctl_channel_zbc_query_table(
562         struct nvhost_channel_userctx *ctx,
563         struct nvhost_zbc_query_table_args *args)
564 {
565         int ret;
566
567         BUG_ON(!channel_zbc_op().query_table);
568         ret = channel_zbc_op().query_table(ctx->hwctx, args);
569         return ret;
570 }
571
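/*
 * SUBMIT ioctl: the one-shot submit path.  Cmdbufs, relocs, wait checks
 * and syncpoint increments arrive as user pointers in the args struct;
 * they are copied in, the syncpoint ids validated, the job pinned and
 * queued, and the resulting fence(s) copied back to userspace.
 */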
572 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
573                 struct nvhost_submit_args *args)
574 {
575         struct nvhost_job *job;
576         int num_cmdbufs = args->num_cmdbufs;
577         int num_relocs = args->num_relocs;
578         int num_waitchks = args->num_waitchks;
579         int num_syncpt_incrs = args->num_syncpt_incrs;
580         struct nvhost_cmdbuf __user *cmdbufs =
581                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
582         struct nvhost_reloc __user *relocs =
583                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
584         struct nvhost_reloc_shift __user *reloc_shifts =
585                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
586         struct nvhost_waitchk __user *waitchks =
587                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
588         struct nvhost_syncpt_incr __user *syncpt_incrs =
589                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
590         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
591         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
592
593         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
594         u32 *local_waitbases = NULL;
595         int err, i, hwctx_syncpt_idx = -1;
596
597         if (num_syncpt_incrs > host->info.nb_pts)
598                 return -EINVAL;
599
600         job = nvhost_job_alloc(ctx->ch,
601                         ctx->hwctx,
602                         num_cmdbufs,
603                         num_relocs,
604                         num_waitchks,
605                         num_syncpt_incrs,
606                         ctx->memmgr);
607         if (!job)
608                 return -ENOMEM;
609
610         job->num_relocs = args->num_relocs;
611         job->num_waitchk = args->num_waitchks;
612         job->num_syncpts = args->num_syncpt_incrs;
613         job->priority = ctx->priority;
614         job->clientid = ctx->clientid;
615
616         while (num_cmdbufs) {
617                 struct nvhost_cmdbuf cmdbuf;
618                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
619                 if (err)
620                         goto fail;
621                 nvhost_job_add_gather(job,
622                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
623                 num_cmdbufs--;
624                 cmdbufs++;
625         }
626
627         err = copy_from_user(job->relocarray,
628                         relocs, sizeof(*relocs) * num_relocs);
629         if (err)
630                 goto fail;
631
632         err = copy_from_user(job->relocshiftarray,
633                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
634         if (err)
635                 goto fail;
636
637         err = copy_from_user(job->waitchk,
638                         waitchks, sizeof(*waitchks) * num_waitchks);
639         if (err)
640                 goto fail;
641
642         /* mass copy waitbases */
643         if (args->waitbases) {
644                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
645                         GFP_KERNEL);
646                 if (!local_waitbases ||
647                     copy_from_user(local_waitbases, waitbases,
648                                 sizeof(u32) * num_syncpt_incrs)) {
649                         err = local_waitbases ? -EFAULT : -ENOMEM;
650                         goto fail;
651                 }
652         }
653
654         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
655         if (!ctx->hwctx)
656                 hwctx_syncpt_idx = 0;
657
658         /*
659          * Go through each syncpoint from userspace. Here we:
660          * - Copy syncpoint information
661          * - Validate each syncpoint
662          * - Determine waitbase for each syncpoint
663          * - Determine the index of hwctx syncpoint in the table
664          */
665
666         for (i = 0; i < num_syncpt_incrs; ++i) {
667                 u32 waitbase;
668                 struct nvhost_syncpt_incr sp;
669
670                 /* Copy */
671                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
672                 if (err)
673                         goto fail;
674
675                 /* Validate */
676                 if (sp.syncpt_id >= host->info.nb_pts) {
677                         err = -EINVAL;
678                         goto fail;
679                 }
680
681                 /* Determine waitbase */
682                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
683                         waitbase = local_waitbases[i];
684                 else
685                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
686                                 sp.syncpt_id);
687
688                 /* Store */
689                 job->sp[i].id = sp.syncpt_id;
690                 job->sp[i].incrs = sp.syncpt_incrs;
691                 job->sp[i].waitbase = waitbase;
692
693                 /* Find hwctx syncpoint */
694                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
695                         hwctx_syncpt_idx = i;
696         }
697
698         /* not needed anymore */
699         kfree(local_waitbases);
700         local_waitbases = NULL;
701
702         /* Is hwctx_syncpt_idx valid? */
703         if (hwctx_syncpt_idx == -1) {
704                 err = -EINVAL;
705                 goto fail;
706         }
707
708         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
709
710         trace_nvhost_channel_submit(ctx->ch->dev->name,
711                 job->num_gathers, job->num_relocs, job->num_waitchk,
712                 job->sp[job->hwctx_syncpt_idx].id,
713                 job->sp[job->hwctx_syncpt_idx].incrs);
714
715         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
716         if (err)
717                 goto fail;
718
719         if (args->timeout)
720                 job->timeout = min(ctx->timeout, args->timeout);
721         else
722                 job->timeout = ctx->timeout;
723         job->timeout_debug_dump = ctx->timeout_debug_dump;
724
725         err = nvhost_channel_submit(job);
726         if (err)
727                 goto fail_submit;
728
729         /* Deliver multiple fences back to the userspace */
730         if (fences)
731                 for (i = 0; i < num_syncpt_incrs; ++i) {
732                         u32 fence = job->sp[i].fence;
733                         err = copy_to_user(fences, &fence, sizeof(u32));
734                         if (err)
735                                 break;
736                         fences++;
737                 }
738
739         args->fence = job->sp[job->hwctx_syncpt_idx].fence;
740
741         nvhost_job_put(job);
742
743         return 0;
744
745 fail_submit:
746         nvhost_job_unpin(job);
747 fail:
748         nvhost_job_put(job);
749         kfree(local_waitbases);
750         return err;
751 }
752
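/*
 * SET_CTXSWITCH ioctl: replace this descriptor's hardware context with
 * one whose save/restore sequences come from user-supplied command
 * buffers.  Exactly one cmdbuf, reloc and syncpoint increment per
 * direction is accepted, and they must use the device's first syncpoint
 * and waitbase.
 */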
753 static int nvhost_ioctl_channel_set_ctxswitch(
754                 struct nvhost_channel_userctx *ctx,
755                 struct nvhost_set_ctxswitch_args *args)
756 {
757         struct nvhost_cmdbuf cmdbuf_save;
758         struct nvhost_cmdbuf cmdbuf_restore;
759         struct nvhost_syncpt_incr save_incr, restore_incr;
760         u32 save_waitbase, restore_waitbase;
761         struct nvhost_reloc reloc;
762         struct nvhost_hwctx_handler *ctxhandler = NULL;
763         struct nvhost_hwctx *nhwctx = NULL;
764         struct user_hwctx *hwctx;
765         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
766         int err;
767
768         /* Only channels with context support */
769         if (!ctx->hwctx)
770                 return -EFAULT;
771
772         /* Only one cmdbuf, reloc and syncpt increment per save/restore is supported for now */
773         if (args->num_cmdbufs_save != 1
774                         || args->num_cmdbufs_restore != 1
775                         || args->num_save_incrs != 1
776                         || args->num_restore_incrs != 1
777                         || args->num_relocs != 1)
778                 return -EINVAL;
779
780         err = copy_from_user(&cmdbuf_save,
781                         (void *)(uintptr_t)args->cmdbuf_save,
782                         sizeof(cmdbuf_save));
783         if (err)
784                 goto fail;
785
786         err = copy_from_user(&cmdbuf_restore,
787                         (void *)(uintptr_t)args->cmdbuf_restore,
788                         sizeof(cmdbuf_restore));
789         if (err)
790                 goto fail;
791
792         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
793                         sizeof(reloc));
794         if (err)
795                 goto fail;
796
797         err = copy_from_user(&save_incr,
798                         (void *)(uintptr_t)args->save_incrs,
799                         sizeof(save_incr));
800         if (err)
801                 goto fail;
802         err = copy_from_user(&save_waitbase,
803                         (void *)(uintptr_t)args->save_waitbases,
804                         sizeof(save_waitbase));
805
806         err = copy_from_user(&restore_incr,
807                         (void *)(uintptr_t)args->restore_incrs,
808                         sizeof(restore_incr));
809         if (err)
810                 goto fail;
811         err = copy_from_user(&restore_waitbase,
812                         (void *)(uintptr_t)args->restore_waitbases,
813                         sizeof(restore_waitbase));
814
815         if (save_incr.syncpt_id != pdata->syncpts[0]
816                         || restore_incr.syncpt_id != pdata->syncpts[0]
817                         || save_waitbase != pdata->waitbases[0]
818                         || restore_waitbase != pdata->waitbases[0]) {
819                 err = -EINVAL;
820                 goto fail;
821         }
822         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
823                         save_waitbase, ctx->ch);
824         if (!ctxhandler) {
825                 err = -ENOMEM;
826                 goto fail;
827         }
828
829         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
830         if (!nhwctx) {
831                 err = -ENOMEM;
832                 goto fail_hwctx;
833         }
834         hwctx = to_user_hwctx(nhwctx);
835
836         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
837                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
838                         cmdbuf_restore.mem, cmdbuf_restore.offset,
839                         cmdbuf_restore.words,
840                         pdata->syncpts[0], pdata->waitbases[0],
841                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
842
843         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
844         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
845                         cmdbuf_restore.offset, cmdbuf_restore.words);
846         if (err)
847                 goto fail_set_restore;
848
849         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
850                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
851         if (err)
852                 goto fail_set_save;
853
854         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
855         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
856
857         /* Free old context */
858         ctx->hwctx->h->put(ctx->hwctx);
859         ctx->hwctx = nhwctx;
860
861         return 0;
862
863 fail_set_save:
864 fail_set_restore:
865         ctxhandler->put(&hwctx->hwctx);
866 fail_hwctx:
867         user_ctxhandler_free(ctxhandler);
868 fail:
869         return err;
870 }
871
872 static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
873         struct nvhost_read_3d_reg_args *args)
874 {
875         return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
876                         args->offset, &args->value);
877 }
878
879 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
880 {
881         int i;
882         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
883
884         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
885                 if (pdata->clocks[i].moduleid == moduleid)
886                         return i;
887         }
888
889         /* Old user space may pass a random module id in args; fall back
890          * to clock zero in that case. */
891         return 0;
892 }
893
894 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
895         struct nvhost_clk_rate_args *arg)
896 {
897         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
898                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
899         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
900                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
901         int index = moduleid ?
902                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
903
904         return nvhost_module_set_rate(ctx->ch->dev,
905                         ctx, arg->rate, index, attr);
906 }
907
908 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
909         u32 moduleid, u32 *rate)
910 {
911         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
912
913         return nvhost_module_get_rate(ctx->ch->dev,
914                         (unsigned long *)rate, index);
915 }
916
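/*
 * MODULE_REGRDWR ioctl: read or write a list of register blocks on the
 * client device.  Each block is transferred in batches of at most 64
 * words through an on-stack bounce buffer.
 */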
917 static int nvhost_ioctl_channel_module_regrdwr(
918         struct nvhost_channel_userctx *ctx,
919         struct nvhost_ctrl_module_regrdwr_args *args)
920 {
921         u32 num_offsets = args->num_offsets;
922         u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
923         u32 __user *values = (u32 *)(uintptr_t)args->values;
924         u32 vals[64];
925         struct platform_device *ndev;
926
927         trace_nvhost_ioctl_channel_module_regrdwr(args->id,
928                 args->num_offsets, args->write);
929
930         /* Check that there is something to read and that block size is
931          * u32 aligned */
932         if (num_offsets == 0 || args->block_size & 3)
933                 return -EINVAL;
934
935         ndev = ctx->ch->dev;
936
937         while (num_offsets--) {
938                 int err;
939                 u32 offs;
940                 int remaining = args->block_size >> 2;
941
942                 if (get_user(offs, offsets))
943                         return -EFAULT;
944
945                 offsets++;
946                 while (remaining) {
947                         int batch = min(remaining, 64);
948                         if (args->write) {
949                                 if (copy_from_user(vals, values,
950                                                 batch * sizeof(u32)))
951                                         return -EFAULT;
952
953                                 err = nvhost_write_module_regs(ndev,
954                                         offs, batch, vals);
955                                 if (err)
956                                         return err;
957                         } else {
958                                 err = nvhost_read_module_regs(ndev,
959                                                 offs, batch, vals);
960                                 if (err)
961                                         return err;
962
963                                 if (copy_to_user(values, vals,
964                                                 batch * sizeof(u32)))
965                                         return -EFAULT;
966                         }
967
968                         remaining -= batch;
969                         offs += batch * sizeof(u32);
970                         values += batch;
971                 }
972         }
973
974         return 0;
975 }
976
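/*
 * Build a bitmask from an array of ids (syncpoints, waitbases or module
 * mutexes).  The scan stops at the first zero entry or at an id too
 * large to be represented in the mask.
 */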
977 static u32 create_mask(u32 *words, int num)
978 {
979         int i;
980         u32 word = 0;
981         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
982                 word |= BIT(words[i]);
983
984         return word;
985 }
986
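/*
 * Main ioctl dispatcher for channel nodes.  Arguments are staged in an
 * on-stack buffer: copied in for _IOC_WRITE commands, handed to the
 * per-command handler, and copied back out for _IOC_READ commands on
 * success.
 */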
987 static long nvhost_channelctl(struct file *filp,
988         unsigned int cmd, unsigned long arg)
989 {
990         struct nvhost_channel_userctx *priv = filp->private_data;
991         struct device *dev = &priv->ch->dev->dev;
992         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
993         int err = 0;
994
995         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
996                 (_IOC_NR(cmd) == 0) ||
997                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
998                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
999                 return -EFAULT;
1000
1001         if (_IOC_DIR(cmd) & _IOC_WRITE) {
1002                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1003                         return -EFAULT;
1004         }
1005
1006         switch (cmd) {
1007         case NVHOST_IOCTL_CHANNEL_FLUSH:
1008                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
1009                 break;
1010         case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
1011                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
1012                 break;
1013         case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
1014         {
1015                 struct nvhost_submit_hdr_ext *hdr;
1016
1017                 if (priv->hdr.num_relocs ||
1018                     priv->num_relocshifts ||
1019                     priv->hdr.num_cmdbufs ||
1020                     priv->hdr.num_waitchks) {
1021                         reset_submit(priv);
1022                         dev_err(&priv->ch->dev->dev,
1023                                 "channel submit out of sync\n");
1024                         err = -EIO;
1025                         break;
1026                 }
1027
1028                 hdr = (struct nvhost_submit_hdr_ext *)buf;
1029                 if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
1030                         dev_err(&priv->ch->dev->dev,
1031                                 "submit version %d > max supported %d\n",
1032                                 hdr->submit_version,
1033                                 NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
1034                         err = -EINVAL;
1035                         break;
1036                 }
1037                 memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
1038                 err = set_submit(priv);
1039                 trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
1040                         priv->hdr.submit_version,
1041                         priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
1042                         priv->hdr.num_waitchks,
1043                         priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
1044                 break;
1045         }
1046         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
1047         {
1048                 struct nvhost_device_data *pdata =
1049                         platform_get_drvdata(priv->ch->dev);
1050                 ((struct nvhost_get_param_args *)buf)->value =
1051                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
1052                 break;
1053         }
1054         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
1055         {
1056                 struct nvhost_device_data *pdata =
1057                         platform_get_drvdata(priv->ch->dev);
1058                 struct nvhost_get_param_arg *arg =
1059                         (struct nvhost_get_param_arg *)buf;
1060                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
1061                                 || !pdata->syncpts[arg->param])
1062                         return -EINVAL;
1063                 arg->value = pdata->syncpts[arg->param];
1064                 break;
1065         }
1066         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
1067         {
1068                 struct nvhost_device_data *pdata =
1069                         platform_get_drvdata(priv->ch->dev);
1070                 ((struct nvhost_get_param_args *)buf)->value =
1071                         create_mask(pdata->waitbases,
1072                                         NVHOST_MODULE_MAX_WAITBASES);
1073                 break;
1074         }
1075         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
1076         {
1077                 struct nvhost_device_data *pdata =
1078                         platform_get_drvdata(priv->ch->dev);
1079                 struct nvhost_get_param_arg *arg =
1080                         (struct nvhost_get_param_arg *)buf;
1081                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
1082                                 || !pdata->waitbases[arg->param])
1083                         return -EINVAL;
1084                 arg->value = pdata->waitbases[arg->param];
1085                 break;
1086         }
1087         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
1088         {
1089                 struct nvhost_device_data *pdata =
1090                         platform_get_drvdata(priv->ch->dev);
1091                 ((struct nvhost_get_param_args *)buf)->value =
1092                         create_mask(pdata->modulemutexes,
1093                                         NVHOST_MODULE_MAX_MODMUTEXES);
1094                 break;
1095         }
1096         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
1097         {
1098                 struct nvhost_device_data *pdata =
1099                         platform_get_drvdata(priv->ch->dev);
1100                 struct nvhost_get_param_arg *arg =
1101                         (struct nvhost_get_param_arg *)buf;
1102                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
1103                                 || !pdata->modulemutexes[arg->param])
1104                         return -EINVAL;
1105                 arg->value = pdata->modulemutexes[arg->param];
1106                 break;
1107         }
1108         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
1109         {
1110                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
1111                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
1112
1113                 if (IS_ERR(new_client)) {
1114                         err = PTR_ERR(new_client);
1115                         break;
1116                 }
1117                 if (priv->memmgr)
1118                         nvhost_memmgr_put_mgr(priv->memmgr);
1119
1120                 priv->memmgr = new_client;
1121
1122                 if (priv->hwctx)
1123                         priv->hwctx->memmgr = new_client;
1124
1125                 break;
1126         }
1127         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
1128                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
1129                 break;
1130         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
1131                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
1132                 break;
1133         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
1134                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
1135                 break;
1136         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
1137                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
1138                 break;
1139         case NVHOST_IOCTL_CHANNEL_MAP_BUFFER:
1140                 err = nvhost_ioctl_channel_map_buffer(priv, (void *)buf);
1141                 break;
1142         case NVHOST_IOCTL_CHANNEL_UNMAP_BUFFER:
1143                 err = nvhost_ioctl_channel_unmap_buffer(priv, (void *)buf);
1144                 break;
1145         case NVHOST_IOCTL_CHANNEL_WAIT:
1146                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
1147                 break;
1148         case NVHOST_IOCTL_CHANNEL_ZCULL_GET_SIZE:
1149                 err = nvhost_ioctl_channel_zcull_get_size(priv, (void *)buf);
1150                 break;
1151         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
1152                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
1153                 break;
1154         case NVHOST_IOCTL_CHANNEL_ZCULL_GET_INFO:
1155                 err = nvhost_ioctl_channel_zcull_get_info(priv, (void *)buf);
1156                 break;
1157         case NVHOST_IOCTL_CHANNEL_ZBC_SET_TABLE:
1158                 err = nvhost_ioctl_channel_zbc_set_table(priv, (void *)buf);
1159                 break;
1160         case NVHOST_IOCTL_CHANNEL_ZBC_QUERY_TABLE:
1161                 err = nvhost_ioctl_channel_zbc_query_table(priv, (void *)buf);
1162                 break;
1163         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
1164                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
1165                 break;
1166         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
1167         {
1168                 struct nvhost_clk_rate_args *arg =
1169                                 (struct nvhost_clk_rate_args *)buf;
1170
1171                 err = nvhost_ioctl_channel_get_rate(priv,
1172                                 arg->moduleid, &arg->rate);
1173                 break;
1174         }
1175         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
1176         {
1177                 struct nvhost_clk_rate_args *arg =
1178                                 (struct nvhost_clk_rate_args *)buf;
1179
1180                 err = nvhost_ioctl_channel_set_rate(priv, arg);
1181                 break;
1182         }
1183         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1184                 priv->timeout =
1185                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1186                 dev_dbg(&priv->ch->dev->dev,
1187                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1188                         __func__, priv->timeout, priv);
1189                 break;
1190         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1191                 ((struct nvhost_get_param_args *)buf)->value =
1192                                 priv->hwctx ? priv->hwctx->has_timedout : 0;
1193                 break;
1194         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1195                 priv->priority =
1196                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
1197                 break;
1198         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1199                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
1200                 break;
1201         case NVHOST_IOCTL_CHANNEL_SUBMIT:
1202                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1203                 break;
1204         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1205                 priv->timeout = (u32)
1206                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
1207                 priv->timeout_debug_dump = !((u32)
1208                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1209                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1210                 dev_dbg(&priv->ch->dev->dev,
1211                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1212                         __func__, priv->timeout, priv);
1213                 break;
1214         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
1215                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
1216                 break;
1217         default:
1218                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
1219                 err = -ENOTTY;
1220                 break;
1221         }
1222
1223         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1224                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)) ? -EFAULT : 0;
1225
1226         return err;
1227 }
1228
1229 static const struct file_operations nvhost_channelops = {
1230         .owner = THIS_MODULE,
1231         .release = nvhost_channelrelease,
1232         .open = nvhost_channelopen,
1233         .write = nvhost_channelwrite,
1234         .unlocked_ioctl = nvhost_channelctl
1235 };
1236
1237 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
1238 {
1239         struct nvhost_channel_userctx *userctx;
1240         struct file *f = fget(fd);
1241         if (!f)
1242                 return NULL;
1243
1244         if (f->f_op != &nvhost_channelops) {
1245                 fput(f);
1246                 return NULL;
1247         }
1248
1249         userctx = (struct nvhost_channel_userctx *)f->private_data;
1250         fput(f);
1251         return userctx->hwctx;
1252 }
1253
1254
1255 static const struct file_operations nvhost_asops = {
1256         .owner = THIS_MODULE,
1257         .release = nvhost_as_dev_release,
1258         .open = nvhost_as_dev_open,
1259         .unlocked_ioctl = nvhost_as_dev_ctl,
1260 };
1261
1262 static struct {
1263         int class_id;
1264         const char *dev_name;
1265 } class_id_dev_name_map[] = {
1266         /*      { NV_HOST1X_CLASS_ID, ""}, */
1267         { NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
1268         { NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
1269         { NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
1270         { NV_GRAPHICS_GPU_CLASS_ID, "gr3d"},  /* TBD: move to "gpu" */
1271         { NV_GRAPHICS_VIC_CLASS_ID, "vic"},
1272         { NV_TSEC_CLASS_ID, "tsec" },
1273 };
1274
1275 static struct {
1276         int module_id;
1277         const char *dev_name;
1278 } module_id_dev_name_map[] = {
1279         { NVHOST_MODULE_VI, "vi"},
1280         { NVHOST_MODULE_ISP, "isp"},
1281         { NVHOST_MODULE_MPE, "mpe"},
1282         { NVHOST_MODULE_MSENC, "msenc"},
1283         { NVHOST_MODULE_TSEC, "tsec"},
1284         { NVHOST_MODULE_GPU, "gpu"},
1285         { NVHOST_MODULE_VIC, "vic"},
1286 };
1287
1288 static const char *get_device_name_for_dev(struct nvhost_device *dev)
1289 {
1290         int i;
1291         /* first choice is to use the class id if specified */
1292         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++)
1293                 if (dev->class == class_id_dev_name_map[i].class_id)
1294                         return class_id_dev_name_map[i].dev_name;
1295
1296         /* second choice is module name if specified */
1297         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++)
1298                 if (dev->moduleid == module_id_dev_name_map[i].module_id)
1299                         return module_id_dev_name_map[i].dev_name;
1300
1301
1302         /* last choice is to just use the given dev name */
1303         return dev->name;
1304 }
1305
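/*
 * Create the user-space interface for a client device: a character
 * device and device node for channel submits, plus a second minor for
 * the address space interface (as-<dev>).
 */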
1306 int nvhost_client_user_init(struct platform_device *dev)
1307 {
1308         int err;
1309         dev_t devno;
1310         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1311         struct nvhost_channel *ch = pdata->channel;
1312         struct nvhost_master *host = nvhost_get_host(dev);
1313         const char *use_dev_name;
1314
1315         BUG_ON(!ch);
1316         BUG_ON(!host);
1317         /* reserve 2 minors, one each for <dev> and as-<dev> */
1318         err = alloc_chrdev_region(&devno, 0, 2, IFACE_NAME);
1319         if (err < 0) {
1320                 dev_err(&dev->dev, "failed to allocate devno\n");
1321                 goto fail;
1322         }
1323
1324         cdev_init(&ch->cdev, &nvhost_channelops);
1325         ch->cdev.owner = THIS_MODULE;
1326
1327         err = cdev_add(&ch->cdev, devno, 1);
1328         if (err < 0) {
1329                 dev_err(&dev->dev,
1330                         "failed to add chan %i cdev\n", pdata->index);
1331                 goto fail;
1332         }
1333         use_dev_name = get_device_name_for_dev(dev);
1334
1335         ch->node = device_create(nvhost_get_host(dev)->nvhost_class,
1336                                  NULL, devno, NULL,
1337                                  (dev->id == 0) ?
1338                                  IFACE_NAME "-%s" :
1339                                  IFACE_NAME "-%s.%d",
1340                                  use_dev_name, dev->id);
1341
1342         if (IS_ERR(ch->node)) {
1343                 err = PTR_ERR(ch->node);
1344                 dev_err(&dev->dev,
1345                         "failed to create %s channel device for %s\n",
1346                         use_dev_name, dev->name);
1347                 goto fail;
1348         }
1349
1350         /* do the same as above for the address space driver */
1351         cdev_init(&ch->as_cdev, &nvhost_asops);
1352         ch->as_cdev.owner = THIS_MODULE;
1353
1354         ++devno; /* create a new minor for as-<dev> */
1355         err = cdev_add(&ch->as_cdev, devno, 1);
1356         if (err < 0) {
1357                 dev_err(&dev->dev,
1358                         "failed to add chan %i as_cdev\n", pdata->index);
1359                 goto fail;
1360         }
1361         ch->as_node = device_create(host->nvhost_class, NULL, devno, NULL,
1362                                     (dev->id == 0) ?
1363                                     IFACE_NAME "-as-%s" :
1364                                     IFACE_NAME "-as-%s.%d",
1365                                     use_dev_name, dev->id);
1366         if (IS_ERR(ch->as_node)) {
1367                 err = PTR_ERR(ch->as_node);
1368                 dev_err(&dev->dev,
1369                         "failed to create chan aspace %i device\n", pdata->index);
1370                 goto fail;
1371         }
1372
1373         return 0;
1374 fail:
1375         return err;
1376 }
1377
1378 int nvhost_client_device_init(struct platform_device *dev)
1379 {
1380         int err;
1381         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1382         struct nvhost_channel *ch;
1383         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1384
1385         ch = nvhost_alloc_channel(dev);
1386         if (ch == NULL)
1387                 return -ENODEV;
1388
1389         /* store the pointer to this device for channel */
1390         ch->dev = dev;
1391
1392         /* Create debugfs directory for the device */
1393         nvhost_device_debug_init(dev);
1394
1395         err = nvhost_channel_init(ch, nvhost_master, pdata->index);
1396         if (err)
1397                 goto fail;
1398
1399         err = nvhost_client_user_init(dev);
1400         if (err)
1401                 goto fail;
1402
1403         if (tickctrl_op().init_channel)
1404                 tickctrl_op().init_channel(dev);
1405
1406         err = nvhost_device_list_add(dev);
1407         if (err)
1408                 goto fail;
1409
1410         if (pdata->scaling_init)
1411                 pdata->scaling_init(dev);
1412
1413         /* reset syncpoint values for this unit */
1414         nvhost_module_busy(nvhost_master->dev);
1415         nvhost_syncpt_reset_client(dev);
1416         nvhost_module_idle(nvhost_master->dev);
1417
1418         dev_info(&dev->dev, "initialized\n");
1419
1420         return 0;
1421
1422 fail:
1423         /* Add clean-up */
1424         nvhost_free_channel(ch);
1425         return err;
1426 }
1427 EXPORT_SYMBOL(nvhost_client_device_init);
1428
1429 int nvhost_client_device_release(struct platform_device *dev)
1430 {
1431         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1432         struct nvhost_channel *ch;
1433         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1434
1435         ch = pdata->channel;
1436
1437         /* Release nvhost module resources */
1438         nvhost_module_deinit(dev);
1439
1440         /* Remove from nvhost device list */
1441         nvhost_device_list_remove(dev);
1442
1443         /* Release chardev and device node for user space */
1444         device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
1445         cdev_del(&ch->cdev);
1446
1447         /* Free nvhost channel */
1448         nvhost_free_channel(ch);
1449
1450         return 0;
1451 }
1452 EXPORT_SYMBOL(nvhost_client_device_release);
1453
1454 int nvhost_client_device_suspend(struct device *dev)
1455 {
1456         int ret = 0;
1457         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1458
1459         ret = nvhost_channel_suspend(pdata->channel);
1460         if (ret)
1461                 return ret;
1462
1463         dev_info(dev, "suspend status: %d\n", ret);
1464
1465         return ret;
1466 }
1467
1468 int nvhost_client_device_resume(struct device *dev)
1469 {
1470         dev_info(dev, "resuming\n");
1471         return 0;
1472 }
1473
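/*
 * Claim and ioremap each IO memory resource of the client device,
 * storing the mappings in pdata->aperture[].  On failure, everything
 * claimed so far is released again.
 */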
1474 int nvhost_client_device_get_resources(struct platform_device *dev)
1475 {
1476         struct resource *r[NVHOST_MODULE_MAX_IORESOURCE_MEM];
1477         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1478         int n;
1479
1480         if (dev->num_resources > NVHOST_MODULE_MAX_IORESOURCE_MEM) {
1481                 dev_err(&dev->dev, "too many io mem resources: %d, max is %d\n",
1482                         dev->num_resources, NVHOST_MODULE_MAX_IORESOURCE_MEM);
1483                 return -ENOMEM;
1484         }
1485
1486         for (n = 0; n < NVHOST_MODULE_MAX_IORESOURCE_MEM; n++)
1487                 r[n] = NULL;
1488
1489         for (n = 0; n < dev->num_resources; n++) {
1490                 r[n] = platform_get_resource(dev, IORESOURCE_MEM, n);
1491                 if (!r[n])
1492                         goto fail;
1493
1494                 pdata->reg_mem[n] = request_mem_region(r[n]->start,
1495                                                      resource_size(r[n]),
1496                                                      dev_name(&dev->dev));
1497                 if (!pdata->reg_mem[n])
1498                         goto fail;
1499
1500                 pdata->aperture[n] = ioremap(r[n]->start, resource_size(r[n]));
1501                 if (!pdata->aperture[n])
1502                         goto fail;
1503         }
1504
1505         return 0;
1506
1507 fail:
1508         for (n = 0; n < dev->num_resources; n++) {
1509                 if (r[n]) {
1510                         if (pdata->aperture[n])
1511                                 iounmap(pdata->aperture[n]);
1512                         if (pdata->reg_mem[n])
1513                                 release_mem_region(r[n]->start, resource_size(r[n]));
1514                 }
1515         }
1516         dev_err(&dev->dev, "failed to get register memory\n");
1517         return -ENXIO;
1518 }
1519 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1520
1521 /* Simple wrapper around request_firmware() that, when a SoC name is
1522  * available, prefixes 'fw_name' with the SoC-specific directory.
1523  * The caller is responsible for calling release_firmware() later.
1524  */
1525 const struct firmware *
1526 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1527 {
1528         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1529         const struct firmware *fw;
1530         char *fw_path = NULL;
1531         int path_len, err;
1532
1533         if (!fw_name)
1534                 return NULL;
1535
1536         if (op->soc_name) {
1537                 path_len = strlen(fw_name) + strlen(op->soc_name);
1538                 path_len += 2; /* for the path separator and zero terminator */
1539
1540                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1541                                      GFP_KERNEL);
1542                 if (!fw_path)
1543                         return NULL;
1544
1545                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1546                 fw_name = fw_path;
1547         }
1548
1549         err = request_firmware(&fw, fw_name, &dev->dev);
1550         kfree(fw_path);
1551         if (err) {
1552                 dev_err(&dev->dev, "failed to get firmware\n");
1553                 return NULL;
1554         }
1555
1556         /* note: caller must release_firmware */
1557         return fw;
1558 }
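/*
 * Typical usage from a client driver (illustrative sketch only; the
 * firmware name below is a placeholder, not one defined in this file):
 *
 *      const struct firmware *fw;
 *
 *      fw = nvhost_client_request_firmware(pdev, "engine_ucode.bin");
 *      if (!fw)
 *              return -ENOENT;
 *      ... copy fw->data (fw->size bytes) into the engine ...
 *      release_firmware(fw);
 */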