video: tegra: host: Use dma_map_sg()
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * drivers/video/tegra/host/bus_client.c
3  *
4  * Tegra Graphics Host Client Module
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/spinlock.h>
24 #include <linux/fs.h>
25 #include <linux/cdev.h>
26 #include <linux/uaccess.h>
27 #include <linux/file.h>
28 #include <linux/clk.h>
29 #include <linux/hrtimer.h>
30 #include <linux/export.h>
31 #include <linux/firmware.h>
32 #include <linux/dma-mapping.h>
33
34 #include <trace/events/nvhost.h>
35
36 #include <linux/io.h>
37 #include <linux/string.h>
38
39 #include <linux/nvhost.h>
40 #include <linux/nvhost_ioctl.h>
41
42 #include <mach/gpufuse.h>
43 #include <mach/hardware.h>
44
45 #include "debug.h"
46 #include "bus_client.h"
47 #include "dev.h"
48 #include "class_ids.h"
49 #include "nvhost_as.h"
50 #include "nvhost_memmgr.h"
51 #include "chip_support.h"
52 #include "nvhost_acm.h"
53
54 #include "nvhost_syncpt.h"
55 #include "nvhost_channel.h"
56 #include "nvhost_job.h"
57 #include "nvhost_hwctx.h"
58 #include "user_hwctx.h"
59
60 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
61 {
62         struct resource *r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
63         int err = 0;
64
65         if (offset + 4 * count > resource_size(r)
66                         || (offset + 4 * count < offset))
67                 err = -EPERM;
68
69         return err;
70 }
71
72 int nvhost_read_module_regs(struct platform_device *ndev,
73                         u32 offset, int count, u32 *values)
74 {
75         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
76         void __iomem *p = pdata->aperture[0] + offset;
77         int err;
78
79         if (!pdata->aperture[0])
80                 return -ENODEV;
81
82         /* verify offset */
83         err = validate_reg(ndev, offset, count);
84         if (err)
85                 return err;
86
87         nvhost_module_busy(ndev);
88         while (count--) {
89                 *(values++) = readl(p);
90                 p += 4;
91         }
92         rmb();
93         nvhost_module_idle(ndev);
94
95         return 0;
96 }
97
98 int nvhost_write_module_regs(struct platform_device *ndev,
99                         u32 offset, int count, const u32 *values)
100 {
101         void __iomem *p;
102         int err;
103         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
104
105         if (!pdata->aperture[0])
106                 return -ENODEV;
107
108         p = pdata->aperture[0] + offset;
109
110         /* verify offset */
111         err = validate_reg(ndev, offset, count);
112         if (err)
113                 return err;
114
115         nvhost_module_busy(ndev);
116         while (count--) {
117                 writel(*(values++), p);
118                 p += 4;
119         }
120         wmb();
121         nvhost_module_idle(ndev);
122
123         return 0;
124 }
125
/* Per-open-file state for a channel device node. */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;		/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;		/* hw context, if the class uses one */
	struct nvhost_submit_hdr_ext hdr;	/* header of the submit in progress */
	int num_relocshifts;			/* reloc shifts still expected (v2+) */
	struct nvhost_job *job;			/* job being assembled via write() */
	struct mem_mgr *memmgr;			/* memory manager handle */
	u32 timeout;				/* job timeout; 0 disables it */
	u32 priority;				/* NVHOST_PRIORITY_* */
	int clientid;				/* unique id, see nvhost_channelopen() */
	bool timeout_debug_dump;		/* dump state when a job times out */
};
138
/*
 * release() handler for a channel device node: tears down everything
 * nvhost_channelopen() set up.  Also used directly as the cleanup path
 * of a failed open.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		/* Forget this context as the channel's current one so a
		 * later submit does not restore from freed state */
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	/* Drop a submit assembled via write() but never flushed */
	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch);

	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
170
/*
 * open() handler for a channel device node: takes a channel reference,
 * allocates the per-fd context and, for classes that use one, a
 * hardware context.  Failures reuse nvhost_channelrelease() for
 * cleanup.
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(dev_name(&ch->dev->dev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	if(nvhost_module_add_client(ch->dev, priv))
		goto fail;

	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		/* hwctx allocation may touch hardware; keep it powered */
		nvhost_module_busy(ch->dev);
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		nvhost_module_idle(ch->dev);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	/* unique per-open id used for job tracking/tracing */
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);
	pdata = platform_get_drvdata(ch->dev);
	priv->timeout = pdata->nvhost_timeout_default;
	priv->timeout_debug_dump = true;
	/* pre-silicon platforms are slow; disable job timeouts there */
	if (!tegra_platform_is_silicon())
		priv->timeout = 0;

	return 0;
fail:
	/* NOTE(review): every failure is reported as -ENOMEM, even when
	 * nvhost_module_add_client() failed for another reason. */
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
214
/*
 * Start a new job from the submit header already copied into ctx->hdr.
 *
 * Returns 0 on success, -EIO for a header with no cmdbufs or an
 * invalid syncpoint id, -EFAULT when no memory-manager context is set,
 * or -ENOMEM when job allocation fails.
 */
static int set_submit(struct nvhost_channel_userctx *ctx)
{
	struct platform_device *ndev = ctx->ch->dev;
	struct nvhost_master *host = nvhost_get_host(ndev);

	/* submit should have at least 1 cmdbuf */
	if (!ctx->hdr.num_cmdbufs ||
			!nvhost_syncpt_is_valid(&host->syncpt,
				ctx->hdr.syncpt_id))
		return -EIO;

	if (!ctx->memmgr) {
		dev_err(&ndev->dev, "no nvmap context set\n");
		return -EFAULT;
	}

	/* a leftover job means the previous submit was never flushed */
	if (ctx->job) {
		dev_warn(&ndev->dev, "performing channel submit when a job already exists\n");
		nvhost_job_put(ctx->job);
	}
	/* size the job from the header counts; single syncpoint */
	ctx->job = nvhost_job_alloc(ctx->ch,
			ctx->hwctx,
			ctx->hdr.num_cmdbufs,
			ctx->hdr.num_relocs,
			ctx->hdr.num_waitchks,
			1,
			ctx->memmgr);
	if (!ctx->job)
		return -ENOMEM;
	ctx->job->timeout = ctx->timeout;
	ctx->job->sp->id = ctx->hdr.syncpt_id;
	ctx->job->sp->incrs = ctx->hdr.syncpt_incrs;
	ctx->job->hwctx_syncpt_idx = 0;
	ctx->job->num_syncpts = 1;
	ctx->job->priority = ctx->priority;
	ctx->job->clientid = ctx->clientid;
	ctx->job->timeout_debug_dump = ctx->timeout_debug_dump;

	/* version 2 submits stream one reloc shift per reloc afterwards */
	if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
		ctx->num_relocshifts = ctx->hdr.num_relocs;

	return 0;
}
258
259 static void reset_submit(struct nvhost_channel_userctx *ctx)
260 {
261         ctx->hdr.num_cmdbufs = 0;
262         ctx->hdr.num_relocs = 0;
263         ctx->num_relocshifts = 0;
264         ctx->hdr.num_waitchks = 0;
265
266         if (ctx->job) {
267                 nvhost_job_put(ctx->job);
268                 ctx->job = NULL;
269         }
270 }
271
/*
 * write() handler for a channel device node (legacy submit path).
 *
 * Userspace streams a submit as one nvhost_submit_hdr followed by the
 * cmdbufs, relocs, waitchks and (submit version >= 2) reloc shifts the
 * header announced.  This is a state machine over priv->hdr: the
 * remaining counts decide which record type the next bytes belong to.
 * A partial structure at the end of the buffer is not consumed; the
 * short return value lets userspace resend it.
 *
 * Returns the number of bytes consumed, or a negative errno after
 * resetting the whole pending submit.
 */
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;
	struct nvhost_job *job = priv->job;
	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
	const char *chname = priv->ch->dev->name;

	/* a job must already exist (see set_submit()) */
	if (!job)
		return -EIO;

	while (remaining) {
		size_t consumed;
		/* all counts drained: the next record is a new header */
		if (!hdr->num_relocs &&
		    !priv->num_relocshifts &&
		    !hdr->num_cmdbufs &&
		    !hdr->num_waitchks) {
			consumed = sizeof(struct nvhost_submit_hdr);
			if (remaining < consumed)
				break;
			if (copy_from_user(hdr, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			/* plain write() headers are always version 0 */
			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
			err = set_submit(priv);
			if (err)
				break;
			trace_nvhost_channel_write_submit(chname,
			  count, hdr->num_cmdbufs, hdr->num_relocs,
			  hdr->syncpt_id, hdr->syncpt_incrs);
		} else if (hdr->num_cmdbufs) {
			/* one gather (cmdbuf reference) at a time */
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_cmdbuf(chname,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			hdr->num_cmdbufs--;
		} else if (hdr->num_relocs) {
			/* relocs are bulk-copied straight into the job */
			int numrelocs = remaining / sizeof(struct nvhost_reloc);
			if (!numrelocs)
				break;
			numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
			consumed = numrelocs * sizeof(struct nvhost_reloc);
			if (copy_from_user(&job->relocarray[job->num_relocs],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			/* trace each reloc while advancing the counters */
			while (numrelocs) {
				struct nvhost_reloc *reloc =
					&job->relocarray[job->num_relocs];
				trace_nvhost_channel_write_reloc(chname,
					reloc->cmdbuf_mem,
					reloc->cmdbuf_offset,
					reloc->target,
					reloc->target_offset);
				job->num_relocs++;
				hdr->num_relocs--;
				numrelocs--;
			}
		} else if (hdr->num_waitchks) {
			/* wait checks are bulk-copied as well */
			int numwaitchks =
				(remaining / sizeof(struct nvhost_waitchk));
			if (!numwaitchks)
				break;
			numwaitchks = min_t(int,
				numwaitchks, hdr->num_waitchks);
			consumed = numwaitchks * sizeof(struct nvhost_waitchk);
			if (copy_from_user(&job->waitchk[job->num_waitchk],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_waitchks(
			  chname, numwaitchks);
			job->num_waitchk += numwaitchks;
			hdr->num_waitchks -= numwaitchks;
		} else if (priv->num_relocshifts) {
			/* reloc shifts (v2+) follow the relocs they refine;
			 * next_shift indexes the first shift not yet received */
			int next_shift =
				job->num_relocs - priv->num_relocshifts;
			int num =
				(remaining / sizeof(struct nvhost_reloc_shift));
			if (!num)
				break;
			num = min_t(int, num, priv->num_relocshifts);
			consumed = num * sizeof(struct nvhost_reloc_shift);
			if (copy_from_user(&job->relocshiftarray[next_shift],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->num_relocshifts -= num;
		} else {
			/* unreachable unless the state machine is corrupt */
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		/* any error aborts and resets the whole pending submit */
		dev_err(&priv->ch->dev->dev, "channel write error\n");
		reset_submit(priv);
		return err;
	}

	return count - remaining;
}
390
/*
 * Finish a write()-assembled submit: pin its buffers and hand the job
 * to the channel.  @null_kickoff requests a submit without real work.
 *
 * Returns 0 on success, storing the job's fence in args->value, or a
 * negative errno.  The pending job is consumed either way.
 */
static int nvhost_ioctl_channel_flush(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_get_param_args *args,
	int null_kickoff)
{
	struct platform_device *ndev = to_platform_device(&ctx->ch->dev->dev);
	int err;

	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

	/* all header counts must be drained, otherwise userspace
	 * stopped streaming mid-submit */
	if (!ctx->job ||
	    ctx->hdr.num_relocs ||
	    ctx->hdr.num_cmdbufs ||
	    ctx->hdr.num_waitchks) {
		reset_submit(ctx);
		dev_err(&ndev->dev, "channel submit out of sync\n");
		return -EFAULT;
	}

	err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
	if (err) {
		dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
		goto fail;
	}

	/* debug knobs: force a null kickoff / forced timeout per pid */
	if (nvhost_debug_null_kickoff_pid == current->tgid)
		null_kickoff = 1;
	ctx->job->null_kickoff = null_kickoff;

	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
		ctx->timeout = nvhost_debug_force_timeout_val;
	}

	/* context switch if needed, and submit user's gathers to the channel */
	err = nvhost_channel_submit(ctx->job);
	args->value = ctx->job->sp->fence;

fail:
	/* NOTE(review): unpin also runs when nvhost_job_pin() itself
	 * failed — presumably it tolerates a partially pinned job;
	 * confirm before changing this. */
	if (err)
		nvhost_job_unpin(ctx->job);

	nvhost_job_put(ctx->job);
	ctx->job = NULL;

	return err;
}
438
439 static int nvhost_ioctl_channel_alloc_obj_ctx(
440         struct nvhost_channel_userctx *ctx,
441         struct nvhost_alloc_obj_ctx_args *args)
442 {
443         int ret;
444
445         BUG_ON(!channel_op().alloc_obj);
446         nvhost_module_busy(ctx->ch->dev);
447         ret = channel_op().alloc_obj(ctx->hwctx, args);
448         nvhost_module_idle(ctx->ch->dev);
449         return ret;
450 }
451
452 static int nvhost_ioctl_channel_alloc_obj_ctx_old(
453         struct nvhost_channel_userctx *ctx,
454         struct nvhost_alloc_obj_ctx_old_args *args)
455 {
456         struct nvhost_alloc_obj_ctx_args new_args;
457         int err;
458
459         new_args.class_num = args->class_num;
460         err = nvhost_ioctl_channel_alloc_obj_ctx(ctx, &new_args);
461         if (!err)
462                 args->obj_id = new_args.obj_id;
463         return err;
464 }
465
466 static int nvhost_ioctl_channel_free_obj_ctx(
467         struct nvhost_channel_userctx *ctx,
468         struct nvhost_free_obj_ctx_args *args)
469 {
470         int ret;
471
472         BUG_ON(!channel_op().free_obj);
473         nvhost_module_busy(ctx->ch->dev);
474         ret = channel_op().free_obj(ctx->hwctx, args);
475         nvhost_module_idle(ctx->ch->dev);
476         return ret;
477 }
478
479 static int nvhost_ioctl_channel_free_obj_ctx_old(
480         struct nvhost_channel_userctx *ctx,
481         struct nvhost_free_obj_ctx_old_args *args)
482 {
483         struct nvhost_free_obj_ctx_args new_args;
484         new_args.obj_id = args->obj_id;
485         return nvhost_ioctl_channel_free_obj_ctx(ctx, &new_args);
486 }
487
488 static int nvhost_ioctl_channel_alloc_gpfifo(
489         struct nvhost_channel_userctx *ctx,
490         struct nvhost_alloc_gpfifo_args *args)
491 {
492         int ret;
493
494         BUG_ON(!channel_op().alloc_gpfifo);
495         nvhost_module_busy(ctx->ch->dev);
496         ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
497         nvhost_module_idle(ctx->ch->dev);
498         return ret;
499 }
500
501 static int nvhost_ioctl_channel_submit_gpfifo(
502         struct nvhost_channel_userctx *ctx,
503         struct nvhost_submit_gpfifo_args *args)
504 {
505         void *gpfifo;
506         u32 size;
507         int ret = 0;
508
509         size = args->num_entries * sizeof(struct nvhost_gpfifo);
510
511         gpfifo = kzalloc(size, GFP_KERNEL);
512         if (IS_ERR_OR_NULL(gpfifo))
513                 return -ENOMEM;
514
515         if (copy_from_user(gpfifo,
516                            (void __user *)(uintptr_t)args->gpfifo, size)) {
517                 ret = -EINVAL;
518                 goto clean_up;
519         }
520
521         BUG_ON(!channel_op().submit_gpfifo);
522
523         nvhost_module_busy(ctx->ch->dev);
524         ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
525                         args->num_entries, &args->fence, args->flags);
526         nvhost_module_idle(ctx->ch->dev);
527 clean_up:
528         kfree(gpfifo);
529         return ret;
530 }
531
532 static int nvhost_ioctl_channel_submit_gpfifo_old(
533         struct nvhost_channel_userctx *ctx,
534         struct nvhost_submit_gpfifo_old_args *args)
535 {
536         int ret;
537         struct nvhost_submit_gpfifo_args new_args;
538
539         new_args.gpfifo = (u64)(uintptr_t)args->gpfifo;
540         new_args.num_entries = args->num_entries;
541         new_args.fence = args->fence;
542         new_args.flags = args->flags;
543         ret = nvhost_ioctl_channel_submit_gpfifo(ctx, &new_args);
544         if (!ret)
545                 args->fence = new_args.fence;
546         return ret;
547 }
548
549 static int nvhost_ioctl_channel_wait(
550         struct nvhost_channel_userctx *ctx,
551         struct nvhost_wait_args *args)
552 {
553         int ret;
554
555         BUG_ON(!channel_op().wait);
556         nvhost_module_busy(ctx->ch->dev);
557         ret = channel_op().wait(ctx->hwctx, args);
558         nvhost_module_idle(ctx->ch->dev);
559         return ret;
560 }
561
562 static int nvhost_ioctl_channel_zcull_bind(
563         struct nvhost_channel_userctx *ctx,
564         struct nvhost_zcull_bind_args *args)
565 {
566         int ret;
567
568         BUG_ON(!channel_zcull_op().bind);
569         nvhost_module_busy(ctx->ch->dev);
570         ret = channel_zcull_op().bind(ctx->hwctx, args);
571         nvhost_module_idle(ctx->ch->dev);
572         return ret;
573 }
574
575 static int nvhost_ioctl_channel_zcull_bind_old(
576         struct nvhost_channel_userctx *ctx,
577         struct nvhost_zcull_bind_old_args *args)
578 {
579         struct nvhost_zcull_bind_args new_args;
580
581         new_args.gpu_va = args->gpu_va;
582         new_args.mode = args->mode;
583         return nvhost_ioctl_channel_zcull_bind(ctx, &new_args);
584 }
585
586 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
587                 struct nvhost_submit_args *args)
588 {
589         struct nvhost_job *job;
590         int num_cmdbufs = args->num_cmdbufs;
591         int num_relocs = args->num_relocs;
592         int num_waitchks = args->num_waitchks;
593         int num_syncpt_incrs = args->num_syncpt_incrs;
594         struct nvhost_cmdbuf __user *cmdbufs =
595                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
596         struct nvhost_reloc __user *relocs =
597                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
598         struct nvhost_reloc_shift __user *reloc_shifts =
599                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
600         struct nvhost_waitchk __user *waitchks =
601                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
602         struct nvhost_syncpt_incr __user *syncpt_incrs =
603                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
604         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
605         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
606
607         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
608         u32 *local_waitbases = NULL;
609         int err, i, hwctx_syncpt_idx = -1;
610
611         if (num_syncpt_incrs > host->info.nb_pts)
612                 return -EINVAL;
613
614         job = nvhost_job_alloc(ctx->ch,
615                         ctx->hwctx,
616                         num_cmdbufs,
617                         num_relocs,
618                         num_waitchks,
619                         num_syncpt_incrs,
620                         ctx->memmgr);
621         if (!job)
622                 return -ENOMEM;
623
624         job->num_relocs = args->num_relocs;
625         job->num_waitchk = args->num_waitchks;
626         job->num_syncpts = args->num_syncpt_incrs;
627         job->priority = ctx->priority;
628         job->clientid = ctx->clientid;
629
630         while (num_cmdbufs) {
631                 struct nvhost_cmdbuf cmdbuf;
632                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
633                 if (err)
634                         goto fail;
635                 nvhost_job_add_gather(job,
636                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
637                 num_cmdbufs--;
638                 cmdbufs++;
639         }
640
641         err = copy_from_user(job->relocarray,
642                         relocs, sizeof(*relocs) * num_relocs);
643         if (err)
644                 goto fail;
645
646         err = copy_from_user(job->relocshiftarray,
647                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
648         if (err)
649                 goto fail;
650
651         err = copy_from_user(job->waitchk,
652                         waitchks, sizeof(*waitchks) * num_waitchks);
653         if (err)
654                 goto fail;
655
656         /* mass copy waitbases */
657         if (args->waitbases) {
658                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
659                         GFP_KERNEL);
660                 err = copy_from_user(local_waitbases, waitbases,
661                         sizeof(u32) * num_syncpt_incrs);
662                 if (err) {
663                         err = -EINVAL;
664                         goto fail;
665                 }
666         }
667
668         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
669         if (!ctx->hwctx)
670                 hwctx_syncpt_idx = 0;
671
672         /*
673          * Go through each syncpoint from userspace. Here we:
674          * - Copy syncpoint information
675          * - Validate each syncpoint
676          * - Determine waitbase for each syncpoint
677          * - Determine the index of hwctx syncpoint in the table
678          */
679
680         for (i = 0; i < num_syncpt_incrs; ++i) {
681                 u32 waitbase;
682                 struct nvhost_syncpt_incr sp;
683
684                 /* Copy */
685                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
686                 if (err)
687                         goto fail;
688
689                 /* Validate */
690                 if (sp.syncpt_id > host->info.nb_pts) {
691                         err = -EINVAL;
692                         goto fail;
693                 }
694
695                 /* Determine waitbase */
696                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
697                         waitbase = local_waitbases[i];
698                 else
699                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
700                                 sp.syncpt_id);
701
702                 /* Store */
703                 job->sp[i].id = sp.syncpt_id;
704                 job->sp[i].incrs = sp.syncpt_incrs;
705                 job->sp[i].waitbase = waitbase;
706
707                 /* Find hwctx syncpoint */
708                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
709                         hwctx_syncpt_idx = i;
710         }
711
712         /* not needed anymore */
713         kfree(local_waitbases);
714         local_waitbases = NULL;
715
716         /* Is hwctx_syncpt_idx valid? */
717         if (hwctx_syncpt_idx == -1) {
718                 err = -EINVAL;
719                 goto fail;
720         }
721
722         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
723
724         trace_nvhost_channel_submit(ctx->ch->dev->name,
725                 job->num_gathers, job->num_relocs, job->num_waitchk,
726                 job->sp[job->hwctx_syncpt_idx].id,
727                 job->sp[job->hwctx_syncpt_idx].incrs);
728
729         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
730         if (err)
731                 goto fail;
732
733         if (args->timeout)
734                 job->timeout = min(ctx->timeout, args->timeout);
735         else
736                 job->timeout = ctx->timeout;
737         job->timeout_debug_dump = ctx->timeout_debug_dump;
738
739         err = nvhost_channel_submit(job);
740         if (err)
741                 goto fail_submit;
742
743         /* Deliver multiple fences back to the userspace */
744         if (fences)
745                 for (i = 0; i < num_syncpt_incrs; ++i) {
746                         u32 fence = job->sp[i].fence;
747                         err = copy_to_user(fences, &fence, sizeof(u32));
748                         if (err)
749                                 break;
750                         fences++;
751                 }
752
753         /* Deliver the fence using the old mechanism _only_ if a single
754          * syncpoint is used. */
755
756         if (num_syncpt_incrs == 1)
757                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
758         else
759                 args->fence = 0;
760
761         nvhost_job_put(job);
762
763         return 0;
764
765 fail_submit:
766         nvhost_job_unpin(job);
767 fail:
768         nvhost_job_put(job);
769         kfree(local_waitbases);
770         return err;
771 }
772
773 static int nvhost_ioctl_channel_set_ctxswitch(
774                 struct nvhost_channel_userctx *ctx,
775                 struct nvhost_set_ctxswitch_args *args)
776 {
777         struct nvhost_cmdbuf cmdbuf_save;
778         struct nvhost_cmdbuf cmdbuf_restore;
779         struct nvhost_syncpt_incr save_incr, restore_incr;
780         u32 save_waitbase, restore_waitbase;
781         struct nvhost_reloc reloc;
782         struct nvhost_hwctx_handler *ctxhandler = NULL;
783         struct nvhost_hwctx *nhwctx = NULL;
784         struct user_hwctx *hwctx;
785         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
786         int err;
787
788         /* Only channels with context support */
789         if (!ctx->hwctx)
790                 return -EFAULT;
791
792         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
793         if (args->num_cmdbufs_save != 1
794                         || args->num_cmdbufs_restore != 1
795                         || args->num_save_incrs != 1
796                         || args->num_restore_incrs != 1
797                         || args->num_relocs != 1)
798                 return -EINVAL;
799
800         err = copy_from_user(&cmdbuf_save,
801                         (void *)(uintptr_t)args->cmdbuf_save,
802                         sizeof(cmdbuf_save));
803         if (err)
804                 goto fail;
805
806         err = copy_from_user(&cmdbuf_restore,
807                         (void *)(uintptr_t)args->cmdbuf_restore,
808                         sizeof(cmdbuf_restore));
809         if (err)
810                 goto fail;
811
812         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
813                         sizeof(reloc));
814         if (err)
815                 goto fail;
816
817         err = copy_from_user(&save_incr,
818                         (void *)(uintptr_t)args->save_incrs,
819                         sizeof(save_incr));
820         if (err)
821                 goto fail;
822         err = copy_from_user(&save_waitbase,
823                         (void *)(uintptr_t)args->save_waitbases,
824                         sizeof(save_waitbase));
825
826         err = copy_from_user(&restore_incr,
827                         (void *)(uintptr_t)args->restore_incrs,
828                         sizeof(restore_incr));
829         if (err)
830                 goto fail;
831         err = copy_from_user(&restore_waitbase,
832                         (void *)(uintptr_t)args->restore_waitbases,
833                         sizeof(restore_waitbase));
834
835         if (save_incr.syncpt_id != pdata->syncpts[0]
836                         || restore_incr.syncpt_id != pdata->syncpts[0]
837                         || save_waitbase != pdata->waitbases[0]
838                         || restore_waitbase != pdata->waitbases[0]) {
839                 err = -EINVAL;
840                 goto fail;
841         }
842         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
843                         save_waitbase, ctx->ch);
844         if (!ctxhandler) {
845                 err = -ENOMEM;
846                 goto fail;
847         }
848
849         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
850         if (!nhwctx) {
851                 err = -ENOMEM;
852                 goto fail_hwctx;
853         }
854         hwctx = to_user_hwctx(nhwctx);
855
856         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
857                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
858                         cmdbuf_restore.mem, cmdbuf_restore.offset,
859                         cmdbuf_restore.words,
860                         pdata->syncpts[0], pdata->waitbases[0],
861                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
862
863         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
864         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
865                         cmdbuf_restore.offset, cmdbuf_restore.words);
866         if (err)
867                 goto fail_set_restore;
868
869         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
870                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
871         if (err)
872                 goto fail_set_save;
873
874         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
875         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
876
877         /* Free old context */
878         ctx->hwctx->h->put(ctx->hwctx);
879         ctx->hwctx = nhwctx;
880
881         return 0;
882
883 fail_set_save:
884 fail_set_restore:
885         ctxhandler->put(&hwctx->hwctx);
886 fail_hwctx:
887         user_ctxhandler_free(ctxhandler);
888 fail:
889         return err;
890 }
891
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
/*
 * Forward a cycle-stats request to the chip-specific channel op.
 * The op table must provide a cycle_stats handler when this config
 * option is enabled (enforced by the BUG_ON).
 */
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	BUG_ON(!channel_op().cycle_stats);
	return channel_op().cycle_stats(ctx->hwctx, args);
}
#endif
903
/*
 * NVHOST_IOCTL_CHANNEL_READ_3D_REG handler: read one register through
 * the channel, storing the result in args->value.  Thin wrapper around
 * nvhost_channel_read_reg(); returns its error code.
 */
static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}
910
911 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
912 {
913         int i;
914         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
915
916         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
917                 if (pdata->clocks[i].moduleid == moduleid)
918                         return i;
919         }
920
921         /* Old user space is sending a random number in args. Return clock
922          * zero in these cases. */
923         return 0;
924 }
925
926 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
927         struct nvhost_clk_rate_args *arg)
928 {
929         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
930                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
931         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
932                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
933         int index = moduleid ?
934                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
935
936         return nvhost_module_set_rate(ctx->ch->dev,
937                         ctx, arg->rate, index, attr);
938 }
939
940 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
941         u32 moduleid, u32 *rate)
942 {
943         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
944
945         return nvhost_module_get_rate(ctx->ch->dev,
946                         (unsigned long *)rate, index);
947 }
948
949 static int nvhost_ioctl_channel_module_regrdwr(
950         struct nvhost_channel_userctx *ctx,
951         struct nvhost_ctrl_module_regrdwr_args *args)
952 {
953         u32 num_offsets = args->num_offsets;
954         u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
955         u32 __user *values = (u32 *)(uintptr_t)args->values;
956         u32 vals[64];
957         struct platform_device *ndev;
958
959         trace_nvhost_ioctl_channel_module_regrdwr(args->id,
960                 args->num_offsets, args->write);
961
962         /* Check that there is something to read and that block size is
963          * u32 aligned */
964         if (num_offsets == 0 || args->block_size & 3)
965                 return -EINVAL;
966
967         ndev = ctx->ch->dev;
968
969         while (num_offsets--) {
970                 int err;
971                 u32 offs;
972                 int remaining = args->block_size >> 2;
973
974                 if (get_user(offs, offsets))
975                         return -EFAULT;
976
977                 offsets++;
978                 while (remaining) {
979                         int batch = min(remaining, 64);
980                         if (args->write) {
981                                 if (copy_from_user(vals, values,
982                                                 batch * sizeof(u32)))
983                                         return -EFAULT;
984
985                                 err = nvhost_write_module_regs(ndev,
986                                         offs, batch, vals);
987                                 if (err)
988                                         return err;
989                         } else {
990                                 err = nvhost_read_module_regs(ndev,
991                                                 offs, batch, vals);
992                                 if (err)
993                                         return err;
994
995                                 if (copy_to_user(values, vals,
996                                                 batch * sizeof(u32)))
997                                         return -EFAULT;
998                         }
999
1000                         remaining -= batch;
1001                         offs += batch * sizeof(u32);
1002                         values += batch;
1003                 }
1004         }
1005
1006         return 0;
1007 }
1008
1009 static u32 create_mask(u32 *words, int num)
1010 {
1011         int i;
1012         u32 word = 0;
1013         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
1014                 word |= BIT(words[i]);
1015
1016         return word;
1017 }
1018
1019 static long nvhost_channelctl(struct file *filp,
1020         unsigned int cmd, unsigned long arg)
1021 {
1022         struct nvhost_channel_userctx *priv = filp->private_data;
1023         struct device *dev = &priv->ch->dev->dev;
1024         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
1025         int err = 0;
1026
1027         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
1028                 (_IOC_NR(cmd) == 0) ||
1029                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
1030                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
1031                 return -EFAULT;
1032
1033         if (_IOC_DIR(cmd) & _IOC_WRITE) {
1034                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1035                         return -EFAULT;
1036         }
1037
1038         switch (cmd) {
1039         case NVHOST_IOCTL_CHANNEL_FLUSH:
1040                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
1041                 break;
1042         case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
1043                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
1044                 break;
1045         case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
1046         {
1047                 struct nvhost_submit_hdr_ext *hdr;
1048
1049                 if (priv->hdr.num_relocs ||
1050                     priv->num_relocshifts ||
1051                     priv->hdr.num_cmdbufs ||
1052                     priv->hdr.num_waitchks) {
1053                         reset_submit(priv);
1054                         dev_err(&priv->ch->dev->dev,
1055                                 "channel submit out of sync\n");
1056                         err = -EIO;
1057                         break;
1058                 }
1059
1060                 hdr = (struct nvhost_submit_hdr_ext *)buf;
1061                 if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
1062                         dev_err(&priv->ch->dev->dev,
1063                                 "submit version %d > max supported %d\n",
1064                                 hdr->submit_version,
1065                                 NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
1066                         err = -EINVAL;
1067                         break;
1068                 }
1069                 memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
1070                 err = set_submit(priv);
1071                 trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
1072                         priv->hdr.submit_version,
1073                         priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
1074                         priv->hdr.num_waitchks,
1075                         priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
1076                 break;
1077         }
1078         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
1079         {
1080                 struct nvhost_device_data *pdata = \
1081                         platform_get_drvdata(priv->ch->dev);
1082                 ((struct nvhost_get_param_args *)buf)->value =
1083                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
1084                 break;
1085         }
1086         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
1087         {
1088                 struct nvhost_device_data *pdata = \
1089                         platform_get_drvdata(priv->ch->dev);
1090                 struct nvhost_get_param_arg *arg =
1091                         (struct nvhost_get_param_arg *)buf;
1092                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
1093                                 || !pdata->syncpts[arg->param])
1094                         return -EINVAL;
1095                 arg->value = pdata->syncpts[arg->param];
1096                 break;
1097         }
1098         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
1099         {
1100                 struct nvhost_device_data *pdata = \
1101                         platform_get_drvdata(priv->ch->dev);
1102                 ((struct nvhost_get_param_args *)buf)->value =
1103                         create_mask(pdata->waitbases,
1104                                         NVHOST_MODULE_MAX_WAITBASES);
1105                 break;
1106         }
1107         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
1108         {
1109                 struct nvhost_device_data *pdata = \
1110                         platform_get_drvdata(priv->ch->dev);
1111                 struct nvhost_get_param_arg *arg =
1112                         (struct nvhost_get_param_arg *)buf;
1113                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
1114                                 || !pdata->waitbases[arg->param])
1115                         return -EINVAL;
1116                 arg->value = pdata->waitbases[arg->param];
1117                 break;
1118         }
1119         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
1120         {
1121                 struct nvhost_device_data *pdata = \
1122                         platform_get_drvdata(priv->ch->dev);
1123                 ((struct nvhost_get_param_args *)buf)->value =
1124                         create_mask(pdata->modulemutexes,
1125                                         NVHOST_MODULE_MAX_MODMUTEXES);
1126                 break;
1127         }
1128         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
1129         {
1130                 struct nvhost_device_data *pdata = \
1131                         platform_get_drvdata(priv->ch->dev);
1132                 struct nvhost_get_param_arg *arg =
1133                         (struct nvhost_get_param_arg *)buf;
1134                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
1135                                 || !pdata->modulemutexes[arg->param])
1136                         return -EINVAL;
1137                 arg->value = pdata->modulemutexes[arg->param];
1138                 break;
1139         }
1140         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
1141         {
1142                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
1143                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
1144
1145                 if (IS_ERR(new_client)) {
1146                         err = PTR_ERR(new_client);
1147                         break;
1148                 }
1149                 if (priv->memmgr)
1150                         nvhost_memmgr_put_mgr(priv->memmgr);
1151
1152                 priv->memmgr = new_client;
1153
1154                 if (priv->hwctx)
1155                         priv->hwctx->memmgr = new_client;
1156
1157                 break;
1158         }
1159         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
1160                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
1161                 break;
1162         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX_OLD:
1163                 err = nvhost_ioctl_channel_alloc_obj_ctx_old(priv, (void *)buf);
1164                 break;
1165         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
1166                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
1167                 break;
1168         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX_OLD:
1169                 err = nvhost_ioctl_channel_free_obj_ctx_old(priv, (void *)buf);
1170                 break;
1171         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
1172                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
1173                 break;
1174         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
1175                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
1176                 break;
1177         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO_OLD:
1178                 err = nvhost_ioctl_channel_submit_gpfifo_old(priv, (void *)buf);
1179                 break;
1180         case NVHOST_IOCTL_CHANNEL_WAIT:
1181                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
1182                 break;
1183         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
1184                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
1185                 break;
1186         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND_OLD:
1187                 err = nvhost_ioctl_channel_zcull_bind_old(priv, (void *)buf);
1188                 break;
1189
1190 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
1191         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
1192                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
1193                 break;
1194 #endif
1195         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
1196                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
1197                 break;
1198         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
1199         {
1200                 struct nvhost_clk_rate_args *arg =
1201                                 (struct nvhost_clk_rate_args *)buf;
1202
1203                 err = nvhost_ioctl_channel_get_rate(priv,
1204                                 arg->moduleid, &arg->rate);
1205                 break;
1206         }
1207         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
1208         {
1209                 struct nvhost_clk_rate_args *arg =
1210                                 (struct nvhost_clk_rate_args *)buf;
1211
1212                 err = nvhost_ioctl_channel_set_rate(priv, arg);
1213                 break;
1214         }
1215         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1216                 priv->timeout =
1217                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1218                 dev_dbg(&priv->ch->dev->dev,
1219                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1220                         __func__, priv->timeout, priv);
1221                 break;
1222         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1223                 ((struct nvhost_get_param_args *)buf)->value =
1224                                 priv->hwctx->has_timedout;
1225                 break;
1226         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1227                 priv->priority =
1228                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
1229                 break;
1230         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1231                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
1232                 break;
1233         case NVHOST_IOCTL_CHANNEL_SUBMIT:
1234                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1235                 break;
1236         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1237                 priv->timeout = (u32)
1238                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
1239                 priv->timeout_debug_dump = !((u32)
1240                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1241                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1242                 dev_dbg(&priv->ch->dev->dev,
1243                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1244                         __func__, priv->timeout, priv);
1245                 break;
1246         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
1247                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
1248                 break;
1249         default:
1250                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
1251                 err = -ENOTTY;
1252                 break;
1253         }
1254
1255         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1256                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1257
1258         return err;
1259 }
1260
/* File operations for the per-channel device node created in
 * nvhost_client_user_init(). */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.write = nvhost_channelwrite,
	.unlocked_ioctl = nvhost_channelctl
};
1268
1269 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
1270 {
1271         struct nvhost_channel_userctx *userctx;
1272         struct file *f = fget(fd);
1273         if (!f)
1274                 return 0;
1275
1276         if (f->f_op != &nvhost_channelops) {
1277                 fput(f);
1278                 return 0;
1279         }
1280
1281         userctx = (struct nvhost_channel_userctx *)f->private_data;
1282         fput(f);
1283         return userctx->hwctx;
1284 }
1285
1286
/* File operations for the address-space ("as-") device node. */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
1293
/* Maps a unit's host1x class id to the name suffix used for its device
 * node; consulted first by get_device_name_for_dev(). */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1306
/* Maps a module id to the device-node name suffix; used as the second
 * choice by get_device_name_for_dev() when no class id matches. */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1319
1320 static const char *get_device_name_for_dev(struct platform_device *dev)
1321 {
1322         int i;
1323         /* first choice is to use the class id if specified */
1324         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1325                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1326                 if (pdata->class == class_id_dev_name_map[i].class_id)
1327                         return class_id_dev_name_map[i].dev_name;
1328         }
1329
1330         /* second choice is module name if specified */
1331         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1332                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1333                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1334                         return module_id_dev_name_map[i].dev_name;
1335         }
1336
1337         /* last choice is to just use the given dev name */
1338         return dev->name;
1339 }
1340
1341 static struct device *nvhost_client_device_create(
1342         struct platform_device *pdev, struct cdev *cdev,
1343         const char *cdev_name, int devno,
1344         const struct file_operations *ops)
1345 {
1346         struct nvhost_master *host = nvhost_get_host(pdev);
1347         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1348         const char *use_dev_name;
1349         struct device *dev;
1350         int err;
1351
1352         nvhost_dbg_fn("");
1353
1354         BUG_ON(!host);
1355
1356         cdev_init(cdev, ops);
1357         cdev->owner = THIS_MODULE;
1358
1359         err = cdev_add(cdev, devno, 1);
1360         if (err < 0) {
1361                 dev_err(&pdev->dev,
1362                         "failed to add chan %i cdev\n", pdata->index);
1363                 return NULL;
1364         }
1365         use_dev_name = get_device_name_for_dev(pdev);
1366
1367         dev = device_create(host->nvhost_class,
1368                         NULL, devno, NULL,
1369                         (pdev->id <= 0) ?
1370                         IFACE_NAME "-%s%s" :
1371                         IFACE_NAME "-%s%s.%d",
1372                         cdev_name, use_dev_name, pdev->id);
1373
1374         if (IS_ERR(dev)) {
1375                 err = PTR_ERR(dev);
1376                 dev_err(&pdev->dev,
1377                         "failed to create %s %s device for %s\n",
1378                         use_dev_name, cdev_name, pdev->name);
1379                 return NULL;
1380         }
1381
1382         return dev;
1383 }
1384
1385 int nvhost_client_user_init(struct platform_device *dev)
1386 {
1387         int err, devno;
1388         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1389         struct nvhost_channel *ch = pdata->channel;
1390
1391         BUG_ON(!ch);
1392         // reserve 3 minor #s for <dev> and as-<dev> and ctrl-<dev>
1393
1394         err = alloc_chrdev_region(&devno, 0, 3, IFACE_NAME);
1395         if (err < 0) {
1396                 dev_err(&dev->dev, "failed to allocate devno\n");
1397                 goto fail;
1398         }
1399
1400         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1401                                 "", devno, &nvhost_channelops);
1402         if (ch->node == NULL)
1403                 goto fail;
1404         ++devno;
1405         ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1406                                 "as-", devno, &nvhost_asops);
1407         if (ch->as_node == NULL)
1408                 goto fail;
1409
1410         if (pdata->ctrl_ops) {
1411                 ++devno;
1412                 pdata->ctrl_node = nvhost_client_device_create(dev,
1413                                         &pdata->ctrl_cdev, "ctrl-",
1414                                         devno, pdata->ctrl_ops);
1415                 if (pdata->ctrl_node == NULL)
1416                         goto fail;
1417         }
1418
1419         return 0;
1420 fail:
1421         return err;
1422 }
1423
/*
 * Common initialization for an nvhost client device: allocate and
 * initialize a channel, create the user-space device nodes, register
 * the device in the global list, reset its syncpoints and set up DMA
 * parameters.  Registers a slave platform device if one is configured.
 *
 * Returns 0 on success or a negative errno.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	if (pdata->slave) {
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
	}

	return 0;

fail:
	/* Add clean-up */
	/* NOTE(review): only the channel is freed here; chardev regions
	 * and nodes created by nvhost_client_user_init() are not undone
	 * on this path — verify whether device teardown covers them. */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1483
1484 int nvhost_client_device_release(struct platform_device *dev)
1485 {
1486         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1487         struct nvhost_channel *ch;
1488         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1489
1490         ch = pdata->channel;
1491
1492         /* Release nvhost module resources */
1493         nvhost_module_deinit(dev);
1494
1495         /* Remove from nvhost device list */
1496         nvhost_device_list_remove(dev);
1497
1498         /* Release chardev and device node for user space */
1499         device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
1500         cdev_del(&ch->cdev);
1501
1502         /* Free nvhost channel */
1503         nvhost_free_channel(ch);
1504
1505         return 0;
1506 }
1507 EXPORT_SYMBOL(nvhost_client_device_release);
1508
1509 int nvhost_client_device_suspend(struct device *dev)
1510 {
1511         int ret = 0;
1512         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1513
1514         ret = nvhost_channel_suspend(pdata->channel);
1515         if (ret)
1516                 return ret;
1517
1518         dev_info(dev, "suspend status: %d\n", ret);
1519
1520         return ret;
1521 }
1522
/* PM resume hook: no hardware state is restored here, only a log line
 * is emitted.  Always returns 0. */
int nvhost_client_device_resume(struct device *dev)
{
	dev_info(dev, "resuming\n");
	return 0;
}
1528
1529 int nvhost_client_device_get_resources(struct platform_device *dev)
1530 {
1531         int i;
1532         void __iomem *regs = NULL;
1533         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1534
1535         for (i = 0; i < dev->num_resources; i++) {
1536                 struct resource *r = NULL;
1537
1538                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1539                 /* We've run out of mem resources */
1540                 if (!r)
1541                         break;
1542
1543                 regs = devm_request_and_ioremap(&dev->dev, r);
1544                 if (!regs)
1545                         goto fail;
1546
1547                 pdata->aperture[i] = regs;
1548         }
1549
1550         return 0;
1551
1552 fail:
1553         dev_err(&dev->dev, "failed to get register memory\n");
1554
1555         return -ENXIO;
1556 }
1557 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1558
/* This is a simple wrapper around request_firmware that takes
 * 'fw_name' and if available applies a SOC relative path prefix to it
 * ("<soc_name>/<fw_name>").
 * The caller is responsible for calling release_firmware later.
 * Returns NULL on any failure (missing name, allocation failure, or
 * request_firmware() error).
 */
const struct firmware *
nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
{
	struct nvhost_chip_support *op = nvhost_get_chip_ops();
	const struct firmware *fw;
	char *fw_path = NULL;
	int path_len, err;

	/* This field is NULL when calling from SYS_EXIT.
	   Add a check here to prevent crash in request_firmware */
	if (!current->fs) {
		BUG();
		return NULL;
	}

	if (!fw_name)
		return NULL;

	if (op->soc_name) {
		/* build "<soc_name>/<fw_name>" in a temporary buffer */
		path_len = strlen(fw_name) + strlen(op->soc_name);
		path_len += 2; /* for the path separator and zero terminator*/

		fw_path = kzalloc(sizeof(*fw_path) * path_len,
				     GFP_KERNEL);
		if (!fw_path)
			return NULL;

		sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
		fw_name = fw_path;
	}

	err = request_firmware(&fw, fw_name, &dev->dev);
	/* kfree(NULL) is a no-op, so this is safe when no prefix was built */
	kfree(fw_path);
	if (err) {
		dev_err(&dev->dev, "failed to get firmware\n");
		return NULL;
	}

	/* note: caller must release_firmware */
	return fw;
}