0b483bc9561c98d4d10999d9f7ec4a5dd4fde871
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * drivers/video/tegra/host/bus_client.c
3  *
4  * Tegra Graphics Host Client Module
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/spinlock.h>
24 #include <linux/fs.h>
25 #include <linux/cdev.h>
26 #include <linux/uaccess.h>
27 #include <linux/file.h>
28 #include <linux/clk.h>
29 #include <linux/hrtimer.h>
30 #include <linux/export.h>
31 #include <linux/firmware.h>
32 #include <linux/dma-mapping.h>
33
34 #include <trace/events/nvhost.h>
35
36 #include <linux/io.h>
37 #include <linux/string.h>
38
39 #include <linux/nvhost.h>
40 #include <linux/nvhost_ioctl.h>
41
42 #include <mach/gpufuse.h>
43 #include <mach/hardware.h>
44
45 #include "debug.h"
46 #include "bus_client.h"
47 #include "dev.h"
48 #include "class_ids.h"
49 #include "nvhost_as.h"
50 #include "nvhost_memmgr.h"
51 #include "chip_support.h"
52 #include "nvhost_acm.h"
53
54 #include "nvhost_syncpt.h"
55 #include "nvhost_channel.h"
56 #include "nvhost_job.h"
57 #include "nvhost_hwctx.h"
58 #include "user_hwctx.h"
59
60 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
61 {
62         int err = 0;
63         struct resource *r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
64         if (!r) {
65                 dev_err(&ndev->dev, "failed to get memory resource\n");
66                 return -ENODEV;
67         }
68
69         if (offset + 4 * count > resource_size(r)
70                         || (offset + 4 * count < offset))
71                 err = -EPERM;
72
73         return err;
74 }
75
76 int nvhost_read_module_regs(struct platform_device *ndev,
77                         u32 offset, int count, u32 *values)
78 {
79         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
80         void __iomem *p = pdata->aperture[0] + offset;
81         int err;
82
83         if (!pdata->aperture[0])
84                 return -ENODEV;
85
86         /* verify offset */
87         err = validate_reg(ndev, offset, count);
88         if (err)
89                 return err;
90
91         nvhost_module_busy(ndev);
92         while (count--) {
93                 *(values++) = readl(p);
94                 p += 4;
95         }
96         rmb();
97         nvhost_module_idle(ndev);
98
99         return 0;
100 }
101
102 int nvhost_write_module_regs(struct platform_device *ndev,
103                         u32 offset, int count, const u32 *values)
104 {
105         void __iomem *p;
106         int err;
107         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
108
109         if (!pdata->aperture[0])
110                 return -ENODEV;
111
112         p = pdata->aperture[0] + offset;
113
114         /* verify offset */
115         err = validate_reg(ndev, offset, count);
116         if (err)
117                 return err;
118
119         nvhost_module_busy(ndev);
120         while (count--) {
121                 writel(*(values++), p);
122                 p += 4;
123         }
124         wmb();
125         nvhost_module_idle(ndev);
126
127         return 0;
128 }
129
/* Per-open-file state for a channel device node (one per fd). */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;		/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;		/* hardware context; NULL if the channel has no ctxhandler */
	struct nvhost_submit_hdr_ext hdr;	/* header of the write()-streamed submit in progress */
	int num_relocshifts;			/* reloc shift entries still expected from userspace (V2 headers) */
	struct nvhost_job *job;			/* job being assembled via write(); consumed by flush */
	struct mem_mgr *memmgr;			/* memory manager handle used to pin submit buffers */
	u32 timeout;				/* per-job timeout; forced to 0 (disabled) off-silicon */
	u32 priority;				/* submit priority, NVHOST_PRIORITY_* */
	int clientid;				/* unique id drawn from the host's clientid counter */
	bool timeout_debug_dump;		/* whether to dump debug state when a job times out */
};
142
/*
 * Release callback for a channel fd: tear down everything
 * nvhost_channelopen() set up.  Also used as the error-unwind path of
 * open(), so every step must tolerate partially-initialized state
 * (priv->hwctx and priv->job may be NULL).
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		/* detach the channel's current-context pointer under the
		 * submit lock before dropping our context reference */
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	/* drop a submit that was streamed in but never flushed */
	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch);

	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
174
/*
 * Open callback for a channel device node.  Takes a channel reference,
 * allocates the per-fd state, optionally allocates a hardware context,
 * and seeds timeout/priority defaults.
 *
 * NOTE(review): every failure after priv is allocated funnels through
 * nvhost_channelrelease() and reports -ENOMEM, even when the underlying
 * failure (e.g. nvhost_module_add_client) might not be an allocation
 * error — confirm before relying on the errno.
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(dev_name(&ch->dev->dev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	if(nvhost_module_add_client(ch->dev, priv))
		goto fail;

	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		/* keep the module powered while the hw context is built */
		nvhost_module_busy(ch->dev);
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		nvhost_module_idle(ch->dev);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);
	pdata = platform_get_drvdata(ch->dev);
	priv->timeout = pdata->nvhost_timeout_default;
	priv->timeout_debug_dump = true;
	/* timeouts are disabled on non-silicon (simulation/FPGA) platforms */
	if (!tegra_platform_is_silicon())
		priv->timeout = 0;

	return 0;
fail:
	/* release() tolerates the partially-initialized priv */
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
218
/*
 * Validate the submit header just streamed into @ctx->hdr and allocate
 * the job that subsequent write() chunks will fill in.
 *
 * Returns 0 on success, -EIO for a header with no cmdbufs or an invalid
 * syncpoint id, -EFAULT when no memory manager has been set on the fd,
 * and -ENOMEM when the job cannot be allocated.
 */
static int set_submit(struct nvhost_channel_userctx *ctx)
{
	struct platform_device *ndev = ctx->ch->dev;
	struct nvhost_master *host = nvhost_get_host(ndev);

	/* submit should have at least 1 cmdbuf */
	if (!ctx->hdr.num_cmdbufs ||
			!nvhost_syncpt_is_valid(&host->syncpt,
				ctx->hdr.syncpt_id))
		return -EIO;

	if (!ctx->memmgr) {
		dev_err(&ndev->dev, "no nvmap context set\n");
		return -EFAULT;
	}

	/* a new header silently replaces any job that was never flushed */
	if (ctx->job) {
		dev_warn(&ndev->dev, "performing channel submit when a job already exists\n");
		nvhost_job_put(ctx->job);
	}
	ctx->job = nvhost_job_alloc(ctx->ch,
			ctx->hwctx,
			ctx->hdr.num_cmdbufs,
			ctx->hdr.num_relocs,
			ctx->hdr.num_waitchks,
			1,
			ctx->memmgr);
	if (!ctx->job)
		return -ENOMEM;
	ctx->job->timeout = ctx->timeout;
	/* legacy path uses exactly one syncpoint, stored at index 0 */
	ctx->job->sp->id = ctx->hdr.syncpt_id;
	ctx->job->sp->incrs = ctx->hdr.syncpt_incrs;
	ctx->job->hwctx_syncpt_idx = 0;
	ctx->job->num_syncpts = 1;
	ctx->job->priority = ctx->priority;
	ctx->job->clientid = ctx->clientid;
	ctx->job->timeout_debug_dump = ctx->timeout_debug_dump;

	/* V2 headers are followed by one reloc-shift entry per reloc */
	if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
		ctx->num_relocshifts = ctx->hdr.num_relocs;

	return 0;
}
262
263 static void reset_submit(struct nvhost_channel_userctx *ctx)
264 {
265         ctx->hdr.num_cmdbufs = 0;
266         ctx->hdr.num_relocs = 0;
267         ctx->num_relocshifts = 0;
268         ctx->hdr.num_waitchks = 0;
269
270         if (ctx->job) {
271                 nvhost_job_put(ctx->job);
272                 ctx->job = NULL;
273         }
274 }
275
/*
 * write() handler implementing the legacy streamed submit protocol.
 * The stream begins with a struct nvhost_submit_hdr announcing the item
 * counts, followed by that many cmdbufs, relocs, waitchks and (for V2
 * headers) reloc shifts, in that priority order.  State persists across
 * write() calls in priv->hdr / priv->num_relocshifts.  A trailing
 * partial structure is not consumed: the loop stops and the short byte
 * count tells userspace to resend from there.
 */
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;
	struct nvhost_job *job = priv->job;
	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
	const char *chname = priv->ch->dev->name;

	if (!job)
		return -EIO;

	while (remaining) {
		size_t consumed;
		/* nothing pending from a previous header: the next bytes
		 * must be a fresh submit header */
		if (!hdr->num_relocs &&
		    !priv->num_relocshifts &&
		    !hdr->num_cmdbufs &&
		    !hdr->num_waitchks) {
			consumed = sizeof(struct nvhost_submit_hdr);
			if (remaining < consumed)
				break;
			if (copy_from_user(hdr, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
			err = set_submit(priv);
			if (err)
				break;
			trace_nvhost_channel_write_submit(chname,
			  count, hdr->num_cmdbufs, hdr->num_relocs,
			  hdr->syncpt_id, hdr->syncpt_incrs);
		} else if (hdr->num_cmdbufs) {
			/* cmdbufs are consumed one at a time as gathers */
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_cmdbuf(chname,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			hdr->num_cmdbufs--;
		} else if (hdr->num_relocs) {
			/* relocs are bulk-copied: as many whole entries as
			 * fit in this write, capped by the header count */
			int numrelocs = remaining / sizeof(struct nvhost_reloc);
			if (!numrelocs)
				break;
			numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
			consumed = numrelocs * sizeof(struct nvhost_reloc);
			if (copy_from_user(&job->relocarray[job->num_relocs],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			while (numrelocs) {
				struct nvhost_reloc *reloc =
					&job->relocarray[job->num_relocs];
				trace_nvhost_channel_write_reloc(chname,
					reloc->cmdbuf_mem,
					reloc->cmdbuf_offset,
					reloc->target,
					reloc->target_offset);
				job->num_relocs++;
				hdr->num_relocs--;
				numrelocs--;
			}
		} else if (hdr->num_waitchks) {
			/* waitchks are bulk-copied the same way */
			int numwaitchks =
				(remaining / sizeof(struct nvhost_waitchk));
			if (!numwaitchks)
				break;
			numwaitchks = min_t(int,
				numwaitchks, hdr->num_waitchks);
			consumed = numwaitchks * sizeof(struct nvhost_waitchk);
			if (copy_from_user(&job->waitchk[job->num_waitchk],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_waitchks(
			  chname, numwaitchks);
			job->num_waitchk += numwaitchks;
			hdr->num_waitchks -= numwaitchks;
		} else if (priv->num_relocshifts) {
			/* V2 only: shift entries land at the index of the
			 * reloc they correspond to */
			int next_shift =
				job->num_relocs - priv->num_relocshifts;
			int num =
				(remaining / sizeof(struct nvhost_reloc_shift));
			if (!num)
				break;
			num = min_t(int, num, priv->num_relocshifts);
			consumed = num * sizeof(struct nvhost_reloc_shift);
			if (copy_from_user(&job->relocshiftarray[next_shift],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->num_relocshifts -= num;
		} else {
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		/* drop the half-built job so the stream can restart cleanly */
		dev_err(&priv->ch->dev->dev, "channel write error\n");
		reset_submit(priv);
		return err;
	}

	return count - remaining;
}
394
/*
 * IOCTL: finish the submit streamed in via write() — verify that every
 * item announced in the header actually arrived, pin the buffers, and
 * submit the job to the channel.  The fence for the job's syncpoint is
 * returned in args->value.  The job is consumed (released and cleared)
 * whether or not the submit succeeds.
 */
static int nvhost_ioctl_channel_flush(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_get_param_args *args,
	int null_kickoff)
{
	struct platform_device *ndev = to_platform_device(&ctx->ch->dev->dev);
	int err;

	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

	/* non-zero remaining counts mean the stream is incomplete */
	if (!ctx->job ||
	    ctx->hdr.num_relocs ||
	    ctx->hdr.num_cmdbufs ||
	    ctx->hdr.num_waitchks) {
		reset_submit(ctx);
		dev_err(&ndev->dev, "channel submit out of sync\n");
		return -EFAULT;
	}

	err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
	if (err) {
		dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
		goto fail;
	}

	/* debug knobs: force null kickoff / forced timeout per pid */
	if (nvhost_debug_null_kickoff_pid == current->tgid)
		null_kickoff = 1;
	ctx->job->null_kickoff = null_kickoff;

	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
		ctx->timeout = nvhost_debug_force_timeout_val;
	}

	/* context switch if needed, and submit user's gathers to the channel */
	err = nvhost_channel_submit(ctx->job);
	args->value = ctx->job->sp->fence;

fail:
	if (err)
		nvhost_job_unpin(ctx->job);

	/* the job is done with either way; drop it */
	nvhost_job_put(ctx->job);
	ctx->job = NULL;

	return err;
}
442
443 static int nvhost_ioctl_channel_alloc_obj_ctx(
444         struct nvhost_channel_userctx *ctx,
445         struct nvhost_alloc_obj_ctx_args *args)
446 {
447         int ret;
448
449         BUG_ON(!channel_op().alloc_obj);
450         nvhost_module_busy(ctx->ch->dev);
451         ret = channel_op().alloc_obj(ctx->hwctx, args);
452         nvhost_module_idle(ctx->ch->dev);
453         return ret;
454 }
455
456 static int nvhost_ioctl_channel_alloc_obj_ctx_old(
457         struct nvhost_channel_userctx *ctx,
458         struct nvhost_alloc_obj_ctx_old_args *args)
459 {
460         struct nvhost_alloc_obj_ctx_args new_args;
461         int err;
462
463         new_args.class_num = args->class_num;
464         err = nvhost_ioctl_channel_alloc_obj_ctx(ctx, &new_args);
465         if (!err)
466                 args->obj_id = new_args.obj_id;
467         return err;
468 }
469
470 static int nvhost_ioctl_channel_free_obj_ctx(
471         struct nvhost_channel_userctx *ctx,
472         struct nvhost_free_obj_ctx_args *args)
473 {
474         int ret;
475
476         BUG_ON(!channel_op().free_obj);
477         nvhost_module_busy(ctx->ch->dev);
478         ret = channel_op().free_obj(ctx->hwctx, args);
479         nvhost_module_idle(ctx->ch->dev);
480         return ret;
481 }
482
483 static int nvhost_ioctl_channel_free_obj_ctx_old(
484         struct nvhost_channel_userctx *ctx,
485         struct nvhost_free_obj_ctx_old_args *args)
486 {
487         struct nvhost_free_obj_ctx_args new_args;
488         new_args.obj_id = args->obj_id;
489         return nvhost_ioctl_channel_free_obj_ctx(ctx, &new_args);
490 }
491
492 static int nvhost_ioctl_channel_alloc_gpfifo(
493         struct nvhost_channel_userctx *ctx,
494         struct nvhost_alloc_gpfifo_args *args)
495 {
496         int ret;
497
498         BUG_ON(!channel_op().alloc_gpfifo);
499         nvhost_module_busy(ctx->ch->dev);
500         ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
501         nvhost_module_idle(ctx->ch->dev);
502         return ret;
503 }
504
505 static int nvhost_ioctl_channel_submit_gpfifo(
506         struct nvhost_channel_userctx *ctx,
507         struct nvhost_submit_gpfifo_args *args)
508 {
509         void *gpfifo;
510         u32 size;
511         int ret = 0;
512
513         size = args->num_entries * sizeof(struct nvhost_gpfifo);
514
515         gpfifo = kzalloc(size, GFP_KERNEL);
516         if (IS_ERR_OR_NULL(gpfifo))
517                 return -ENOMEM;
518
519         if (copy_from_user(gpfifo,
520                            (void __user *)(uintptr_t)args->gpfifo, size)) {
521                 ret = -EINVAL;
522                 goto clean_up;
523         }
524
525         BUG_ON(!channel_op().submit_gpfifo);
526
527         nvhost_module_busy(ctx->ch->dev);
528         ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
529                         args->num_entries, &args->fence, args->flags);
530         nvhost_module_idle(ctx->ch->dev);
531 clean_up:
532         kfree(gpfifo);
533         return ret;
534 }
535
536 static int nvhost_ioctl_channel_submit_gpfifo_old(
537         struct nvhost_channel_userctx *ctx,
538         struct nvhost_submit_gpfifo_old_args *args)
539 {
540         int ret;
541         struct nvhost_submit_gpfifo_args new_args;
542
543         new_args.gpfifo = (u64)(uintptr_t)args->gpfifo;
544         new_args.num_entries = args->num_entries;
545         new_args.fence = args->fence;
546         new_args.flags = args->flags;
547         ret = nvhost_ioctl_channel_submit_gpfifo(ctx, &new_args);
548         if (!ret)
549                 args->fence = new_args.fence;
550         return ret;
551 }
552
553 static int nvhost_ioctl_channel_wait(
554         struct nvhost_channel_userctx *ctx,
555         struct nvhost_wait_args *args)
556 {
557         int ret;
558
559         BUG_ON(!channel_op().wait);
560         nvhost_module_busy(ctx->ch->dev);
561         ret = channel_op().wait(ctx->hwctx, args);
562         nvhost_module_idle(ctx->ch->dev);
563         return ret;
564 }
565
566 static int nvhost_ioctl_channel_zcull_bind(
567         struct nvhost_channel_userctx *ctx,
568         struct nvhost_zcull_bind_args *args)
569 {
570         int ret;
571
572         BUG_ON(!channel_zcull_op().bind);
573         nvhost_module_busy(ctx->ch->dev);
574         ret = channel_zcull_op().bind(ctx->hwctx, args);
575         nvhost_module_idle(ctx->ch->dev);
576         return ret;
577 }
578
579 static int nvhost_ioctl_channel_zcull_bind_old(
580         struct nvhost_channel_userctx *ctx,
581         struct nvhost_zcull_bind_old_args *args)
582 {
583         struct nvhost_zcull_bind_args new_args;
584
585         new_args.gpu_va = args->gpu_va;
586         new_args.mode = args->mode;
587         return nvhost_ioctl_channel_zcull_bind(ctx, &new_args);
588 }
589
590 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
591                 struct nvhost_submit_args *args)
592 {
593         struct nvhost_job *job;
594         int num_cmdbufs = args->num_cmdbufs;
595         int num_relocs = args->num_relocs;
596         int num_waitchks = args->num_waitchks;
597         int num_syncpt_incrs = args->num_syncpt_incrs;
598         struct nvhost_cmdbuf __user *cmdbufs =
599                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
600         struct nvhost_reloc __user *relocs =
601                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
602         struct nvhost_reloc_shift __user *reloc_shifts =
603                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
604         struct nvhost_waitchk __user *waitchks =
605                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
606         struct nvhost_syncpt_incr __user *syncpt_incrs =
607                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
608         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
609         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
610
611         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
612         u32 *local_waitbases = NULL;
613         int err, i, hwctx_syncpt_idx = -1;
614
615         if (num_syncpt_incrs > host->info.nb_pts)
616                 return -EINVAL;
617
618         job = nvhost_job_alloc(ctx->ch,
619                         ctx->hwctx,
620                         num_cmdbufs,
621                         num_relocs,
622                         num_waitchks,
623                         num_syncpt_incrs,
624                         ctx->memmgr);
625         if (!job)
626                 return -ENOMEM;
627
628         job->num_relocs = args->num_relocs;
629         job->num_waitchk = args->num_waitchks;
630         job->num_syncpts = args->num_syncpt_incrs;
631         job->priority = ctx->priority;
632         job->clientid = ctx->clientid;
633
634         while (num_cmdbufs) {
635                 struct nvhost_cmdbuf cmdbuf;
636                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
637                 if (err)
638                         goto fail;
639                 nvhost_job_add_gather(job,
640                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
641                 num_cmdbufs--;
642                 cmdbufs++;
643         }
644
645         err = copy_from_user(job->relocarray,
646                         relocs, sizeof(*relocs) * num_relocs);
647         if (err)
648                 goto fail;
649
650         err = copy_from_user(job->relocshiftarray,
651                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
652         if (err)
653                 goto fail;
654
655         err = copy_from_user(job->waitchk,
656                         waitchks, sizeof(*waitchks) * num_waitchks);
657         if (err)
658                 goto fail;
659
660         /* mass copy waitbases */
661         if (args->waitbases) {
662                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
663                         GFP_KERNEL);
664                 err = copy_from_user(local_waitbases, waitbases,
665                         sizeof(u32) * num_syncpt_incrs);
666                 if (err) {
667                         err = -EINVAL;
668                         goto fail;
669                 }
670         }
671
672         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
673         if (!ctx->hwctx)
674                 hwctx_syncpt_idx = 0;
675
676         /*
677          * Go through each syncpoint from userspace. Here we:
678          * - Copy syncpoint information
679          * - Validate each syncpoint
680          * - Determine waitbase for each syncpoint
681          * - Determine the index of hwctx syncpoint in the table
682          */
683
684         for (i = 0; i < num_syncpt_incrs; ++i) {
685                 u32 waitbase;
686                 struct nvhost_syncpt_incr sp;
687
688                 /* Copy */
689                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
690                 if (err)
691                         goto fail;
692
693                 /* Validate */
694                 if (sp.syncpt_id > host->info.nb_pts) {
695                         err = -EINVAL;
696                         goto fail;
697                 }
698
699                 /* Determine waitbase */
700                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
701                         waitbase = local_waitbases[i];
702                 else
703                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
704                                 sp.syncpt_id);
705
706                 /* Store */
707                 job->sp[i].id = sp.syncpt_id;
708                 job->sp[i].incrs = sp.syncpt_incrs;
709                 job->sp[i].waitbase = waitbase;
710
711                 /* Find hwctx syncpoint */
712                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
713                         hwctx_syncpt_idx = i;
714         }
715
716         /* not needed anymore */
717         kfree(local_waitbases);
718         local_waitbases = NULL;
719
720         /* Is hwctx_syncpt_idx valid? */
721         if (hwctx_syncpt_idx == -1) {
722                 err = -EINVAL;
723                 goto fail;
724         }
725
726         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
727
728         trace_nvhost_channel_submit(ctx->ch->dev->name,
729                 job->num_gathers, job->num_relocs, job->num_waitchk,
730                 job->sp[job->hwctx_syncpt_idx].id,
731                 job->sp[job->hwctx_syncpt_idx].incrs);
732
733         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
734         if (err)
735                 goto fail;
736
737         if (args->timeout)
738                 job->timeout = min(ctx->timeout, args->timeout);
739         else
740                 job->timeout = ctx->timeout;
741         job->timeout_debug_dump = ctx->timeout_debug_dump;
742
743         err = nvhost_channel_submit(job);
744         if (err)
745                 goto fail_submit;
746
747         /* Deliver multiple fences back to the userspace */
748         if (fences)
749                 for (i = 0; i < num_syncpt_incrs; ++i) {
750                         u32 fence = job->sp[i].fence;
751                         err = copy_to_user(fences, &fence, sizeof(u32));
752                         if (err)
753                                 break;
754                         fences++;
755                 }
756
757         /* Deliver the fence using the old mechanism _only_ if a single
758          * syncpoint is used. */
759
760         if (num_syncpt_incrs == 1)
761                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
762         else
763                 args->fence = 0;
764
765         nvhost_job_put(job);
766
767         return 0;
768
769 fail_submit:
770         nvhost_job_unpin(job);
771 fail:
772         nvhost_job_put(job);
773         kfree(local_waitbases);
774         return err;
775 }
776
777 static int nvhost_ioctl_channel_set_ctxswitch(
778                 struct nvhost_channel_userctx *ctx,
779                 struct nvhost_set_ctxswitch_args *args)
780 {
781         struct nvhost_cmdbuf cmdbuf_save;
782         struct nvhost_cmdbuf cmdbuf_restore;
783         struct nvhost_syncpt_incr save_incr, restore_incr;
784         u32 save_waitbase, restore_waitbase;
785         struct nvhost_reloc reloc;
786         struct nvhost_hwctx_handler *ctxhandler = NULL;
787         struct nvhost_hwctx *nhwctx = NULL;
788         struct user_hwctx *hwctx;
789         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
790         int err;
791
792         /* Only channels with context support */
793         if (!ctx->hwctx)
794                 return -EFAULT;
795
796         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
797         if (args->num_cmdbufs_save != 1
798                         || args->num_cmdbufs_restore != 1
799                         || args->num_save_incrs != 1
800                         || args->num_restore_incrs != 1
801                         || args->num_relocs != 1)
802                 return -EINVAL;
803
804         err = copy_from_user(&cmdbuf_save,
805                         (void *)(uintptr_t)args->cmdbuf_save,
806                         sizeof(cmdbuf_save));
807         if (err)
808                 goto fail;
809
810         err = copy_from_user(&cmdbuf_restore,
811                         (void *)(uintptr_t)args->cmdbuf_restore,
812                         sizeof(cmdbuf_restore));
813         if (err)
814                 goto fail;
815
816         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
817                         sizeof(reloc));
818         if (err)
819                 goto fail;
820
821         err = copy_from_user(&save_incr,
822                         (void *)(uintptr_t)args->save_incrs,
823                         sizeof(save_incr));
824         if (err)
825                 goto fail;
826         err = copy_from_user(&save_waitbase,
827                         (void *)(uintptr_t)args->save_waitbases,
828                         sizeof(save_waitbase));
829
830         err = copy_from_user(&restore_incr,
831                         (void *)(uintptr_t)args->restore_incrs,
832                         sizeof(restore_incr));
833         if (err)
834                 goto fail;
835         err = copy_from_user(&restore_waitbase,
836                         (void *)(uintptr_t)args->restore_waitbases,
837                         sizeof(restore_waitbase));
838
839         if (save_incr.syncpt_id != pdata->syncpts[0]
840                         || restore_incr.syncpt_id != pdata->syncpts[0]
841                         || save_waitbase != pdata->waitbases[0]
842                         || restore_waitbase != pdata->waitbases[0]) {
843                 err = -EINVAL;
844                 goto fail;
845         }
846         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
847                         save_waitbase, ctx->ch);
848         if (!ctxhandler) {
849                 err = -ENOMEM;
850                 goto fail;
851         }
852
853         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
854         if (!nhwctx) {
855                 err = -ENOMEM;
856                 goto fail_hwctx;
857         }
858         hwctx = to_user_hwctx(nhwctx);
859
860         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
861                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
862                         cmdbuf_restore.mem, cmdbuf_restore.offset,
863                         cmdbuf_restore.words,
864                         pdata->syncpts[0], pdata->waitbases[0],
865                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
866
867         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
868         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
869                         cmdbuf_restore.offset, cmdbuf_restore.words);
870         if (err)
871                 goto fail_set_restore;
872
873         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
874                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
875         if (err)
876                 goto fail_set_save;
877
878         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
879         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
880
881         /* Free old context */
882         ctx->hwctx->h->put(ctx->hwctx);
883         ctx->hwctx = nhwctx;
884
885         return 0;
886
887 fail_set_save:
888 fail_set_restore:
889         ctxhandler->put(&hwctx->hwctx);
890 fail_hwctx:
891         user_ctxhandler_free(ctxhandler);
892 fail:
893         return err;
894 }
895
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
/* Forward a GPU cycle-stats request to the per-chip channel op. */
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	/* The op must be wired up whenever this ioctl is compiled in. */
	BUG_ON(!channel_op().cycle_stats);
	return channel_op().cycle_stats(ctx->hwctx, args);
}
#endif
907
/* Read one 3D-engine register through the channel; the result is stored
 * in args->value. Delegates entirely to nvhost_channel_read_reg(). */
static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}
914
915 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
916 {
917         int i;
918         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
919
920         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
921                 if (pdata->clocks[i].moduleid == moduleid)
922                         return i;
923         }
924
925         /* Old user space is sending a random number in args. Return clock
926          * zero in these cases. */
927         return 0;
928 }
929
930 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
931         struct nvhost_clk_rate_args *arg)
932 {
933         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
934                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
935         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
936                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
937         int index = moduleid ?
938                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
939
940         return nvhost_module_set_rate(ctx->ch->dev,
941                         ctx, arg->rate, index, attr);
942 }
943
944 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
945         u32 moduleid, u32 *rate)
946 {
947         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
948
949         return nvhost_module_get_rate(ctx->ch->dev,
950                         (unsigned long *)rate, index);
951 }
952
/*
 * NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR handler.
 *
 * For each of args->num_offsets register offsets taken from the user array
 * args->offsets, reads or writes (args->write) a block of args->block_size
 * bytes, streaming the data to/from the user array args->values through a
 * fixed 64-word bounce buffer on the stack.
 *
 * Returns 0 on success, -EINVAL for an empty or misaligned request,
 * -EFAULT on a failed user copy, or the error from the register accessor.
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		/* remaining is counted in 32-bit words, not bytes */
		int remaining = args->block_size >> 2;

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		/* Move at most 64 words (the bounce buffer) per call. */
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			/* values advances continuously across all offsets */
			values += batch;
		}
	}

	return 0;
}
1012
1013 static u32 create_mask(u32 *words, int num)
1014 {
1015         int i;
1016         u32 word = 0;
1017         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
1018                 word |= BIT(words[i]);
1019
1020         return word;
1021 }
1022
/*
 * Main ioctl dispatcher for channel device nodes.
 *
 * The user argument is staged through a stack buffer: it is copied in for
 * _IOC_WRITE commands, handed to the per-command handler as a typed
 * struct overlaid on the buffer, and copied back out for _IOC_READ
 * commands when the handler succeeded.
 */
static long nvhost_channelctl(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	struct device *dev = &priv->ch->dev->dev;
	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
	int err = 0;

	/* Reject commands outside this driver's range or too large to
	 * stage in the buffer above. */
	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
		(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
		return -EFAULT;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	switch (cmd) {
	case NVHOST_IOCTL_CHANNEL_FLUSH:
		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
		break;
	case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
	{
		struct nvhost_submit_hdr_ext *hdr;

		/* A previous submit header must be fully consumed before a
		 * new one is accepted; otherwise drop the stale state. */
		if (priv->hdr.num_relocs ||
		    priv->num_relocshifts ||
		    priv->hdr.num_cmdbufs ||
		    priv->hdr.num_waitchks) {
			reset_submit(priv);
			dev_err(&priv->ch->dev->dev,
				"channel submit out of sync\n");
			err = -EIO;
			break;
		}

		hdr = (struct nvhost_submit_hdr_ext *)buf;
		if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
			dev_err(&priv->ch->dev->dev,
				"submit version %d > max supported %d\n",
				hdr->submit_version,
				NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
			err = -EINVAL;
			break;
		}
		memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
		err = set_submit(priv);
		trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
			priv->hdr.submit_version,
			priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
			priv->hdr.num_waitchks,
			priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
	{
		struct nvhost_device_data *pdata = \
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
	{
		struct nvhost_device_data *pdata = \
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;
		/* Index must be in range and refer to an assigned syncpt. */
		if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
				|| !pdata->syncpts[arg->param])
			return -EINVAL;
		arg->value = pdata->syncpts[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
	{
		struct nvhost_device_data *pdata = \
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->waitbases,
					NVHOST_MODULE_MAX_WAITBASES);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
	{
		struct nvhost_device_data *pdata = \
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;
		if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
				|| !pdata->waitbases[arg->param])
			return -EINVAL;
		arg->value = pdata->waitbases[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
	{
		struct nvhost_device_data *pdata = \
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->modulemutexes,
					NVHOST_MODULE_MAX_MODMUTEXES);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
	{
		struct nvhost_device_data *pdata = \
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;
		if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
				|| !pdata->modulemutexes[arg->param])
			return -EINVAL;
		arg->value = pdata->modulemutexes[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
	{
		int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
		struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);

		if (IS_ERR(new_client)) {
			err = PTR_ERR(new_client);
			break;
		}
		/* Swap in the new memory manager; drop the old reference. */
		if (priv->memmgr)
			nvhost_memmgr_put_mgr(priv->memmgr);

		priv->memmgr = new_client;

		if (priv->hwctx)
			priv->hwctx->memmgr = new_client;

		break;
	}
	case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
		err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX_OLD:
		err = nvhost_ioctl_channel_alloc_obj_ctx_old(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
		err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX_OLD:
		err = nvhost_ioctl_channel_free_obj_ctx_old(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
		err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
		err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO_OLD:
		err = nvhost_ioctl_channel_submit_gpfifo_old(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_WAIT:
		err = nvhost_ioctl_channel_wait(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
		err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ZCULL_BIND_OLD:
		err = nvhost_ioctl_channel_zcull_bind_old(priv, (void *)buf);
		break;

#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
	case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
		err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
		break;
#endif
	case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
		err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_ioctl_channel_get_rate(priv,
				arg->moduleid, &arg->rate);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_ioctl_channel_set_rate(priv, arg);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
		priv->timeout =
			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
		dev_dbg(&priv->ch->dev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
		/* NOTE(review): priv->hwctx is dereferenced unconditionally
		 * here; confirm it cannot be NULL for channels without a
		 * hwctx handler (other cases guard with "if (priv->hwctx)"). */
		((struct nvhost_get_param_args *)buf)->value =
				priv->hwctx->has_timedout;
		break;
	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
		priv->priority =
			(u32)((struct nvhost_set_priority_args *)buf)->priority;
		break;
	case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
		err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT:
		err = nvhost_ioctl_channel_submit(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
		priv->timeout = (u32)
			((struct nvhost_set_timeout_ex_args *)buf)->timeout;
		priv->timeout_debug_dump = !((u32)
			((struct nvhost_set_timeout_ex_args *)buf)->flags &
			(1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
		dev_dbg(&priv->ch->dev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
		err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
		break;
	default:
		nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
		break;
	}

	/* Copy results back to user space for read-direction commands. */
	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));

	return err;
}
1264
/* File operations for the per-channel device node. */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.write = nvhost_channelwrite,
	.unlocked_ioctl = nvhost_channelctl
};
1272
1273 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
1274 {
1275         struct nvhost_channel_userctx *userctx;
1276         struct file *f = fget(fd);
1277         if (!f)
1278                 return 0;
1279
1280         if (f->f_op != &nvhost_channelops) {
1281                 fput(f);
1282                 return 0;
1283         }
1284
1285         userctx = (struct nvhost_channel_userctx *)f->private_data;
1286         fput(f);
1287         return userctx->hwctx;
1288 }
1289
1290
/* File operations for the address-space ("as-") device node. */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
1297
/* Class-id -> device-node-name table; first match wins in
 * get_device_name_for_dev(). */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1310
/* Module-id -> device-node-name table; consulted when no class-id entry
 * matched in get_device_name_for_dev(). */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1323
1324 static const char *get_device_name_for_dev(struct platform_device *dev)
1325 {
1326         int i;
1327         /* first choice is to use the class id if specified */
1328         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1329                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1330                 if (pdata->class == class_id_dev_name_map[i].class_id)
1331                         return class_id_dev_name_map[i].dev_name;
1332         }
1333
1334         /* second choice is module name if specified */
1335         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1336                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1337                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1338                         return module_id_dev_name_map[i].dev_name;
1339         }
1340
1341         /* last choice is to just use the given dev name */
1342         return dev->name;
1343 }
1344
1345 static struct device *nvhost_client_device_create(
1346         struct platform_device *pdev, struct cdev *cdev,
1347         const char *cdev_name, int devno,
1348         const struct file_operations *ops)
1349 {
1350         struct nvhost_master *host = nvhost_get_host(pdev);
1351         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1352         const char *use_dev_name;
1353         struct device *dev;
1354         int err;
1355
1356         nvhost_dbg_fn("");
1357
1358         BUG_ON(!host);
1359
1360         cdev_init(cdev, ops);
1361         cdev->owner = THIS_MODULE;
1362
1363         err = cdev_add(cdev, devno, 1);
1364         if (err < 0) {
1365                 dev_err(&pdev->dev,
1366                         "failed to add chan %i cdev\n", pdata->index);
1367                 return NULL;
1368         }
1369         use_dev_name = get_device_name_for_dev(pdev);
1370
1371         dev = device_create(host->nvhost_class,
1372                         NULL, devno, NULL,
1373                         (pdev->id <= 0) ?
1374                         IFACE_NAME "-%s%s" :
1375                         IFACE_NAME "-%s%s.%d",
1376                         cdev_name, use_dev_name, pdev->id);
1377
1378         if (IS_ERR(dev)) {
1379                 err = PTR_ERR(dev);
1380                 dev_err(&pdev->dev,
1381                         "failed to create %s %s device for %s\n",
1382                         use_dev_name, cdev_name, pdev->name);
1383                 return NULL;
1384         }
1385
1386         return dev;
1387 }
1388
1389 int nvhost_client_user_init(struct platform_device *dev)
1390 {
1391         int err, devno;
1392         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1393         struct nvhost_channel *ch = pdata->channel;
1394
1395         BUG_ON(!ch);
1396         // reserve 3 minor #s for <dev> and as-<dev> and ctrl-<dev>
1397
1398         err = alloc_chrdev_region(&devno, 0, 3, IFACE_NAME);
1399         if (err < 0) {
1400                 dev_err(&dev->dev, "failed to allocate devno\n");
1401                 goto fail;
1402         }
1403
1404         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1405                                 "", devno, &nvhost_channelops);
1406         if (ch->node == NULL)
1407                 goto fail;
1408         ++devno;
1409         ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1410                                 "as-", devno, &nvhost_asops);
1411         if (ch->as_node == NULL)
1412                 goto fail;
1413
1414         if (pdata->ctrl_ops) {
1415                 ++devno;
1416                 pdata->ctrl_node = nvhost_client_device_create(dev,
1417                                         &pdata->ctrl_cdev, "ctrl-",
1418                                         devno, pdata->ctrl_ops);
1419                 if (pdata->ctrl_node == NULL)
1420                         goto fail;
1421         }
1422
1423         return 0;
1424 fail:
1425         return err;
1426 }
1427
/*
 * Bring up a client device: allocate and initialize its channel, create
 * debugfs entries and user-space device nodes, register it in the nvhost
 * device list, reset its syncpoints, and set up DMA parameters.
 *
 * Returns 0 on success or a negative errno; on failure the allocated
 * channel is freed.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	/* Register an optional slave platform device under the same parent. */
	if (pdata->slave) {
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
	}

	return 0;

fail:
	/* Add clean-up */
	/* NOTE(review): only the channel is freed here; device nodes or
	 * list entries created before the failing step are not unwound. */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1486 EXPORT_SYMBOL(nvhost_client_device_init);
1487
/*
 * Tear down a client device: deinit the nvhost module, remove it from the
 * device list, destroy its user-space device node and cdev, and free the
 * channel. Always returns 0.
 */
int nvhost_client_device_release(struct platform_device *dev)
{
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = pdata->channel;

	/* Release nvhost module resources */
	nvhost_module_deinit(dev);

	/* Remove from nvhost device list */
	nvhost_device_list_remove(dev);

	/* Release chardev and device node for user space */
	/* NOTE(review): only the channel cdev is destroyed here; the
	 * as_cdev and ctrl_cdev created in nvhost_client_user_init() are
	 * not — confirm whether they are cleaned up elsewhere. */
	device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
	cdev_del(&ch->cdev);

	/* Free nvhost channel */
	nvhost_free_channel(ch);

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
1512
1513 int nvhost_client_device_suspend(struct device *dev)
1514 {
1515         int ret = 0;
1516         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1517
1518         ret = nvhost_channel_suspend(pdata->channel);
1519         if (ret)
1520                 return ret;
1521
1522         dev_info(dev, "suspend status: %d\n", ret);
1523
1524         return ret;
1525 }
1526
/* PM resume hook: nothing to restore here; just log and succeed. */
int nvhost_client_device_resume(struct device *dev)
{
	dev_info(dev, "resuming\n");
	return 0;
}
1532
1533 int nvhost_client_device_get_resources(struct platform_device *dev)
1534 {
1535         int i;
1536         void __iomem *regs = NULL;
1537         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1538
1539         for (i = 0; i < dev->num_resources; i++) {
1540                 struct resource *r = NULL;
1541
1542                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1543                 /* We've run out of mem resources */
1544                 if (!r)
1545                         break;
1546
1547                 regs = devm_request_and_ioremap(&dev->dev, r);
1548                 if (!regs)
1549                         goto fail;
1550
1551                 pdata->aperture[i] = regs;
1552         }
1553
1554         return 0;
1555
1556 fail:
1557         dev_err(&dev->dev, "failed to get register memory\n");
1558
1559         return -ENXIO;
1560 }
1561 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1562
1563 /* This is a simple wrapper around request_firmware that takes
1564  * 'fw_name' and if available applies a SOC relative path prefix to it.
1565  * The caller is responsible for calling release_firmware later.
1566  */
1567 const struct firmware *
1568 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1569 {
1570         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1571         const struct firmware *fw;
1572         char *fw_path = NULL;
1573         int path_len, err;
1574
1575         /* This field is NULL when calling from SYS_EXIT.
1576            Add a check here to prevent crash in request_firmware */
1577         if (!current->fs) {
1578                 BUG();
1579                 return NULL;
1580         }
1581
1582         if (!fw_name)
1583                 return NULL;
1584
1585         if (op->soc_name) {
1586                 path_len = strlen(fw_name) + strlen(op->soc_name);
1587                 path_len += 2; /* for the path separator and zero terminator*/
1588
1589                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1590                                      GFP_KERNEL);
1591                 if (!fw_path)
1592                         return NULL;
1593
1594                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1595                 fw_name = fw_path;
1596         }
1597
1598         err = request_firmware(&fw, fw_name, &dev->dev);
1599         kfree(fw_path);
1600         if (err) {
1601                 dev_err(&dev->dev, "failed to get firmware\n");
1602                 return NULL;
1603         }
1604
1605         /* note: caller must release_firmware */
1606         return fw;
1607 }