video: tegra: host: Fix checks for linsim to !silicon
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * drivers/video/tegra/host/bus_client.c
3  *
4  * Tegra Graphics Host Client Module
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/spinlock.h>
24 #include <linux/fs.h>
25 #include <linux/cdev.h>
26 #include <linux/uaccess.h>
27 #include <linux/file.h>
28 #include <linux/clk.h>
29 #include <linux/hrtimer.h>
30 #include <linux/export.h>
31 #include <linux/firmware.h>
32
33 #include <trace/events/nvhost.h>
34
35 #include <linux/io.h>
36 #include <linux/string.h>
37
38 #include <linux/nvhost.h>
39 #include <linux/nvhost_ioctl.h>
40
41 #include <mach/gpufuse.h>
42 #include <mach/hardware.h>
43
44 #include "debug.h"
45 #include "bus_client.h"
46 #include "dev.h"
47 #include "class_ids.h"
48 #include "nvhost_as.h"
49 #include "nvhost_memmgr.h"
50 #include "chip_support.h"
51 #include "nvhost_acm.h"
52
53 #include "nvhost_syncpt.h"
54 #include "nvhost_channel.h"
55 #include "nvhost_job.h"
56 #include "nvhost_hwctx.h"
57 #include "user_hwctx.h"
58
59 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
60 {
61         struct resource *r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
62         int err = 0;
63
64         if (offset + 4 * count > resource_size(r)
65                         || (offset + 4 * count < offset))
66                 err = -EPERM;
67
68         return err;
69 }
70
71 int nvhost_read_module_regs(struct platform_device *ndev,
72                         u32 offset, int count, u32 *values)
73 {
74         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
75         void __iomem *p = pdata->aperture[0] + offset;
76         int err;
77
78         if (!pdata->aperture[0])
79                 return -ENODEV;
80
81         /* verify offset */
82         err = validate_reg(ndev, offset, count);
83         if (err)
84                 return err;
85
86         nvhost_module_busy(ndev);
87         while (count--) {
88                 *(values++) = readl(p);
89                 p += 4;
90         }
91         rmb();
92         nvhost_module_idle(ndev);
93
94         return 0;
95 }
96
97 int nvhost_write_module_regs(struct platform_device *ndev,
98                         u32 offset, int count, const u32 *values)
99 {
100         void __iomem *p;
101         int err;
102         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
103
104         if (!pdata->aperture[0])
105                 return -ENODEV;
106
107         p = pdata->aperture[0] + offset;
108
109         /* verify offset */
110         err = validate_reg(ndev, offset, count);
111         if (err)
112                 return err;
113
114         nvhost_module_busy(ndev);
115         while (count--) {
116                 writel(*(values++), p);
117                 p += 4;
118         }
119         wmb();
120         nvhost_module_idle(ndev);
121
122         return 0;
123 }
124
/* Per-open-file state for a channel device node. */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;	/* hardware context, if the channel has one */
	struct nvhost_submit_hdr_ext hdr;	/* header of the submit streamed in via write() */
	int num_relocshifts;	/* reloc shifts still expected from userspace (V2+) */
	struct nvhost_job *job;	/* job under construction; NULL between submits */
	struct mem_mgr *memmgr;	/* memory manager handle used for job buffers */
	u32 timeout;	/* job timeout; forced to 0 on non-silicon platforms */
	u32 priority;	/* submit priority, one of NVHOST_PRIORITY_* */
	int clientid;	/* unique id from the host's clientid counter */
	bool timeout_debug_dump;	/* dump debug state when a job times out */
};
137
/* Release an open channel fd: drop every reference taken in
 * nvhost_channelopen() (module client, hwctx, pending job, channel,
 * memory manager) and free the per-file context. Also used as the
 * error-unwind path of nvhost_channelopen(). */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx)
		priv->hwctx->h->put(priv->hwctx);

	/* Drop a job left over from an unfinished write()/flush cycle. */
	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch, priv->hwctx);

	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
160
161 static int nvhost_channelopen(struct inode *inode, struct file *filp)
162 {
163         struct nvhost_channel_userctx *priv;
164         struct nvhost_channel *ch;
165
166         ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
167         ch = nvhost_getchannel(ch);
168         if (!ch)
169                 return -ENOMEM;
170         trace_nvhost_channel_open(dev_name(&ch->dev->dev));
171
172         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
173         if (!priv) {
174                 nvhost_putchannel(ch, NULL);
175                 return -ENOMEM;
176         }
177         filp->private_data = priv;
178         priv->ch = ch;
179         if(nvhost_module_add_client(ch->dev, priv))
180                 goto fail;
181
182         if (ch->ctxhandler && ch->ctxhandler->alloc) {
183                 nvhost_module_busy(ch->dev);
184                 priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
185                 nvhost_module_idle(ch->dev);
186                 if (!priv->hwctx)
187                         goto fail;
188         }
189         priv->priority = NVHOST_PRIORITY_MEDIUM;
190         priv->clientid = atomic_add_return(1,
191                         &nvhost_get_host(ch->dev)->clientid);
192         priv->timeout = CONFIG_TEGRA_GRHOST_DEFAULT_TIMEOUT;
193         priv->timeout_debug_dump = true;
194         if (!tegra_platform_is_silicon())
195                 priv->timeout = 0;
196
197         return 0;
198 fail:
199         nvhost_channelrelease(inode, filp);
200         return -ENOMEM;
201 }
202
/* Validate the freshly copied submit header and allocate the job that
 * the subsequent write() data (cmdbufs, relocs, waitchks) will fill.
 * Returns 0 on success or a negative errno. */
static int set_submit(struct nvhost_channel_userctx *ctx)
{
	struct platform_device *ndev = ctx->ch->dev;
	struct nvhost_master *host = nvhost_get_host(ndev);

	/* submit should have at least 1 cmdbuf */
	if (!ctx->hdr.num_cmdbufs ||
			!nvhost_syncpt_is_valid(&host->syncpt,
				ctx->hdr.syncpt_id))
		return -EIO;

	if (!ctx->memmgr) {
		dev_err(&ndev->dev, "no nvmap context set\n");
		return -EFAULT;
	}

	/* A previous submit that was never flushed leaves a stale job
	 * behind; drop it before allocating the new one. */
	if (ctx->job) {
		dev_warn(&ndev->dev, "performing channel submit when a job already exists\n");
		nvhost_job_put(ctx->job);
	}
	ctx->job = nvhost_job_alloc(ctx->ch,
			ctx->hwctx,
			ctx->hdr.num_cmdbufs,
			ctx->hdr.num_relocs,
			ctx->hdr.num_waitchks,
			1, /* this legacy path uses a single syncpoint */
			ctx->memmgr);
	if (!ctx->job)
		return -ENOMEM;
	ctx->job->timeout = ctx->timeout;
	ctx->job->sp->id = ctx->hdr.syncpt_id;
	ctx->job->sp->incrs = ctx->hdr.syncpt_incrs;
	ctx->job->hwctx_syncpt_idx = 0;
	ctx->job->num_syncpts = 1;
	ctx->job->priority = ctx->priority;
	ctx->job->clientid = ctx->clientid;
	ctx->job->timeout_debug_dump = ctx->timeout_debug_dump;

	/* V2+ submits stream an extra reloc-shift array after the relocs. */
	if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
		ctx->num_relocshifts = ctx->hdr.num_relocs;

	return 0;
}
246
247 static void reset_submit(struct nvhost_channel_userctx *ctx)
248 {
249         ctx->hdr.num_cmdbufs = 0;
250         ctx->hdr.num_relocs = 0;
251         ctx->num_relocshifts = 0;
252         ctx->hdr.num_waitchks = 0;
253
254         if (ctx->job) {
255                 nvhost_job_put(ctx->job);
256                 ctx->job = NULL;
257         }
258 }
259
/* write() handler for the legacy streamed submit ABI: userspace writes a
 * struct nvhost_submit_hdr followed by arrays of cmdbufs, relocs,
 * waitchks and (V2+) reloc shifts; the header counters track how much of
 * each is still expected. Returns the number of bytes consumed or a
 * negative errno (resetting the pending submit on error). */
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;
	struct nvhost_job *job = priv->job;
	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
	const char *chname = priv->ch->dev->name;

	if (!job)
		return -EIO;

	while (remaining) {
		size_t consumed;
		/* All counters at zero: the next chunk must be a header. */
		if (!hdr->num_relocs &&
		    !priv->num_relocshifts &&
		    !hdr->num_cmdbufs &&
		    !hdr->num_waitchks) {
			consumed = sizeof(struct nvhost_submit_hdr);
			if (remaining < consumed)
				break;
			if (copy_from_user(hdr, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			/* A bare header implies the original submit ABI. */
			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
			err = set_submit(priv);
			if (err)
				break;
			trace_nvhost_channel_write_submit(chname,
			  count, hdr->num_cmdbufs, hdr->num_relocs,
			  hdr->syncpt_id, hdr->syncpt_incrs);
		} else if (hdr->num_cmdbufs) {
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_cmdbuf(chname,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			hdr->num_cmdbufs--;
		} else if (hdr->num_relocs) {
			/* Consume as many whole relocs as this write holds,
			 * capped by what the header still announces. */
			int numrelocs = remaining / sizeof(struct nvhost_reloc);
			if (!numrelocs)
				break;
			numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
			consumed = numrelocs * sizeof(struct nvhost_reloc);
			if (copy_from_user(&job->relocarray[job->num_relocs],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			while (numrelocs) {
				struct nvhost_reloc *reloc =
					&job->relocarray[job->num_relocs];
				trace_nvhost_channel_write_reloc(chname,
					reloc->cmdbuf_mem,
					reloc->cmdbuf_offset,
					reloc->target,
					reloc->target_offset);
				job->num_relocs++;
				hdr->num_relocs--;
				numrelocs--;
			}
		} else if (hdr->num_waitchks) {
			int numwaitchks =
				(remaining / sizeof(struct nvhost_waitchk));
			if (!numwaitchks)
				break;
			numwaitchks = min_t(int,
				numwaitchks, hdr->num_waitchks);
			consumed = numwaitchks * sizeof(struct nvhost_waitchk);
			if (copy_from_user(&job->waitchk[job->num_waitchk],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_waitchks(
			  chname, numwaitchks);
			job->num_waitchk += numwaitchks;
			hdr->num_waitchks -= numwaitchks;
		} else if (priv->num_relocshifts) {
			/* V2 submits: shifts land after the relocs already
			 * copied, hence the offset from num_relocs. */
			int next_shift =
				job->num_relocs - priv->num_relocshifts;
			int num =
				(remaining / sizeof(struct nvhost_reloc_shift));
			if (!num)
				break;
			num = min_t(int, num, priv->num_relocshifts);
			consumed = num * sizeof(struct nvhost_reloc_shift);
			if (copy_from_user(&job->relocshiftarray[next_shift],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->num_relocshifts -= num;
		} else {
			/* No counter expects more data: malformed stream. */
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		dev_err(&priv->ch->dev->dev, "channel write error\n");
		reset_submit(priv);
		return err;
	}

	return count - remaining;
}
378
/* NVHOST_IOCTL_CHANNEL_FLUSH: pin and submit the job assembled by the
 * preceding write() stream; the resulting syncpoint fence is returned
 * in args->value. null_kickoff is forwarded to the job (and can be
 * forced via the nvhost debugfs knob for a given pid). The pending job
 * is consumed whether or not the submit succeeds. */
static int nvhost_ioctl_channel_flush(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_get_param_args *args,
	int null_kickoff)
{
	struct platform_device *ndev = to_platform_device(&ctx->ch->dev->dev);
	int err;

	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

	/* Non-zero counters mean userspace stopped mid-stream. */
	if (!ctx->job ||
	    ctx->hdr.num_relocs ||
	    ctx->hdr.num_cmdbufs ||
	    ctx->hdr.num_waitchks) {
		reset_submit(ctx);
		dev_err(&ndev->dev, "channel submit out of sync\n");
		return -EFAULT;
	}

	err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
	if (err) {
		dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
		goto fail;
	}

	if (nvhost_debug_null_kickoff_pid == current->tgid)
		null_kickoff = 1;
	ctx->job->null_kickoff = null_kickoff;

	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
		ctx->timeout = nvhost_debug_force_timeout_val;
	}

	/* context switch if needed, and submit user's gathers to the channel */
	err = nvhost_channel_submit(ctx->job);
	args->value = ctx->job->sp->fence;

fail:
	/* NOTE(review): this also runs when nvhost_job_pin() itself failed,
	 * so nvhost_job_unpin() must tolerate a partially pinned job —
	 * verify against its implementation. */
	if (err)
		nvhost_job_unpin(ctx->job);

	nvhost_job_put(ctx->job);
	ctx->job = NULL;

	return err;
}
426
427 static int nvhost_ioctl_channel_alloc_obj_ctx(
428         struct nvhost_channel_userctx *ctx,
429         struct nvhost_alloc_obj_ctx_args *args)
430 {
431         int ret;
432
433         BUG_ON(!channel_op().alloc_obj);
434         nvhost_module_busy(ctx->ch->dev);
435         ret = channel_op().alloc_obj(ctx->hwctx, args);
436         nvhost_module_idle(ctx->ch->dev);
437         return ret;
438 }
439
440 static int nvhost_ioctl_channel_alloc_obj_ctx_old(
441         struct nvhost_channel_userctx *ctx,
442         struct nvhost_alloc_obj_ctx_old_args *args)
443 {
444         struct nvhost_alloc_obj_ctx_args new_args;
445         int err;
446
447         new_args.class_num = args->class_num;
448         err = nvhost_ioctl_channel_alloc_obj_ctx(ctx, &new_args);
449         if (!err)
450                 args->obj_id = new_args.obj_id;
451         return err;
452 }
453
454 static int nvhost_ioctl_channel_free_obj_ctx(
455         struct nvhost_channel_userctx *ctx,
456         struct nvhost_free_obj_ctx_args *args)
457 {
458         int ret;
459
460         BUG_ON(!channel_op().free_obj);
461         nvhost_module_busy(ctx->ch->dev);
462         ret = channel_op().free_obj(ctx->hwctx, args);
463         nvhost_module_idle(ctx->ch->dev);
464         return ret;
465 }
466
467 static int nvhost_ioctl_channel_free_obj_ctx_old(
468         struct nvhost_channel_userctx *ctx,
469         struct nvhost_free_obj_ctx_old_args *args)
470 {
471         struct nvhost_free_obj_ctx_args new_args;
472         new_args.obj_id = args->obj_id;
473         return nvhost_ioctl_channel_free_obj_ctx(ctx, &new_args);
474 }
475
476 static int nvhost_ioctl_channel_alloc_gpfifo(
477         struct nvhost_channel_userctx *ctx,
478         struct nvhost_alloc_gpfifo_args *args)
479 {
480         int ret;
481
482         BUG_ON(!channel_op().alloc_gpfifo);
483         nvhost_module_busy(ctx->ch->dev);
484         ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
485         nvhost_module_idle(ctx->ch->dev);
486         return ret;
487 }
488
489 static int nvhost_ioctl_channel_submit_gpfifo(
490         struct nvhost_channel_userctx *ctx,
491         struct nvhost_submit_gpfifo_args *args)
492 {
493         void *gpfifo;
494         u32 size;
495         int ret = 0;
496
497         size = args->num_entries * sizeof(struct nvhost_gpfifo);
498
499         gpfifo = kzalloc(size, GFP_KERNEL);
500         if (IS_ERR_OR_NULL(gpfifo))
501                 return -ENOMEM;
502
503         if (copy_from_user(gpfifo,
504                            (void __user *)(uintptr_t)args->gpfifo, size)) {
505                 ret = -EINVAL;
506                 goto clean_up;
507         }
508
509         BUG_ON(!channel_op().submit_gpfifo);
510
511         nvhost_module_busy(ctx->ch->dev);
512         ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
513                         args->num_entries, &args->fence, args->flags);
514         nvhost_module_idle(ctx->ch->dev);
515 clean_up:
516         kfree(gpfifo);
517         return ret;
518 }
519
520 static int nvhost_ioctl_channel_submit_gpfifo_old(
521         struct nvhost_channel_userctx *ctx,
522         struct nvhost_submit_gpfifo_old_args *args)
523 {
524         int ret;
525         struct nvhost_submit_gpfifo_args new_args;
526
527         new_args.gpfifo = (u64)(uintptr_t)args->gpfifo;
528         new_args.num_entries = args->num_entries;
529         new_args.flags = args->flags;
530         ret = nvhost_ioctl_channel_submit_gpfifo(ctx, &new_args);
531         if (!ret)
532                 args->fence = new_args.fence;
533         return ret;
534 }
535
536 static int nvhost_ioctl_channel_wait(
537         struct nvhost_channel_userctx *ctx,
538         struct nvhost_wait_args *args)
539 {
540         int ret;
541
542         BUG_ON(!channel_op().wait);
543         nvhost_module_busy(ctx->ch->dev);
544         ret = channel_op().wait(ctx->hwctx, args);
545         nvhost_module_idle(ctx->ch->dev);
546         return ret;
547 }
548
549 static int nvhost_ioctl_channel_zcull_bind(
550         struct nvhost_channel_userctx *ctx,
551         struct nvhost_zcull_bind_args *args)
552 {
553         int ret;
554
555         BUG_ON(!channel_zcull_op().bind);
556         nvhost_module_busy(ctx->ch->dev);
557         ret = channel_zcull_op().bind(ctx->hwctx, args);
558         nvhost_module_idle(ctx->ch->dev);
559         return ret;
560 }
561
562 static int nvhost_ioctl_channel_zcull_bind_old(
563         struct nvhost_channel_userctx *ctx,
564         struct nvhost_zcull_bind_old_args *args)
565 {
566         struct nvhost_zcull_bind_args new_args;
567
568         new_args.gpu_va = args->gpu_va;
569         new_args.mode = args->mode;
570         return nvhost_ioctl_channel_zcull_bind(ctx, &new_args);
571 }
572
573 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
574                 struct nvhost_submit_args *args)
575 {
576         struct nvhost_job *job;
577         int num_cmdbufs = args->num_cmdbufs;
578         int num_relocs = args->num_relocs;
579         int num_waitchks = args->num_waitchks;
580         int num_syncpt_incrs = args->num_syncpt_incrs;
581         struct nvhost_cmdbuf __user *cmdbufs =
582                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
583         struct nvhost_reloc __user *relocs =
584                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
585         struct nvhost_reloc_shift __user *reloc_shifts =
586                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
587         struct nvhost_waitchk __user *waitchks =
588                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
589         struct nvhost_syncpt_incr __user *syncpt_incrs =
590                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
591         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
592         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
593
594         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
595         u32 *local_waitbases = NULL;
596         int err, i, hwctx_syncpt_idx = -1;
597
598         if (num_syncpt_incrs > host->info.nb_pts)
599                 return -EINVAL;
600
601         job = nvhost_job_alloc(ctx->ch,
602                         ctx->hwctx,
603                         num_cmdbufs,
604                         num_relocs,
605                         num_waitchks,
606                         num_syncpt_incrs,
607                         ctx->memmgr);
608         if (!job)
609                 return -ENOMEM;
610
611         job->num_relocs = args->num_relocs;
612         job->num_waitchk = args->num_waitchks;
613         job->num_syncpts = args->num_syncpt_incrs;
614         job->priority = ctx->priority;
615         job->clientid = ctx->clientid;
616
617         while (num_cmdbufs) {
618                 struct nvhost_cmdbuf cmdbuf;
619                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
620                 if (err)
621                         goto fail;
622                 nvhost_job_add_gather(job,
623                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
624                 num_cmdbufs--;
625                 cmdbufs++;
626         }
627
628         err = copy_from_user(job->relocarray,
629                         relocs, sizeof(*relocs) * num_relocs);
630         if (err)
631                 goto fail;
632
633         err = copy_from_user(job->relocshiftarray,
634                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
635         if (err)
636                 goto fail;
637
638         err = copy_from_user(job->waitchk,
639                         waitchks, sizeof(*waitchks) * num_waitchks);
640         if (err)
641                 goto fail;
642
643         /* mass copy waitbases */
644         if (args->waitbases) {
645                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
646                         GFP_KERNEL);
647                 err = copy_from_user(local_waitbases, waitbases,
648                         sizeof(u32) * num_syncpt_incrs);
649                 if (err) {
650                         err = -EINVAL;
651                         goto fail;
652                 }
653         }
654
655         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
656         if (!ctx->hwctx)
657                 hwctx_syncpt_idx = 0;
658
659         /*
660          * Go through each syncpoint from userspace. Here we:
661          * - Copy syncpoint information
662          * - Validate each syncpoint
663          * - Determine waitbase for each syncpoint
664          * - Determine the index of hwctx syncpoint in the table
665          */
666
667         for (i = 0; i < num_syncpt_incrs; ++i) {
668                 u32 waitbase;
669                 struct nvhost_syncpt_incr sp;
670
671                 /* Copy */
672                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
673                 if (err)
674                         goto fail;
675
676                 /* Validate */
677                 if (sp.syncpt_id > host->info.nb_pts) {
678                         err = -EINVAL;
679                         goto fail;
680                 }
681
682                 /* Determine waitbase */
683                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
684                         waitbase = local_waitbases[i];
685                 else
686                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
687                                 sp.syncpt_id);
688
689                 /* Store */
690                 job->sp[i].id = sp.syncpt_id;
691                 job->sp[i].incrs = sp.syncpt_incrs;
692                 job->sp[i].waitbase = waitbase;
693
694                 /* Find hwctx syncpoint */
695                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
696                         hwctx_syncpt_idx = i;
697         }
698
699         /* not needed anymore */
700         kfree(local_waitbases);
701         local_waitbases = NULL;
702
703         /* Is hwctx_syncpt_idx valid? */
704         if (hwctx_syncpt_idx == -1) {
705                 err = -EINVAL;
706                 goto fail;
707         }
708
709         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
710
711         trace_nvhost_channel_submit(ctx->ch->dev->name,
712                 job->num_gathers, job->num_relocs, job->num_waitchk,
713                 job->sp[job->hwctx_syncpt_idx].id,
714                 job->sp[job->hwctx_syncpt_idx].incrs);
715
716         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
717         if (err)
718                 goto fail;
719
720         if (args->timeout)
721                 job->timeout = min(ctx->timeout, args->timeout);
722         else
723                 job->timeout = ctx->timeout;
724         job->timeout_debug_dump = ctx->timeout_debug_dump;
725
726         err = nvhost_channel_submit(job);
727         if (err)
728                 goto fail_submit;
729
730         /* Deliver multiple fences back to the userspace */
731         if (fences)
732                 for (i = 0; i < num_syncpt_incrs; ++i) {
733                         u32 fence = job->sp[i].fence;
734                         err = copy_to_user(fences, &fence, sizeof(u32));
735                         if (err)
736                                 break;
737                         fences++;
738                 }
739
740         args->fence = job->sp[job->hwctx_syncpt_idx].fence;
741
742         nvhost_job_put(job);
743
744         return 0;
745
746 fail_submit:
747         nvhost_job_unpin(job);
748 fail:
749         nvhost_job_put(job);
750         kfree(local_waitbases);
751         return err;
752 }
753
754 static int nvhost_ioctl_channel_set_ctxswitch(
755                 struct nvhost_channel_userctx *ctx,
756                 struct nvhost_set_ctxswitch_args *args)
757 {
758         struct nvhost_cmdbuf cmdbuf_save;
759         struct nvhost_cmdbuf cmdbuf_restore;
760         struct nvhost_syncpt_incr save_incr, restore_incr;
761         u32 save_waitbase, restore_waitbase;
762         struct nvhost_reloc reloc;
763         struct nvhost_hwctx_handler *ctxhandler = NULL;
764         struct nvhost_hwctx *nhwctx = NULL;
765         struct user_hwctx *hwctx;
766         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
767         int err;
768
769         /* Only channels with context support */
770         if (!ctx->hwctx)
771                 return -EFAULT;
772
773         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
774         if (args->num_cmdbufs_save != 1
775                         || args->num_cmdbufs_restore != 1
776                         || args->num_save_incrs != 1
777                         || args->num_restore_incrs != 1
778                         || args->num_relocs != 1)
779                 return -EINVAL;
780
781         err = copy_from_user(&cmdbuf_save,
782                         (void *)(uintptr_t)args->cmdbuf_save,
783                         sizeof(cmdbuf_save));
784         if (err)
785                 goto fail;
786
787         err = copy_from_user(&cmdbuf_restore,
788                         (void *)(uintptr_t)args->cmdbuf_restore,
789                         sizeof(cmdbuf_restore));
790         if (err)
791                 goto fail;
792
793         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
794                         sizeof(reloc));
795         if (err)
796                 goto fail;
797
798         err = copy_from_user(&save_incr,
799                         (void *)(uintptr_t)args->save_incrs,
800                         sizeof(save_incr));
801         if (err)
802                 goto fail;
803         err = copy_from_user(&save_waitbase,
804                         (void *)(uintptr_t)args->save_waitbases,
805                         sizeof(save_waitbase));
806
807         err = copy_from_user(&restore_incr,
808                         (void *)(uintptr_t)args->restore_incrs,
809                         sizeof(restore_incr));
810         if (err)
811                 goto fail;
812         err = copy_from_user(&restore_waitbase,
813                         (void *)(uintptr_t)args->restore_waitbases,
814                         sizeof(restore_waitbase));
815
816         if (save_incr.syncpt_id != pdata->syncpts[0]
817                         || restore_incr.syncpt_id != pdata->syncpts[0]
818                         || save_waitbase != pdata->waitbases[0]
819                         || restore_waitbase != pdata->waitbases[0]) {
820                 err = -EINVAL;
821                 goto fail;
822         }
823         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
824                         save_waitbase, ctx->ch);
825         if (!ctxhandler) {
826                 err = -ENOMEM;
827                 goto fail;
828         }
829
830         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
831         if (!nhwctx) {
832                 err = -ENOMEM;
833                 goto fail_hwctx;
834         }
835         hwctx = to_user_hwctx(nhwctx);
836
837         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
838                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
839                         cmdbuf_restore.mem, cmdbuf_restore.offset,
840                         cmdbuf_restore.words,
841                         pdata->syncpts[0], pdata->waitbases[0],
842                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
843
844         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
845         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
846                         cmdbuf_restore.offset, cmdbuf_restore.words);
847         if (err)
848                 goto fail_set_restore;
849
850         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
851                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
852         if (err)
853                 goto fail_set_save;
854
855         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
856         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
857
858         /* Free old context */
859         ctx->hwctx->h->put(ctx->hwctx);
860         ctx->hwctx = nhwctx;
861
862         return 0;
863
864 fail_set_save:
865 fail_set_restore:
866         ctxhandler->put(&hwctx->hwctx);
867 fail_hwctx:
868         user_ctxhandler_free(ctxhandler);
869 fail:
870         return err;
871 }
872
873 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
874 static int nvhost_ioctl_channel_cycle_stats(
875         struct nvhost_channel_userctx *ctx,
876         struct nvhost_cycle_stats_args *args)
877 {
878         int ret;
879         BUG_ON(!channel_op().cycle_stats);
880         ret = channel_op().cycle_stats(ctx->hwctx, args);
881         return ret;
882 }
883 #endif
884
885 static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
886         struct nvhost_read_3d_reg_args *args)
887 {
888         return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
889                         args->offset, &args->value);
890 }
891
892 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
893 {
894         int i;
895         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
896
897         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
898                 if (pdata->clocks[i].moduleid == moduleid)
899                         return i;
900         }
901
902         /* Old user space is sending a random number in args. Return clock
903          * zero in these cases. */
904         return 0;
905 }
906
907 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
908         struct nvhost_clk_rate_args *arg)
909 {
910         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
911                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
912         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
913                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
914         int index = moduleid ?
915                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
916
917         return nvhost_module_set_rate(ctx->ch->dev,
918                         ctx, arg->rate, index, attr);
919 }
920
921 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
922         u32 moduleid, u32 *rate)
923 {
924         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
925
926         return nvhost_module_get_rate(ctx->ch->dev,
927                         (unsigned long *)rate, index);
928 }
929
/*
 * Read or write device registers on behalf of user space.
 *
 * args->offsets points at num_offsets register offsets; for each offset
 * a block of args->block_size bytes (u32 words) is transferred to/from
 * the user buffer at args->values, in chunks of at most 64 words via
 * the on-stack bounce buffer.  Returns 0 on success, -EINVAL for a bad
 * request, -EFAULT on user-copy failure, or the register accessor's
 * error code.
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];		/* bounce buffer: at most 64 words per chunk */
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;	/* words left */

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				/* user buffer -> bounce buffer -> registers */
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				/* registers -> bounce buffer -> user buffer */
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
989
990 static u32 create_mask(u32 *words, int num)
991 {
992         int i;
993         u32 word = 0;
994         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
995                 word |= BIT(words[i]);
996
997         return word;
998 }
999
1000 static long nvhost_channelctl(struct file *filp,
1001         unsigned int cmd, unsigned long arg)
1002 {
1003         struct nvhost_channel_userctx *priv = filp->private_data;
1004         struct device *dev = &priv->ch->dev->dev;
1005         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
1006         int err = 0;
1007
1008         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
1009                 (_IOC_NR(cmd) == 0) ||
1010                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
1011                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
1012                 return -EFAULT;
1013
1014         if (_IOC_DIR(cmd) & _IOC_WRITE) {
1015                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
1016                         return -EFAULT;
1017         }
1018
1019         switch (cmd) {
1020         case NVHOST_IOCTL_CHANNEL_FLUSH:
1021                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
1022                 break;
1023         case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
1024                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
1025                 break;
1026         case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
1027         {
1028                 struct nvhost_submit_hdr_ext *hdr;
1029
1030                 if (priv->hdr.num_relocs ||
1031                     priv->num_relocshifts ||
1032                     priv->hdr.num_cmdbufs ||
1033                     priv->hdr.num_waitchks) {
1034                         reset_submit(priv);
1035                         dev_err(&priv->ch->dev->dev,
1036                                 "channel submit out of sync\n");
1037                         err = -EIO;
1038                         break;
1039                 }
1040
1041                 hdr = (struct nvhost_submit_hdr_ext *)buf;
1042                 if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
1043                         dev_err(&priv->ch->dev->dev,
1044                                 "submit version %d > max supported %d\n",
1045                                 hdr->submit_version,
1046                                 NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
1047                         err = -EINVAL;
1048                         break;
1049                 }
1050                 memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
1051                 err = set_submit(priv);
1052                 trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
1053                         priv->hdr.submit_version,
1054                         priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
1055                         priv->hdr.num_waitchks,
1056                         priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
1057                 break;
1058         }
1059         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
1060         {
1061                 struct nvhost_device_data *pdata = \
1062                         platform_get_drvdata(priv->ch->dev);
1063                 ((struct nvhost_get_param_args *)buf)->value =
1064                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
1065                 break;
1066         }
1067         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
1068         {
1069                 struct nvhost_device_data *pdata = \
1070                         platform_get_drvdata(priv->ch->dev);
1071                 struct nvhost_get_param_arg *arg =
1072                         (struct nvhost_get_param_arg *)buf;
1073                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
1074                                 || !pdata->syncpts[arg->param])
1075                         return -EINVAL;
1076                 arg->value = pdata->syncpts[arg->param];
1077                 break;
1078         }
1079         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
1080         {
1081                 struct nvhost_device_data *pdata = \
1082                         platform_get_drvdata(priv->ch->dev);
1083                 ((struct nvhost_get_param_args *)buf)->value =
1084                         create_mask(pdata->waitbases,
1085                                         NVHOST_MODULE_MAX_WAITBASES);
1086                 break;
1087         }
1088         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
1089         {
1090                 struct nvhost_device_data *pdata = \
1091                         platform_get_drvdata(priv->ch->dev);
1092                 struct nvhost_get_param_arg *arg =
1093                         (struct nvhost_get_param_arg *)buf;
1094                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
1095                                 || !pdata->waitbases[arg->param])
1096                         return -EINVAL;
1097                 arg->value = pdata->waitbases[arg->param];
1098                 break;
1099         }
1100         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
1101         {
1102                 struct nvhost_device_data *pdata = \
1103                         platform_get_drvdata(priv->ch->dev);
1104                 ((struct nvhost_get_param_args *)buf)->value =
1105                         create_mask(pdata->modulemutexes,
1106                                         NVHOST_MODULE_MAX_MODMUTEXES);
1107                 break;
1108         }
1109         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
1110         {
1111                 struct nvhost_device_data *pdata = \
1112                         platform_get_drvdata(priv->ch->dev);
1113                 struct nvhost_get_param_arg *arg =
1114                         (struct nvhost_get_param_arg *)buf;
1115                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
1116                                 || !pdata->modulemutexes[arg->param])
1117                         return -EINVAL;
1118                 arg->value = pdata->modulemutexes[arg->param];
1119                 break;
1120         }
1121         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
1122         {
1123                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
1124                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
1125
1126                 if (IS_ERR(new_client)) {
1127                         err = PTR_ERR(new_client);
1128                         break;
1129                 }
1130                 if (priv->memmgr)
1131                         nvhost_memmgr_put_mgr(priv->memmgr);
1132
1133                 priv->memmgr = new_client;
1134
1135                 if (priv->hwctx)
1136                         priv->hwctx->memmgr = new_client;
1137
1138                 break;
1139         }
1140         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
1141                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
1142                 break;
1143         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX_OLD:
1144                 err = nvhost_ioctl_channel_alloc_obj_ctx_old(priv, (void *)buf);
1145                 break;
1146         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
1147                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
1148                 break;
1149         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX_OLD:
1150                 err = nvhost_ioctl_channel_free_obj_ctx_old(priv, (void *)buf);
1151                 break;
1152         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
1153                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
1154                 break;
1155         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
1156                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
1157                 break;
1158         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO_OLD:
1159                 err = nvhost_ioctl_channel_submit_gpfifo_old(priv, (void *)buf);
1160                 break;
1161         case NVHOST_IOCTL_CHANNEL_WAIT:
1162                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
1163                 break;
1164         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
1165                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
1166                 break;
1167         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND_OLD:
1168                 err = nvhost_ioctl_channel_zcull_bind_old(priv, (void *)buf);
1169                 break;
1170
1171 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
1172         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
1173                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
1174                 break;
1175 #endif
1176         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
1177                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
1178                 break;
1179         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
1180         {
1181                 struct nvhost_clk_rate_args *arg =
1182                                 (struct nvhost_clk_rate_args *)buf;
1183
1184                 err = nvhost_ioctl_channel_get_rate(priv,
1185                                 arg->moduleid, &arg->rate);
1186                 break;
1187         }
1188         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
1189         {
1190                 struct nvhost_clk_rate_args *arg =
1191                                 (struct nvhost_clk_rate_args *)buf;
1192
1193                 err = nvhost_ioctl_channel_set_rate(priv, arg);
1194                 break;
1195         }
1196         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1197                 priv->timeout =
1198                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1199                 dev_dbg(&priv->ch->dev->dev,
1200                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1201                         __func__, priv->timeout, priv);
1202                 break;
1203         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1204                 ((struct nvhost_get_param_args *)buf)->value =
1205                                 priv->hwctx->has_timedout;
1206                 break;
1207         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1208                 priv->priority =
1209                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
1210                 break;
1211         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1212                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
1213                 break;
1214         case NVHOST_IOCTL_CHANNEL_SUBMIT:
1215                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1216                 break;
1217         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1218                 priv->timeout = (u32)
1219                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
1220                 priv->timeout_debug_dump = !((u32)
1221                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1222                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1223                 dev_dbg(&priv->ch->dev->dev,
1224                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1225                         __func__, priv->timeout, priv);
1226                 break;
1227         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
1228                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
1229                 break;
1230         default:
1231                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
1232                 err = -ENOTTY;
1233                 break;
1234         }
1235
1236         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1237                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1238
1239         return err;
1240 }
1241
/* File operations for per-channel device nodes created in
 * nvhost_client_user_init(); ioctl dispatch is nvhost_channelctl(). */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.write = nvhost_channelwrite,
	.unlocked_ioctl = nvhost_channelctl
};
1249
1250 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
1251 {
1252         struct nvhost_channel_userctx *userctx;
1253         struct file *f = fget(fd);
1254         if (!f)
1255                 return 0;
1256
1257         if (f->f_op != &nvhost_channelops) {
1258                 fput(f);
1259                 return 0;
1260         }
1261
1262         userctx = (struct nvhost_channel_userctx *)f->private_data;
1263         fput(f);
1264         return userctx->hwctx;
1265 }
1266
1267
/* File operations for the address-space ("as-") device nodes. */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
1274
/* Maps a host1x class id to the name used when creating the device
 * node; consulted first by get_device_name_for_dev(). */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gr3d"},  /* TBD: move to "gpu" */
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1287
/* Maps an nvhost module id to a device-node name; used as the second
 * choice by get_device_name_for_dev() when no class id matches. */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1300
1301 static const char *get_device_name_for_dev(struct platform_device *dev)
1302 {
1303         int i;
1304         /* first choice is to use the class id if specified */
1305         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1306                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1307                 if (pdata->class == class_id_dev_name_map[i].class_id)
1308                         return class_id_dev_name_map[i].dev_name;
1309         }
1310
1311         /* second choice is module name if specified */
1312         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1313                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1314                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1315                         return module_id_dev_name_map[i].dev_name;
1316         }
1317
1318         /* last choice is to just use the given dev name */
1319         return dev->name;
1320 }
1321
1322 static struct device *nvhost_client_device_create(
1323         struct platform_device *pdev, struct cdev *cdev,
1324         const char *cdev_name, int devno,
1325         const struct file_operations *ops)
1326 {
1327         struct nvhost_master *host = nvhost_get_host(pdev);
1328         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1329         const char *use_dev_name;
1330         struct device *dev;
1331         int err;
1332
1333         nvhost_dbg_fn("");
1334
1335         BUG_ON(!host);
1336
1337         cdev_init(cdev, ops);
1338         cdev->owner = THIS_MODULE;
1339
1340         err = cdev_add(cdev, devno, 1);
1341         if (err < 0) {
1342                 dev_err(&pdev->dev,
1343                         "failed to add chan %i cdev\n", pdata->index);
1344                 return NULL;
1345         }
1346         use_dev_name = get_device_name_for_dev(pdev);
1347
1348         dev = device_create(host->nvhost_class,
1349                         NULL, devno, NULL,
1350                         (pdev->id <= 0) ?
1351                         IFACE_NAME "-%s%s" :
1352                         IFACE_NAME "-%s%s.%d",
1353                         cdev_name, use_dev_name, pdev->id);
1354
1355         if (IS_ERR(dev)) {
1356                 err = PTR_ERR(dev);
1357                 dev_err(&pdev->dev,
1358                         "failed to create %s %s device for %s\n",
1359                         use_dev_name, cdev_name, pdev->name);
1360                 return NULL;
1361         }
1362
1363         return dev;
1364 }
1365
1366 int nvhost_client_user_init(struct platform_device *dev)
1367 {
1368         int err, devno;
1369         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1370         struct nvhost_channel *ch = pdata->channel;
1371
1372         BUG_ON(!ch);
1373         // reserve 3 minor #s for <dev> and as-<dev> and ctrl-<dev>
1374
1375         err = alloc_chrdev_region(&devno, 0, 3, IFACE_NAME);
1376         if (err < 0) {
1377                 dev_err(&dev->dev, "failed to allocate devno\n");
1378                 goto fail;
1379         }
1380
1381         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1382                                 "", devno, &nvhost_channelops);
1383         if (ch->node == NULL)
1384                 goto fail;
1385         ++devno;
1386         ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1387                                 "as-", devno, &nvhost_asops);
1388         if (ch->as_node == NULL)
1389                 goto fail;
1390
1391         if (pdata->ctrl_ops) {
1392                 ++devno;
1393                 pdata->ctrl_node = nvhost_client_device_create(dev,
1394                                         &pdata->ctrl_cdev, "ctrl-",
1395                                         devno, pdata->ctrl_ops);
1396                 if (pdata->ctrl_node == NULL)
1397                         goto fail;
1398         }
1399
1400         return 0;
1401 fail:
1402         return err;
1403 }
1404
/*
 * Initialize a client engine: allocate its channel, create debugfs
 * entries and user device nodes, register it in the device list, start
 * scaling (if provided) and reset its syncpoints.  Returns 0 on
 * success or a negative errno.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	/* optional per-chip tick-control hook */
	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	/* optional devfreq/scaling setup supplied by the client */
	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	dev_info(&dev->dev, "initialized\n");

	return 0;

fail:
	/* Add clean-up */
	/* NOTE(review): only the channel is freed; chardevs/debugfs
	 * created above are not torn down on this path -- confirm. */
	nvhost_free_channel(ch);
	return err;
}
1454 EXPORT_SYMBOL(nvhost_client_device_init);
1455
1456 int nvhost_client_device_release(struct platform_device *dev)
1457 {
1458         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1459         struct nvhost_channel *ch;
1460         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1461
1462         ch = pdata->channel;
1463
1464         /* Release nvhost module resources */
1465         nvhost_module_deinit(dev);
1466
1467         /* Remove from nvhost device list */
1468         nvhost_device_list_remove(dev);
1469
1470         /* Release chardev and device node for user space */
1471         device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
1472         cdev_del(&ch->cdev);
1473
1474         /* Free nvhost channel */
1475         nvhost_free_channel(ch);
1476
1477         return 0;
1478 }
1479 EXPORT_SYMBOL(nvhost_client_device_release);
1480
1481 int nvhost_client_device_suspend(struct device *dev)
1482 {
1483         int ret = 0;
1484         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1485
1486         ret = nvhost_channel_suspend(pdata->channel);
1487         if (ret)
1488                 return ret;
1489
1490         dev_info(dev, "suspend status: %d\n", ret);
1491
1492         return ret;
1493 }
1494
/* Power-management resume hook: nothing to re-program here, only log. */
int nvhost_client_device_resume(struct device *dev)
{
	dev_info(dev, "resuming\n");
	return 0;
}
1500
1501 int nvhost_client_device_get_resources(struct platform_device *dev)
1502 {
1503         int i;
1504         void __iomem *regs = NULL;
1505         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1506
1507         for (i = 0; i < dev->num_resources; i++) {
1508                 struct resource *r = NULL;
1509
1510                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1511                 /* We've run out of mem resources */
1512                 if (!r)
1513                         break;
1514
1515                 regs = devm_request_and_ioremap(&dev->dev, r);
1516                 if (!regs)
1517                         goto fail;
1518
1519                 pdata->aperture[i] = regs;
1520         }
1521
1522         return 0;
1523
1524 fail:
1525         dev_err(&dev->dev, "failed to get register memory\n");
1526
1527         return -ENXIO;
1528 }
1529 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1530
1531 /* This is a simple wrapper around request_firmware that takes
1532  * 'fw_name' and if available applies a SOC relative path prefix to it.
1533  * The caller is responsible for calling release_firmware later.
1534  */
1535 const struct firmware *
1536 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1537 {
1538         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1539         const struct firmware *fw;
1540         char *fw_path = NULL;
1541         int path_len, err;
1542
1543         /* This field is NULL when calling from SYS_EXIT.
1544            Add a check here to prevent crash in request_firmware */
1545         if (!current->fs) {
1546                 BUG();
1547                 return NULL;
1548         }
1549
1550         if (!fw_name)
1551                 return NULL;
1552
1553         if (op->soc_name) {
1554                 path_len = strlen(fw_name) + strlen(op->soc_name);
1555                 path_len += 2; /* for the path separator and zero terminator*/
1556
1557                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1558                                      GFP_KERNEL);
1559                 if (!fw_path)
1560                         return NULL;
1561
1562                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1563                 fw_name = fw_path;
1564         }
1565
1566         err = request_firmware(&fw, fw_name, &dev->dev);
1567         kfree(fw_path);
1568         if (err) {
1569                 dev_err(&dev->dev, "failed to get firmware\n");
1570                 return NULL;
1571         }
1572
1573         /* note: caller must release_firmware */
1574         return fw;
1575 }