video: tegra: host: Fix race in hwctx release
[linux-3.10.git] drivers/video/tegra/host/bus_client.c
/*
 * drivers/video/tegra/host/bus_client.c
 *
 * Tegra Graphics Host Client Module
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/firmware.h>

#include <trace/events/nvhost.h>

#include <linux/io.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include <mach/gpufuse.h>
#include <mach/hardware.h>

#include "debug.h"
#include "bus_client.h"
#include "dev.h"
#include "class_ids.h"
#include "nvhost_as.h"
#include "nvhost_memmgr.h"
#include "chip_support.h"
#include "nvhost_acm.h"

#include "nvhost_syncpt.h"
#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "nvhost_hwctx.h"
#include "user_hwctx.h"

static int validate_reg(struct platform_device *ndev, u32 offset, int count)
{
	struct resource *r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
	int err = 0;

	/* a missing MEM resource would otherwise crash resource_size() */
	if (!r)
		return -ENODEV;

	if (offset + 4 * count > resource_size(r)
			|| (offset + 4 * count < offset))
		err = -EPERM;

	return err;
}

int nvhost_read_module_regs(struct platform_device *ndev,
			u32 offset, int count, u32 *values)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
	void __iomem *p;
	int err;

	if (!pdata->aperture[0])
		return -ENODEV;

	p = pdata->aperture[0] + offset;

	/* verify offset */
	err = validate_reg(ndev, offset, count);
	if (err)
		return err;

	nvhost_module_busy(ndev);
	while (count--) {
		*(values++) = readl(p);
		p += 4;
	}
	rmb();
	nvhost_module_idle(ndev);

	return 0;
}

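/*
 * Illustrative only (not part of the driver): a hedged sketch of how a
 * client module might read a small block of its registers through the
 * helper above. The device pointer and the 0x100 offset are hypothetical.
 *
 *	u32 vals[4];
 *	int err = nvhost_read_module_regs(pdev, 0x100, 4, vals);
 *	if (err)
 *		return err;  // -ENODEV without an aperture, -EPERM if out of range
 */
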
int nvhost_write_module_regs(struct platform_device *ndev,
			u32 offset, int count, const u32 *values)
{
	void __iomem *p;
	int err;
	struct nvhost_device_data *pdata = platform_get_drvdata(ndev);

	if (!pdata->aperture[0])
		return -ENODEV;

	p = pdata->aperture[0] + offset;

	/* verify offset */
	err = validate_reg(ndev, offset, count);
	if (err)
		return err;

	nvhost_module_busy(ndev);
	while (count--) {
		writel(*(values++), p);
		p += 4;
	}
	wmb();
	nvhost_module_idle(ndev);

	return 0;
}

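/*
 * Per-open-fd channel state: the submit header currently being streamed
 * in, the job under construction, the client's memory manager handle, and
 * the per-client timeout and priority settings.
 */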
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;
	struct nvhost_hwctx *hwctx;
	struct nvhost_submit_hdr_ext hdr;
	int num_relocshifts;
	struct nvhost_job *job;
	struct mem_mgr *memmgr;
	u32 timeout;
	u32 priority;
	int clientid;
	bool timeout_debug_dump;
};

static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		ctx->h->put(ctx);
	}

	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch);

	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}

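/*
 * Note on the race fix in the release path above: cur_ctx is cleared under
 * ch->submitlock, the same lock the submit path holds while it reads
 * cur_ctx to decide whether a context switch is needed. Clearing it
 * without the lock could let a concurrent submit keep using a hwctx whose
 * last reference is about to be dropped.
 */
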
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(dev_name(&ch->dev->dev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	if (nvhost_module_add_client(ch->dev, priv))
		goto fail;

	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		nvhost_module_busy(ch->dev);
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		nvhost_module_idle(ch->dev);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);
	priv->timeout = CONFIG_TEGRA_GRHOST_DEFAULT_TIMEOUT;
	priv->timeout_debug_dump = true;
	if (!tegra_platform_is_silicon())
		priv->timeout = 0;

	return 0;
fail:
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}

static int set_submit(struct nvhost_channel_userctx *ctx)
{
	struct platform_device *ndev = ctx->ch->dev;
	struct nvhost_master *host = nvhost_get_host(ndev);

	/* submit should have at least 1 cmdbuf */
	if (!ctx->hdr.num_cmdbufs ||
			!nvhost_syncpt_is_valid(&host->syncpt,
				ctx->hdr.syncpt_id))
		return -EIO;

	if (!ctx->memmgr) {
		dev_err(&ndev->dev, "no nvmap context set\n");
		return -EFAULT;
	}

	if (ctx->job) {
		dev_warn(&ndev->dev, "performing channel submit when a job already exists\n");
		nvhost_job_put(ctx->job);
	}
	ctx->job = nvhost_job_alloc(ctx->ch,
			ctx->hwctx,
			ctx->hdr.num_cmdbufs,
			ctx->hdr.num_relocs,
			ctx->hdr.num_waitchks,
			1,
			ctx->memmgr);
	if (!ctx->job)
		return -ENOMEM;
	ctx->job->timeout = ctx->timeout;
	ctx->job->sp->id = ctx->hdr.syncpt_id;
	ctx->job->sp->incrs = ctx->hdr.syncpt_incrs;
	ctx->job->hwctx_syncpt_idx = 0;
	ctx->job->num_syncpts = 1;
	ctx->job->priority = ctx->priority;
	ctx->job->clientid = ctx->clientid;
	ctx->job->timeout_debug_dump = ctx->timeout_debug_dump;

	if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
		ctx->num_relocshifts = ctx->hdr.num_relocs;

	return 0;
}

static void reset_submit(struct nvhost_channel_userctx *ctx)
{
	ctx->hdr.num_cmdbufs = 0;
	ctx->hdr.num_relocs = 0;
	ctx->num_relocshifts = 0;
	ctx->hdr.num_waitchks = 0;

	if (ctx->job) {
		nvhost_job_put(ctx->job);
		ctx->job = NULL;
	}
}

static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;
	struct nvhost_job *job = priv->job;
	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
	const char *chname = priv->ch->dev->name;

	if (!job)
		return -EIO;

	while (remaining) {
		size_t consumed;
		if (!hdr->num_relocs &&
		    !priv->num_relocshifts &&
		    !hdr->num_cmdbufs &&
		    !hdr->num_waitchks) {
			consumed = sizeof(struct nvhost_submit_hdr);
			if (remaining < consumed)
				break;
			if (copy_from_user(hdr, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
			err = set_submit(priv);
			if (err)
				break;
			trace_nvhost_channel_write_submit(chname,
			  count, hdr->num_cmdbufs, hdr->num_relocs,
			  hdr->syncpt_id, hdr->syncpt_incrs);
		} else if (hdr->num_cmdbufs) {
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_cmdbuf(chname,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			hdr->num_cmdbufs--;
		} else if (hdr->num_relocs) {
			int numrelocs = remaining / sizeof(struct nvhost_reloc);
			if (!numrelocs)
				break;
			numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
			consumed = numrelocs * sizeof(struct nvhost_reloc);
			if (copy_from_user(&job->relocarray[job->num_relocs],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			while (numrelocs) {
				struct nvhost_reloc *reloc =
					&job->relocarray[job->num_relocs];
				trace_nvhost_channel_write_reloc(chname,
					reloc->cmdbuf_mem,
					reloc->cmdbuf_offset,
					reloc->target,
					reloc->target_offset);
				job->num_relocs++;
				hdr->num_relocs--;
				numrelocs--;
			}
		} else if (hdr->num_waitchks) {
			int numwaitchks =
				(remaining / sizeof(struct nvhost_waitchk));
			if (!numwaitchks)
				break;
			numwaitchks = min_t(int,
				numwaitchks, hdr->num_waitchks);
			consumed = numwaitchks * sizeof(struct nvhost_waitchk);
			if (copy_from_user(&job->waitchk[job->num_waitchk],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_waitchks(
			  chname, numwaitchks);
			job->num_waitchk += numwaitchks;
			hdr->num_waitchks -= numwaitchks;
		} else if (priv->num_relocshifts) {
			int next_shift =
				job->num_relocs - priv->num_relocshifts;
			int num =
				(remaining / sizeof(struct nvhost_reloc_shift));
			if (!num)
				break;
			num = min_t(int, num, priv->num_relocshifts);
			consumed = num * sizeof(struct nvhost_reloc_shift);
			if (copy_from_user(&job->relocshiftarray[next_shift],
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->num_relocshifts -= num;
		} else {
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		dev_err(&priv->ch->dev->dev, "channel write error\n");
		reset_submit(priv);
		return err;
	}

	return count - remaining;
}

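/*
 * Illustrative only: a hedged sketch of the stream a legacy user-space
 * client would write() into this fd. Structure layouts come from
 * linux/nvhost_ioctl.h; the handle and fd values are hypothetical.
 *
 *	struct nvhost_submit_hdr hdr = {
 *		.syncpt_id = id,	// must be valid for this host1x
 *		.syncpt_incrs = 1,
 *		.num_cmdbufs = 1,
 *		.num_relocs = 0,
 *	};
 *	struct nvhost_cmdbuf cmdbuf = {
 *		.mem = handle, .offset = 0, .words = nwords,
 *	};
 *	write(fd, &hdr, sizeof(hdr));		// parsed by the header branch above
 *	write(fd, &cmdbuf, sizeof(cmdbuf));	// queued via nvhost_job_add_gather()
 */
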
static int nvhost_ioctl_channel_flush(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_get_param_args *args,
	int null_kickoff)
{
	struct platform_device *ndev = ctx->ch->dev;
	int err;

	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

	if (!ctx->job ||
	    ctx->hdr.num_relocs ||
	    ctx->hdr.num_cmdbufs ||
	    ctx->hdr.num_waitchks) {
		reset_submit(ctx);
		dev_err(&ndev->dev, "channel submit out of sync\n");
		return -EFAULT;
	}

	err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
	if (err) {
		dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
		goto fail;
	}

	if (nvhost_debug_null_kickoff_pid == current->tgid)
		null_kickoff = 1;
	ctx->job->null_kickoff = null_kickoff;

	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
		ctx->timeout = nvhost_debug_force_timeout_val;
	}

	/* context switch if needed, and submit user's gathers to the channel */
	err = nvhost_channel_submit(ctx->job);
	args->value = ctx->job->sp->fence;

fail:
	if (err)
		nvhost_job_unpin(ctx->job);

	nvhost_job_put(ctx->job);
	ctx->job = NULL;

	return err;
}

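/*
 * Flush is the commit point for the write()-streamed job: the gathers are
 * pinned, handed to nvhost_channel_submit() (which context-switches if the
 * channel was last used by a different hwctx), and the resulting syncpoint
 * fence is returned to user space in args->value. Whether or not the
 * submit succeeds, the job reference held by the fd is dropped here.
 */
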
static int nvhost_ioctl_channel_alloc_obj_ctx(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_alloc_obj_ctx_args *args)
{
	int ret;

	BUG_ON(!channel_op().alloc_obj);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op().alloc_obj(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_alloc_obj_ctx_old(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_alloc_obj_ctx_old_args *args)
{
	struct nvhost_alloc_obj_ctx_args new_args;
	int err;

	new_args.class_num = args->class_num;
	err = nvhost_ioctl_channel_alloc_obj_ctx(ctx, &new_args);
	if (!err)
		args->obj_id = new_args.obj_id;
	return err;
}

static int nvhost_ioctl_channel_free_obj_ctx(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_free_obj_ctx_args *args)
{
	int ret;

	BUG_ON(!channel_op().free_obj);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op().free_obj(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_free_obj_ctx_old(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_free_obj_ctx_old_args *args)
{
	struct nvhost_free_obj_ctx_args new_args;
	new_args.obj_id = args->obj_id;
	return nvhost_ioctl_channel_free_obj_ctx(ctx, &new_args);
}

static int nvhost_ioctl_channel_alloc_gpfifo(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_alloc_gpfifo_args *args)
{
	int ret;

	BUG_ON(!channel_op().alloc_gpfifo);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_submit_gpfifo(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_submit_gpfifo_args *args)
{
	void *gpfifo;
	u32 size;
	int ret = 0;

	size = args->num_entries * sizeof(struct nvhost_gpfifo);

	gpfifo = kzalloc(size, GFP_KERNEL);
	if (!gpfifo)	/* kzalloc() returns NULL, never an ERR_PTR */
		return -ENOMEM;

	if (copy_from_user(gpfifo,
			   (void __user *)(uintptr_t)args->gpfifo, size)) {
		ret = -EFAULT;
		goto clean_up;
	}

	BUG_ON(!channel_op().submit_gpfifo);

	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
			args->num_entries, &args->fence, args->flags);
	nvhost_module_idle(ctx->ch->dev);
clean_up:
	kfree(gpfifo);
	return ret;
}

static int nvhost_ioctl_channel_submit_gpfifo_old(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_submit_gpfifo_old_args *args)
{
	int ret;
	struct nvhost_submit_gpfifo_args new_args;

	new_args.gpfifo = (u64)(uintptr_t)args->gpfifo;
	new_args.num_entries = args->num_entries;
	new_args.fence = args->fence;
	new_args.flags = args->flags;
	ret = nvhost_ioctl_channel_submit_gpfifo(ctx, &new_args);
	if (!ret)
		args->fence = new_args.fence;
	return ret;
}

static int nvhost_ioctl_channel_wait(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_wait_args *args)
{
	int ret;

	BUG_ON(!channel_op().wait);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op().wait(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_zcull_bind(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_zcull_bind_args *args)
{
	int ret;

	BUG_ON(!channel_zcull_op().bind);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_zcull_op().bind(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_zcull_bind_old(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_zcull_bind_old_args *args)
{
	struct nvhost_zcull_bind_args new_args;

	new_args.gpu_va = args->gpu_va;
	new_args.mode = args->mode;
	return nvhost_ioctl_channel_zcull_bind(ctx, &new_args);
}

static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
		struct nvhost_submit_args *args)
{
	struct nvhost_job *job;
	int num_cmdbufs = args->num_cmdbufs;
	int num_relocs = args->num_relocs;
	int num_waitchks = args->num_waitchks;
	int num_syncpt_incrs = args->num_syncpt_incrs;
	struct nvhost_cmdbuf __user *cmdbufs =
		(struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
	struct nvhost_reloc __user *relocs =
		(struct nvhost_reloc *)(uintptr_t)args->relocs;
	struct nvhost_reloc_shift __user *reloc_shifts =
		(struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
	struct nvhost_waitchk __user *waitchks =
		(struct nvhost_waitchk *)(uintptr_t)args->waitchks;
	struct nvhost_syncpt_incr __user *syncpt_incrs =
		(struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
	u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
	u32 __user *fences = (u32 *)(uintptr_t)args->fences;

	struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
	u32 *local_waitbases = NULL;
	int err, i, hwctx_syncpt_idx = -1;

	if (num_syncpt_incrs > host->info.nb_pts)
		return -EINVAL;

	job = nvhost_job_alloc(ctx->ch,
			ctx->hwctx,
			num_cmdbufs,
			num_relocs,
			num_waitchks,
			num_syncpt_incrs,
			ctx->memmgr);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->num_syncpts = args->num_syncpt_incrs;
	job->priority = ctx->priority;
	job->clientid = ctx->clientid;

	while (num_cmdbufs) {
		struct nvhost_cmdbuf cmdbuf;
		/* copy_from_user() returns the number of uncopied bytes,
		 * not an errno, so map failures to -EFAULT explicitly */
		if (copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}
		nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		cmdbufs++;
	}

	if (copy_from_user(job->relocarray,
			relocs, sizeof(*relocs) * num_relocs)) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(job->relocshiftarray,
			reloc_shifts, sizeof(*reloc_shifts) * num_relocs)) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(job->waitchk,
			waitchks, sizeof(*waitchks) * num_waitchks)) {
		err = -EFAULT;
		goto fail;
	}

	/* mass copy waitbases */
	if (args->waitbases) {
		local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
			GFP_KERNEL);
		if (!local_waitbases) {
			err = -ENOMEM;
			goto fail;
		}
		if (copy_from_user(local_waitbases, waitbases,
			sizeof(u32) * num_syncpt_incrs)) {
			err = -EINVAL;
			goto fail;
		}
	}

	/* set valid id for hwctx_syncpt_idx if no hwctx is present */
	if (!ctx->hwctx)
		hwctx_syncpt_idx = 0;

	/*
	 * Go through each syncpoint from userspace. Here we:
	 * - Copy syncpoint information
	 * - Validate each syncpoint
	 * - Determine waitbase for each syncpoint
	 * - Determine the index of hwctx syncpoint in the table
	 */

	for (i = 0; i < num_syncpt_incrs; ++i) {
		u32 waitbase;
		struct nvhost_syncpt_incr sp;

		/* Copy */
		if (copy_from_user(&sp, syncpt_incrs + i, sizeof(sp))) {
			err = -EFAULT;
			goto fail;
		}

		/* Validate: ids run from 0 to nb_pts - 1 */
		if (sp.syncpt_id >= host->info.nb_pts) {
			err = -EINVAL;
			goto fail;
		}

		/* Determine waitbase */
		if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
			waitbase = local_waitbases[i];
		else
			waitbase = nvhost_syncpt_get_waitbase(job->ch,
				sp.syncpt_id);

		/* Store */
		job->sp[i].id = sp.syncpt_id;
		job->sp[i].incrs = sp.syncpt_incrs;
		job->sp[i].waitbase = waitbase;

		/* Find hwctx syncpoint */
		if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
			hwctx_syncpt_idx = i;
	}

	/* not needed anymore */
	kfree(local_waitbases);
	local_waitbases = NULL;

	/* Is hwctx_syncpt_idx valid? */
	if (hwctx_syncpt_idx == -1) {
		err = -EINVAL;
		goto fail;
	}

	job->hwctx_syncpt_idx = hwctx_syncpt_idx;

	trace_nvhost_channel_submit(ctx->ch->dev->name,
		job->num_gathers, job->num_relocs, job->num_waitchk,
		job->sp[job->hwctx_syncpt_idx].id,
		job->sp[job->hwctx_syncpt_idx].incrs);

	err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
	if (err)
		goto fail;

	if (args->timeout)
		job->timeout = min(ctx->timeout, args->timeout);
	else
		job->timeout = ctx->timeout;
	job->timeout_debug_dump = ctx->timeout_debug_dump;

	err = nvhost_channel_submit(job);
	if (err)
		goto fail_submit;

	/* Deliver multiple fences back to the userspace */
	if (fences)
		for (i = 0; i < num_syncpt_incrs; ++i) {
			u32 fence = job->sp[i].fence;
			if (copy_to_user(fences, &fence, sizeof(u32)))
				break;
			fences++;
		}

	/* Deliver the fence using the old mechanism _only_ if a single
	 * syncpoint is used. */

	if (num_syncpt_incrs == 1)
		args->fence = job->sp[job->hwctx_syncpt_idx].fence;
	else
		args->fence = 0;

	nvhost_job_put(job);

	return 0;

fail_submit:
	nvhost_job_unpin(job);
fail:
	nvhost_job_put(job);
	kfree(local_waitbases);
	return err;
}

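/*
 * Illustrative only: a hedged sketch of a single-syncpoint SUBMIT call from
 * user space, matching the validation above (one nvhost_syncpt_incr; the
 * per-syncpoint fence array is optional). The fd and cmdbuf are
 * hypothetical.
 *
 *	struct nvhost_syncpt_incr incr = { .syncpt_id = id, .syncpt_incrs = 1 };
 *	struct nvhost_submit_args args = {
 *		.num_cmdbufs = 1,
 *		.cmdbufs = (uintptr_t)&cmdbuf,
 *		.num_syncpt_incrs = 1,
 *		.syncpt_incrs = (uintptr_t)&incr,
 *	};
 *	ioctl(fd, NVHOST_IOCTL_CHANNEL_SUBMIT, &args);
 *	// args.fence now holds the fence, since exactly one syncpt was used
 */
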
static int nvhost_ioctl_channel_set_ctxswitch(
		struct nvhost_channel_userctx *ctx,
		struct nvhost_set_ctxswitch_args *args)
{
	struct nvhost_cmdbuf cmdbuf_save;
	struct nvhost_cmdbuf cmdbuf_restore;
	struct nvhost_syncpt_incr save_incr, restore_incr;
	u32 save_waitbase, restore_waitbase;
	struct nvhost_reloc reloc;
	struct nvhost_hwctx_handler *ctxhandler = NULL;
	struct nvhost_hwctx *nhwctx = NULL;
	struct user_hwctx *hwctx;
	struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
	int err;

	/* Only channels with context support */
	if (!ctx->hwctx)
		return -EFAULT;

	/* We don't yet support more than one syncpoint increment per submit */
	if (args->num_cmdbufs_save != 1
			|| args->num_cmdbufs_restore != 1
			|| args->num_save_incrs != 1
			|| args->num_restore_incrs != 1
			|| args->num_relocs != 1)
		return -EINVAL;

	if (copy_from_user(&cmdbuf_save,
			(void *)(uintptr_t)args->cmdbuf_save,
			sizeof(cmdbuf_save))) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(&cmdbuf_restore,
			(void *)(uintptr_t)args->cmdbuf_restore,
			sizeof(cmdbuf_restore))) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
			sizeof(reloc))) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(&save_incr,
			(void *)(uintptr_t)args->save_incrs,
			sizeof(save_incr))) {
		err = -EFAULT;
		goto fail;
	}
	if (copy_from_user(&save_waitbase,
			(void *)(uintptr_t)args->save_waitbases,
			sizeof(save_waitbase))) {
		err = -EFAULT;
		goto fail;
	}

	if (copy_from_user(&restore_incr,
			(void *)(uintptr_t)args->restore_incrs,
			sizeof(restore_incr))) {
		err = -EFAULT;
		goto fail;
	}
	if (copy_from_user(&restore_waitbase,
			(void *)(uintptr_t)args->restore_waitbases,
			sizeof(restore_waitbase))) {
		err = -EFAULT;
		goto fail;
	}

	if (save_incr.syncpt_id != pdata->syncpts[0]
			|| restore_incr.syncpt_id != pdata->syncpts[0]
			|| save_waitbase != pdata->waitbases[0]
			|| restore_waitbase != pdata->waitbases[0]) {
		err = -EINVAL;
		goto fail;
	}
	ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
			save_waitbase, ctx->ch);
	if (!ctxhandler) {
		err = -ENOMEM;
		goto fail;
	}

	nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
	if (!nhwctx) {
		err = -ENOMEM;
		goto fail_hwctx;
	}
	hwctx = to_user_hwctx(nhwctx);

	trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
			cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
			cmdbuf_restore.mem, cmdbuf_restore.offset,
			cmdbuf_restore.words,
			pdata->syncpts[0], pdata->waitbases[0],
			save_incr.syncpt_incrs, restore_incr.syncpt_incrs);

	nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
	err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
			cmdbuf_restore.offset, cmdbuf_restore.words);
	if (err)
		goto fail_set_restore;

	err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
			cmdbuf_save.offset, cmdbuf_save.words, &reloc);
	if (err)
		goto fail_set_save;

	hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
	hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;

	/* Free old context */
	ctx->hwctx->h->put(ctx->hwctx);
	ctx->hwctx = nhwctx;

	return 0;

fail_set_save:
fail_set_restore:
	ctxhandler->put(&hwctx->hwctx);
fail_hwctx:
	user_ctxhandler_free(ctxhandler);
fail:
	return err;
}

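/*
 * The contract enforced above: user space supplies exactly one save and one
 * restore command buffer, one relocation, and one syncpoint increment each
 * for save and restore, and those must use the module's first syncpoint and
 * waitbase (pdata->syncpts[0] / pdata->waitbases[0]). Only then is the old
 * hwctx released and replaced by the user-provided one.
 */
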
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	int ret;

	BUG_ON(!channel_op().cycle_stats);
	ret = channel_op().cycle_stats(ctx->hwctx, args);
	return ret;
}
#endif

static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}

static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
{
	int i;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
		if (pdata->clocks[i].moduleid == moduleid)
			return i;
	}

	/* Old user space is sending a random number in args. Return clock
	 * zero in these cases. */
	return 0;
}

static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
	struct nvhost_clk_rate_args *arg)
{
	u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
			& ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
	u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
			& ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
	int index = moduleid ?
			moduleid_to_index(ctx->ch->dev, moduleid) : 0;

	return nvhost_module_set_rate(ctx->ch->dev,
			ctx, arg->rate, index, attr);
}

static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
	u32 moduleid, u32 *rate)
{
	unsigned long lrate = 0;
	int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
	int err;

	/* use a local unsigned long rather than casting the u32 pointer,
	 * which would overrun the caller's storage on 64-bit builds */
	err = nvhost_module_get_rate(ctx->ch->dev, &lrate, index);
	if (!err)
		*rate = lrate;
	return err;
}

static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}

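/*
 * Illustrative only: a hedged sketch of reading one 16-word block through
 * REGRDWR. The 0x100 register offset is hypothetical; block_size is in
 * bytes and must be a multiple of 4, and the 64-word on-stack buffer above
 * batches larger blocks transparently.
 *
 *	u32 offset = 0x100, out[16];
 *	struct nvhost_ctrl_module_regrdwr_args args = {
 *		.num_offsets = 1,
 *		.block_size = sizeof(out),
 *		.offsets = (uintptr_t)&offset,
 *		.values = (uintptr_t)out,
 *		.write = 0,
 *	};
 *	ioctl(fd, NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR, &args);
 */
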
static u32 create_mask(u32 *words, int num)
{
	int i;
	u32 word = 0;

	/* a zero entry terminates the list; the mask is 32 bits wide, so
	 * ids above 31 cannot be represented in it */
	for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
		word |= BIT(words[i]);

	return word;
}

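/*
 * Worked example: for pdata->syncpts = { 4, 5, 0, ... } the loop above
 * stops at the zero entry and returns BIT(4) | BIT(5) = 0x30, the bitmask
 * user space receives from GET_SYNCPOINTS.
 */
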
static long nvhost_channelctl(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	struct device *dev = &priv->ch->dev->dev;
	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
	int err = 0;

	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
		(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
		return -EFAULT;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	switch (cmd) {
	case NVHOST_IOCTL_CHANNEL_FLUSH:
		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
		break;
	case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
	{
		struct nvhost_submit_hdr_ext *hdr;

		if (priv->hdr.num_relocs ||
		    priv->num_relocshifts ||
		    priv->hdr.num_cmdbufs ||
		    priv->hdr.num_waitchks) {
			reset_submit(priv);
			dev_err(&priv->ch->dev->dev,
				"channel submit out of sync\n");
			err = -EIO;
			break;
		}

		hdr = (struct nvhost_submit_hdr_ext *)buf;
		if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
			dev_err(&priv->ch->dev->dev,
				"submit version %d > max supported %d\n",
				hdr->submit_version,
				NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
			err = -EINVAL;
			break;
		}
		memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
		err = set_submit(priv);
		trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
			priv->hdr.submit_version,
			priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
			priv->hdr.num_waitchks,
			priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;
		if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
				|| !pdata->syncpts[arg->param])
			return -EINVAL;
		arg->value = pdata->syncpts[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->waitbases,
					NVHOST_MODULE_MAX_WAITBASES);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;
		if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
				|| !pdata->waitbases[arg->param])
			return -EINVAL;
		arg->value = pdata->waitbases[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->modulemutexes,
					NVHOST_MODULE_MAX_MODMUTEXES);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;
		if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
				|| !pdata->modulemutexes[arg->param])
			return -EINVAL;
		arg->value = pdata->modulemutexes[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
	{
		int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
		struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);

		if (IS_ERR(new_client)) {
			err = PTR_ERR(new_client);
			break;
		}
		if (priv->memmgr)
			nvhost_memmgr_put_mgr(priv->memmgr);

		priv->memmgr = new_client;

		if (priv->hwctx)
			priv->hwctx->memmgr = new_client;

		break;
	}
	case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
		err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX_OLD:
		err = nvhost_ioctl_channel_alloc_obj_ctx_old(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
		err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX_OLD:
		err = nvhost_ioctl_channel_free_obj_ctx_old(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
		err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
		err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO_OLD:
		err = nvhost_ioctl_channel_submit_gpfifo_old(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_WAIT:
		err = nvhost_ioctl_channel_wait(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
		err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ZCULL_BIND_OLD:
		err = nvhost_ioctl_channel_zcull_bind_old(priv, (void *)buf);
		break;

#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
	case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
		err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
		break;
#endif
	case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
		err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_ioctl_channel_get_rate(priv,
				arg->moduleid, &arg->rate);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_ioctl_channel_set_rate(priv, arg);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
		priv->timeout =
			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
		dev_dbg(&priv->ch->dev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
		/* guard against channels opened without a hwctx handler */
		((struct nvhost_get_param_args *)buf)->value =
			priv->hwctx ? priv->hwctx->has_timedout : 0;
		break;
	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
		priv->priority =
			(u32)((struct nvhost_set_priority_args *)buf)->priority;
		break;
	case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
		err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT:
		err = nvhost_ioctl_channel_submit(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
		priv->timeout = (u32)
			((struct nvhost_set_timeout_ex_args *)buf)->timeout;
		priv->timeout_debug_dump = !((u32)
			((struct nvhost_set_timeout_ex_args *)buf)->flags &
			(1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
		dev_dbg(&priv->ch->dev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
		err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
		break;
	default:
		nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ) &&
	    copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
		err = -EFAULT;	/* map the raw uncopied-bytes count to an errno */

	return err;
}

static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.write = nvhost_channelwrite,
	.unlocked_ioctl = nvhost_channelctl
};

struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
{
	struct nvhost_channel_userctx *userctx;
	struct file *f = fget(fd);
	if (!f)
		return NULL;

	if (f->f_op != &nvhost_channelops) {
		fput(f);
		return NULL;
	}

	userctx = (struct nvhost_channel_userctx *)f->private_data;
	fput(f);
	/* the file reference is dropped here; the caller relies on the fd
	 * staying open for the returned hwctx to remain valid */
	return userctx->hwctx;
}


static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
	.unlocked_ioctl = nvhost_as_dev_ctl,
};

static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};

static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};

static const char *get_device_name_for_dev(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
	int i;

	/* first choice is to use the class id if specified */
	for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
		if (pdata->class == class_id_dev_name_map[i].class_id)
			return class_id_dev_name_map[i].dev_name;
	}

	/* second choice is module name if specified */
	for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
		if (pdata->moduleid == module_id_dev_name_map[i].module_id)
			return module_id_dev_name_map[i].dev_name;
	}

	/* last choice is to just use the given dev name */
	return dev->name;
}

static struct device *nvhost_client_device_create(
	struct platform_device *pdev, struct cdev *cdev,
	const char *cdev_name, dev_t devno,
	const struct file_operations *ops)
{
	struct nvhost_master *host = nvhost_get_host(pdev);
	struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
	const char *use_dev_name;
	struct device *dev;
	int err;

	nvhost_dbg_fn("");

	BUG_ON(!host);

	cdev_init(cdev, ops);
	cdev->owner = THIS_MODULE;

	err = cdev_add(cdev, devno, 1);
	if (err < 0) {
		dev_err(&pdev->dev,
			"failed to add chan %i cdev\n", pdata->index);
		return NULL;
	}
	use_dev_name = get_device_name_for_dev(pdev);

	dev = device_create(host->nvhost_class,
			NULL, devno, NULL,
			(pdev->id <= 0) ?
			IFACE_NAME "-%s%s" :
			IFACE_NAME "-%s%s.%d",
			cdev_name, use_dev_name, pdev->id);

	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev_err(&pdev->dev,
			"failed to create %s %s device for %s\n",
			use_dev_name, cdev_name, pdev->name);
		cdev_del(cdev);	/* don't leak the cdev added above */
		return NULL;
	}

	return dev;
}

int nvhost_client_user_init(struct platform_device *dev)
{
	int err;
	dev_t devno;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	struct nvhost_channel *ch = pdata->channel;

	BUG_ON(!ch);

	/* reserve 3 minor #s for <dev>, as-<dev> and ctrl-<dev> */
	err = alloc_chrdev_region(&devno, 0, 3, IFACE_NAME);
	if (err < 0) {
		dev_err(&dev->dev, "failed to allocate devno\n");
		goto fail;
	}

	ch->node = nvhost_client_device_create(dev, &ch->cdev,
				"", devno, &nvhost_channelops);
	if (ch->node == NULL) {
		err = -ENOMEM;
		goto fail;
	}
	++devno;
	ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
				"as-", devno, &nvhost_asops);
	if (ch->as_node == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	if (pdata->ctrl_ops) {
		++devno;
		pdata->ctrl_node = nvhost_client_device_create(dev,
					&pdata->ctrl_cdev, "ctrl-",
					devno, pdata->ctrl_ops);
		if (pdata->ctrl_node == NULL) {
			err = -ENOMEM;
			goto fail;
		}
	}

	return 0;
fail:
	return err;
}

int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	dev_info(&dev->dev, "initialized\n");

	return 0;

fail:
	/* Add clean-up */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);

int nvhost_client_device_release(struct platform_device *dev)
{
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = pdata->channel;

	/* Release nvhost module resources */
	nvhost_module_deinit(dev);

	/* Remove from nvhost device list */
	nvhost_device_list_remove(dev);

	/* Release chardev and device node for user space */
	device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
	cdev_del(&ch->cdev);

	/* Free nvhost channel */
	nvhost_free_channel(ch);

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);

int nvhost_client_device_suspend(struct device *dev)
{
	int ret = 0;
	struct nvhost_device_data *pdata = dev_get_drvdata(dev);

	ret = nvhost_channel_suspend(pdata->channel);
	if (ret)
		return ret;

	dev_info(dev, "suspend status: %d\n", ret);

	return ret;
}

int nvhost_client_device_resume(struct device *dev)
{
	dev_info(dev, "resuming\n");
	return 0;
}

int nvhost_client_device_get_resources(struct platform_device *dev)
{
	int i;
	void __iomem *regs = NULL;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = NULL;

		r = platform_get_resource(dev, IORESOURCE_MEM, i);
		/* We've run out of mem resources */
		if (!r)
			break;

		regs = devm_request_and_ioremap(&dev->dev, r);
		if (!regs)
			goto fail;

		pdata->aperture[i] = regs;
	}

	return 0;

fail:
	dev_err(&dev->dev, "failed to get register memory\n");

	return -ENXIO;
}
EXPORT_SYMBOL(nvhost_client_device_get_resources);

/* This is a simple wrapper around request_firmware that takes
 * 'fw_name' and, if available, applies a SoC-relative path prefix to it.
 * The caller is responsible for calling release_firmware later.
 */
const struct firmware *
nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
{
	struct nvhost_chip_support *op = nvhost_get_chip_ops();
	const struct firmware *fw;
	char *fw_path = NULL;
	int path_len, err;

	/* This field is NULL when calling from SYS_EXIT.
	   Add a check here to prevent a crash in request_firmware */
	if (!current->fs) {
		BUG();
		return NULL;
	}

	if (!fw_name)
		return NULL;

	if (op->soc_name) {
		path_len = strlen(fw_name) + strlen(op->soc_name);
		path_len += 2; /* for the path separator and zero terminator */

		fw_path = kzalloc(sizeof(*fw_path) * path_len,
				     GFP_KERNEL);
		if (!fw_path)
			return NULL;

		sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
		fw_name = fw_path;
	}

	err = request_firmware(&fw, fw_name, &dev->dev);
	kfree(fw_path);
	if (err) {
		dev_err(&dev->dev, "failed to get firmware\n");
		return NULL;
	}

	/* note: caller must release_firmware */
	return fw;
}
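
/*
 * Illustrative only: a hedged sketch of the intended call pattern. The
 * firmware file name is hypothetical; with soc_name set to, say,
 * "tegra114", the helper would request "tegra114/nvhost_msenc.fw".
 *
 *	const struct firmware *fw =
 *		nvhost_client_request_firmware(pdev, "nvhost_msenc.fw");
 *	if (!fw)
 *		return -ENOENT;
 *	// ... load fw->data, fw->size into the engine ...
 *	release_firmware(fw);
 */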