video: tegra: host: Enable VI 2nd lvl clk gating
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * drivers/video/tegra/host/bus_client.c
3  *
4  * Tegra Graphics Host Client Module
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/spinlock.h>
24 #include <linux/fs.h>
25 #include <linux/cdev.h>
26 #include <linux/uaccess.h>
27 #include <linux/file.h>
28 #include <linux/clk.h>
29 #include <linux/hrtimer.h>
30 #include <linux/export.h>
31 #include <linux/firmware.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/tegra-soc.h>
34
35 #include <trace/events/nvhost.h>
36
37 #include <linux/io.h>
38 #include <linux/string.h>
39
40 #include <linux/nvhost.h>
41 #include <linux/nvhost_ioctl.h>
42
43 #include <mach/gpufuse.h>
44
45 #include "debug.h"
46 #include "bus_client.h"
47 #include "dev.h"
48 #include "class_ids.h"
49 #include "nvhost_as.h"
50 #include "nvhost_memmgr.h"
51 #include "chip_support.h"
52 #include "nvhost_acm.h"
53
54 #include "nvhost_syncpt.h"
55 #include "nvhost_channel.h"
56 #include "nvhost_job.h"
57 #include "nvhost_hwctx.h"
58 #include "user_hwctx.h"
59
60 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
61 {
62         int err = 0;
63         struct resource *r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
64         if (!r) {
65                 dev_err(&ndev->dev, "failed to get memory resource\n");
66                 return -ENODEV;
67         }
68
69         if (offset + 4 * count > resource_size(r)
70                         || (offset + 4 * count < offset))
71                 err = -EPERM;
72
73         return err;
74 }
75
76 int nvhost_read_module_regs(struct platform_device *ndev,
77                         u32 offset, int count, u32 *values)
78 {
79         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
80         void __iomem *p = pdata->aperture[0] + offset;
81         int err;
82
83         if (!pdata->aperture[0])
84                 return -ENODEV;
85
86         /* verify offset */
87         err = validate_reg(ndev, offset, count);
88         if (err)
89                 return err;
90
91         nvhost_module_busy(ndev);
92         while (count--) {
93                 *(values++) = readl(p);
94                 p += 4;
95         }
96         rmb();
97         nvhost_module_idle(ndev);
98
99         return 0;
100 }
101
102 int nvhost_write_module_regs(struct platform_device *ndev,
103                         u32 offset, int count, const u32 *values)
104 {
105         void __iomem *p;
106         int err;
107         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
108
109         if (!pdata->aperture[0])
110                 return -ENODEV;
111
112         p = pdata->aperture[0] + offset;
113
114         /* verify offset */
115         err = validate_reg(ndev, offset, count);
116         if (err)
117                 return err;
118
119         nvhost_module_busy(ndev);
120         while (count--) {
121                 writel(*(values++), p);
122                 p += 4;
123         }
124         wmb();
125         nvhost_module_idle(ndev);
126
127         return 0;
128 }
129
130 bool nvhost_client_can_writel(struct platform_device *pdev)
131 {
132         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
133         return !!pdata->aperture[0];
134 }
135 EXPORT_SYMBOL(nvhost_client_can_writel);
136
/* Write @val to word-indexed register @reg of the client device.
 * No aperture check is done here: callers are expected to have verified
 * nvhost_client_can_writel() and to hold the module powered. */
void nvhost_client_writel(struct platform_device *pdev, u32 val, u32 reg)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	writel(val, pdata->aperture[0] + reg * 4);
}
142
/* Read word-indexed register @reg of the client device.
 * As with nvhost_client_writel(), the caller is responsible for the
 * aperture being mapped and the module being powered. */
u32 nvhost_client_readl(struct platform_device *pdev, u32 reg)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
	return readl(pdata->aperture[0] + reg * 4);
}
148
/* Per-open-file state of a channel device node, stored in
 * filp->private_data between open and release. */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;	/* hw context; NULL if unsupported */
	struct nvhost_job *job;		/* job being built, if any */
	struct mem_mgr *memmgr;		/* set via SET_NVMAP_FD ioctl */
	u32 timeout;			/* job timeout (presumably ms; 0 = none) */
	u32 priority;			/* submit priority */
	int clientid;			/* unique id per open of the node */
	bool timeout_debug_dump;	/* dump state when a job times out */
};
159
/*
 * Release a channel file descriptor: undo everything nvhost_channelopen()
 * set up.  Also used as the common cleanup path when open itself fails.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		/* If our context is the one currently loaded on the
		 * channel, clear the pointer under submitlock so a
		 * concurrent submit cannot observe a stale context. */
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	if (priv->job)
		nvhost_job_put(priv->job);

	/* Drop the channel reference taken at open time. */
	nvhost_putchannel(priv->ch);

	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
191
/*
 * Open a channel device node: take a reference on the channel, allocate
 * the per-fd state, optionally allocate a hardware context, and set the
 * default priority/timeout for this client.
 *
 * NOTE(review): every failure path collapses to -ENOMEM even when the
 * underlying cause may differ.  Also, the fail path runs the full
 * nvhost_channelrelease(), which calls nvhost_module_remove_client()
 * even when nvhost_module_add_client() was the call that failed —
 * presumably remove is safe for a never-added client; verify.
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(dev_name(&ch->dev->dev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	if(nvhost_module_add_client(ch->dev, priv))
		goto fail;

	/* Allocate a hw context up front when the channel supports one;
	 * the module must be powered while the context is created. */
	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		nvhost_module_busy(ch->dev);
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		nvhost_module_idle(ch->dev);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);
	pdata = platform_get_drvdata(ch->dev);
	priv->timeout = pdata->nvhost_timeout_default;
	priv->timeout_debug_dump = true;
	/* Simulation/FPGA platforms are slow: disable job timeouts there. */
	if (!tegra_platform_is_silicon())
		priv->timeout = 0;

	return 0;
fail:
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
235
236 static int nvhost_ioctl_channel_alloc_obj_ctx(
237         struct nvhost_channel_userctx *ctx,
238         struct nvhost_alloc_obj_ctx_args *args)
239 {
240         int ret;
241
242         BUG_ON(!channel_op().alloc_obj);
243         nvhost_module_busy(ctx->ch->dev);
244         ret = channel_op().alloc_obj(ctx->hwctx, args);
245         nvhost_module_idle(ctx->ch->dev);
246         return ret;
247 }
248
249 static int nvhost_ioctl_channel_free_obj_ctx(
250         struct nvhost_channel_userctx *ctx,
251         struct nvhost_free_obj_ctx_args *args)
252 {
253         int ret;
254
255         BUG_ON(!channel_op().free_obj);
256         nvhost_module_busy(ctx->ch->dev);
257         ret = channel_op().free_obj(ctx->hwctx, args);
258         nvhost_module_idle(ctx->ch->dev);
259         return ret;
260 }
261
262 static int nvhost_ioctl_channel_alloc_gpfifo(
263         struct nvhost_channel_userctx *ctx,
264         struct nvhost_alloc_gpfifo_args *args)
265 {
266         int ret;
267
268         BUG_ON(!channel_op().alloc_gpfifo);
269         nvhost_module_busy(ctx->ch->dev);
270         ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
271         nvhost_module_idle(ctx->ch->dev);
272         return ret;
273 }
274
275 static int nvhost_ioctl_channel_submit_gpfifo(
276         struct nvhost_channel_userctx *ctx,
277         struct nvhost_submit_gpfifo_args *args)
278 {
279         void *gpfifo;
280         u32 size;
281         int ret = 0;
282
283         if (!ctx->hwctx || ctx->hwctx->has_timedout)
284                 return -ETIMEDOUT;
285
286         size = args->num_entries * sizeof(struct nvhost_gpfifo);
287
288         gpfifo = kzalloc(size, GFP_KERNEL);
289         if (!gpfifo)
290                 return -ENOMEM;
291
292         if (copy_from_user(gpfifo,
293                            (void __user *)(uintptr_t)args->gpfifo, size)) {
294                 ret = -EINVAL;
295                 goto clean_up;
296         }
297
298         BUG_ON(!channel_op().submit_gpfifo);
299
300         nvhost_module_busy(ctx->ch->dev);
301         ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
302                         args->num_entries, &args->fence, args->flags);
303         nvhost_module_idle(ctx->ch->dev);
304 clean_up:
305         kfree(gpfifo);
306         return ret;
307 }
308
309 static int nvhost_ioctl_channel_wait(
310         struct nvhost_channel_userctx *ctx,
311         struct nvhost_wait_args *args)
312 {
313         int ret;
314
315         BUG_ON(!channel_op().wait);
316         nvhost_module_busy(ctx->ch->dev);
317         ret = channel_op().wait(ctx->hwctx, args);
318         nvhost_module_idle(ctx->ch->dev);
319         return ret;
320 }
321
322 static int nvhost_ioctl_channel_zcull_bind(
323         struct nvhost_channel_userctx *ctx,
324         struct nvhost_zcull_bind_args *args)
325 {
326         int ret;
327
328         BUG_ON(!channel_zcull_op().bind);
329         nvhost_module_busy(ctx->ch->dev);
330         ret = channel_zcull_op().bind(ctx->hwctx, args);
331         nvhost_module_idle(ctx->ch->dev);
332         return ret;
333 }
334
335 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
336                 struct nvhost_submit_args *args)
337 {
338         struct nvhost_job *job;
339         int num_cmdbufs = args->num_cmdbufs;
340         int num_relocs = args->num_relocs;
341         int num_waitchks = args->num_waitchks;
342         int num_syncpt_incrs = args->num_syncpt_incrs;
343         struct nvhost_cmdbuf __user *cmdbufs =
344                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
345         struct nvhost_reloc __user *relocs =
346                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
347         struct nvhost_reloc_shift __user *reloc_shifts =
348                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
349         struct nvhost_waitchk __user *waitchks =
350                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
351         struct nvhost_syncpt_incr __user *syncpt_incrs =
352                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
353         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
354         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
355
356         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
357         u32 *local_waitbases = NULL;
358         int err, i, hwctx_syncpt_idx = -1;
359
360         if (num_syncpt_incrs > host->info.nb_pts)
361                 return -EINVAL;
362
363         job = nvhost_job_alloc(ctx->ch,
364                         ctx->hwctx,
365                         num_cmdbufs,
366                         num_relocs,
367                         num_waitchks,
368                         num_syncpt_incrs,
369                         ctx->memmgr);
370         if (!job)
371                 return -ENOMEM;
372
373         job->num_relocs = args->num_relocs;
374         job->num_waitchk = args->num_waitchks;
375         job->num_syncpts = args->num_syncpt_incrs;
376         job->priority = ctx->priority;
377         job->clientid = ctx->clientid;
378
379         while (num_cmdbufs) {
380                 struct nvhost_cmdbuf cmdbuf;
381                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
382                 if (err)
383                         goto fail;
384                 nvhost_job_add_gather(job,
385                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
386                 num_cmdbufs--;
387                 cmdbufs++;
388         }
389
390         err = copy_from_user(job->relocarray,
391                         relocs, sizeof(*relocs) * num_relocs);
392         if (err)
393                 goto fail;
394
395         err = copy_from_user(job->relocshiftarray,
396                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
397         if (err)
398                 goto fail;
399
400         err = copy_from_user(job->waitchk,
401                         waitchks, sizeof(*waitchks) * num_waitchks);
402         if (err)
403                 goto fail;
404
405         /* mass copy waitbases */
406         if (args->waitbases) {
407                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
408                         GFP_KERNEL);
409                 if (!local_waitbases) {
410                         err = -ENOMEM;
411                         goto fail;
412                 }
413
414                 err = copy_from_user(local_waitbases, waitbases,
415                         sizeof(u32) * num_syncpt_incrs);
416                 if (err) {
417                         err = -EINVAL;
418                         goto fail;
419                 }
420         }
421
422         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
423         if (!ctx->hwctx)
424                 hwctx_syncpt_idx = 0;
425
426         /*
427          * Go through each syncpoint from userspace. Here we:
428          * - Copy syncpoint information
429          * - Validate each syncpoint
430          * - Determine waitbase for each syncpoint
431          * - Determine the index of hwctx syncpoint in the table
432          */
433
434         for (i = 0; i < num_syncpt_incrs; ++i) {
435                 u32 waitbase;
436                 struct nvhost_syncpt_incr sp;
437
438                 /* Copy */
439                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
440                 if (err)
441                         goto fail;
442
443                 /* Validate */
444                 if (sp.syncpt_id > host->info.nb_pts) {
445                         err = -EINVAL;
446                         goto fail;
447                 }
448
449                 /* Determine waitbase */
450                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
451                         waitbase = local_waitbases[i];
452                 else
453                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
454                                 sp.syncpt_id);
455
456                 /* Store */
457                 job->sp[i].id = sp.syncpt_id;
458                 job->sp[i].incrs = sp.syncpt_incrs;
459                 job->sp[i].waitbase = waitbase;
460
461                 /* Find hwctx syncpoint */
462                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
463                         hwctx_syncpt_idx = i;
464         }
465
466         /* not needed anymore */
467         kfree(local_waitbases);
468         local_waitbases = NULL;
469
470         /* Is hwctx_syncpt_idx valid? */
471         if (hwctx_syncpt_idx == -1) {
472                 err = -EINVAL;
473                 goto fail;
474         }
475
476         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
477
478         trace_nvhost_channel_submit(ctx->ch->dev->name,
479                 job->num_gathers, job->num_relocs, job->num_waitchk,
480                 job->sp[job->hwctx_syncpt_idx].id,
481                 job->sp[job->hwctx_syncpt_idx].incrs);
482
483         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
484         if (err)
485                 goto fail;
486
487         if (args->timeout)
488                 job->timeout = min(ctx->timeout, args->timeout);
489         else
490                 job->timeout = ctx->timeout;
491         job->timeout_debug_dump = ctx->timeout_debug_dump;
492
493         err = nvhost_channel_submit(job);
494         if (err)
495                 goto fail_submit;
496
497         /* Deliver multiple fences back to the userspace */
498         if (fences)
499                 for (i = 0; i < num_syncpt_incrs; ++i) {
500                         u32 fence = job->sp[i].fence;
501                         err = copy_to_user(fences, &fence, sizeof(u32));
502                         if (err)
503                                 break;
504                         fences++;
505                 }
506
507         /* Deliver the fence using the old mechanism _only_ if a single
508          * syncpoint is used. */
509
510         if (num_syncpt_incrs == 1)
511                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
512         else
513                 args->fence = 0;
514
515         nvhost_job_put(job);
516
517         return 0;
518
519 fail_submit:
520         nvhost_job_unpin(job);
521 fail:
522         nvhost_job_put(job);
523         kfree(local_waitbases);
524         return err;
525 }
526
527 static int nvhost_ioctl_channel_set_ctxswitch(
528                 struct nvhost_channel_userctx *ctx,
529                 struct nvhost_set_ctxswitch_args *args)
530 {
531         struct nvhost_cmdbuf cmdbuf_save;
532         struct nvhost_cmdbuf cmdbuf_restore;
533         struct nvhost_syncpt_incr save_incr, restore_incr;
534         u32 save_waitbase, restore_waitbase;
535         struct nvhost_reloc reloc;
536         struct nvhost_hwctx_handler *ctxhandler = NULL;
537         struct nvhost_hwctx *nhwctx = NULL;
538         struct user_hwctx *hwctx;
539         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
540         int err;
541
542         /* Only channels with context support */
543         if (!ctx->hwctx)
544                 return -EFAULT;
545
546         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
547         if (args->num_cmdbufs_save != 1
548                         || args->num_cmdbufs_restore != 1
549                         || args->num_save_incrs != 1
550                         || args->num_restore_incrs != 1
551                         || args->num_relocs != 1)
552                 return -EINVAL;
553
554         err = copy_from_user(&cmdbuf_save,
555                         (void *)(uintptr_t)args->cmdbuf_save,
556                         sizeof(cmdbuf_save));
557         if (err)
558                 goto fail;
559
560         err = copy_from_user(&cmdbuf_restore,
561                         (void *)(uintptr_t)args->cmdbuf_restore,
562                         sizeof(cmdbuf_restore));
563         if (err)
564                 goto fail;
565
566         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
567                         sizeof(reloc));
568         if (err)
569                 goto fail;
570
571         err = copy_from_user(&save_incr,
572                         (void *)(uintptr_t)args->save_incrs,
573                         sizeof(save_incr));
574         if (err)
575                 goto fail;
576         err = copy_from_user(&save_waitbase,
577                         (void *)(uintptr_t)args->save_waitbases,
578                         sizeof(save_waitbase));
579
580         err = copy_from_user(&restore_incr,
581                         (void *)(uintptr_t)args->restore_incrs,
582                         sizeof(restore_incr));
583         if (err)
584                 goto fail;
585         err = copy_from_user(&restore_waitbase,
586                         (void *)(uintptr_t)args->restore_waitbases,
587                         sizeof(restore_waitbase));
588
589         if (save_incr.syncpt_id != pdata->syncpts[0]
590                         || restore_incr.syncpt_id != pdata->syncpts[0]
591                         || save_waitbase != pdata->waitbases[0]
592                         || restore_waitbase != pdata->waitbases[0]) {
593                 err = -EINVAL;
594                 goto fail;
595         }
596         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
597                         save_waitbase, ctx->ch);
598         if (!ctxhandler) {
599                 err = -ENOMEM;
600                 goto fail;
601         }
602
603         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
604         if (!nhwctx) {
605                 err = -ENOMEM;
606                 goto fail_hwctx;
607         }
608         hwctx = to_user_hwctx(nhwctx);
609
610         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
611                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
612                         cmdbuf_restore.mem, cmdbuf_restore.offset,
613                         cmdbuf_restore.words,
614                         pdata->syncpts[0], pdata->waitbases[0],
615                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
616
617         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
618         if (!nhwctx->memmgr)
619                 goto fail_set_restore;
620
621         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
622                         cmdbuf_restore.offset, cmdbuf_restore.words);
623         if (err)
624                 goto fail_set_restore;
625
626         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
627                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
628         if (err)
629                 goto fail_set_save;
630
631         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
632         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
633
634         /* Free old context */
635         ctx->hwctx->h->put(ctx->hwctx);
636         ctx->hwctx = nhwctx;
637
638         return 0;
639
640 fail_set_save:
641 fail_set_restore:
642         ctxhandler->put(&hwctx->hwctx);
643 fail_hwctx:
644         user_ctxhandler_free(ctxhandler);
645 fail:
646         return err;
647 }
648
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
/* CYCLE_STATS ioctl: forward directly to the per-chip handler; no power
 * management needed for this call. */
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	BUG_ON(!channel_op().cycle_stats);
	return channel_op().cycle_stats(ctx->hwctx, args);
}
#endif
660
/* READ_3D_REG ioctl: read a 3D-unit register through the channel with
 * the current hardware context taken into account. */
static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}
667
668 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
669 {
670         int i;
671         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
672
673         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
674                 if (pdata->clocks[i].moduleid == moduleid)
675                         return i;
676         }
677
678         /* Old user space is sending a random number in args. Return clock
679          * zero in these cases. */
680         return 0;
681 }
682
683 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
684         struct nvhost_clk_rate_args *arg)
685 {
686         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
687                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
688         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
689                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
690         int index = moduleid ?
691                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
692
693         return nvhost_module_set_rate(ctx->ch->dev,
694                         ctx, arg->rate, index, attr);
695 }
696
697 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
698         u32 moduleid, u32 *rate)
699 {
700         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
701
702         return nvhost_module_get_rate(ctx->ch->dev,
703                         (unsigned long *)rate, index);
704 }
705
/*
 * MODULE_REGRDWR ioctl: read or write a series of register blocks.
 * For each user-supplied offset, block_size bytes are transferred in
 * batches of at most 64 words through the on-stack bounce buffer
 * 'vals'.  Register range checking is performed inside
 * nvhost_read/write_module_regs() via validate_reg().
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			/* At most 64 words per round trip through vals[]. */
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
765
766 static u32 create_mask(u32 *words, int num)
767 {
768         int i;
769         u32 word = 0;
770         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
771                 word |= BIT(words[i]);
772
773         return word;
774 }
775
776 static long nvhost_channelctl(struct file *filp,
777         unsigned int cmd, unsigned long arg)
778 {
779         struct nvhost_channel_userctx *priv = filp->private_data;
780         struct device *dev = &priv->ch->dev->dev;
781         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
782         int err = 0;
783
784         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
785                 (_IOC_NR(cmd) == 0) ||
786                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
787                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
788                 return -EFAULT;
789
790         if (_IOC_DIR(cmd) & _IOC_WRITE) {
791                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
792                         return -EFAULT;
793         }
794
795         switch (cmd) {
796         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
797         {
798                 struct nvhost_device_data *pdata = \
799                         platform_get_drvdata(priv->ch->dev);
800                 ((struct nvhost_get_param_args *)buf)->value =
801                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
802                 break;
803         }
804         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
805         {
806                 struct nvhost_device_data *pdata = \
807                         platform_get_drvdata(priv->ch->dev);
808                 struct nvhost_get_param_arg *arg =
809                         (struct nvhost_get_param_arg *)buf;
810                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
811                                 || !pdata->syncpts[arg->param])
812                         return -EINVAL;
813                 arg->value = pdata->syncpts[arg->param];
814                 break;
815         }
816         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
817         {
818                 struct nvhost_device_data *pdata = \
819                         platform_get_drvdata(priv->ch->dev);
820                 ((struct nvhost_get_param_args *)buf)->value =
821                         create_mask(pdata->waitbases,
822                                         NVHOST_MODULE_MAX_WAITBASES);
823                 break;
824         }
825         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
826         {
827                 struct nvhost_device_data *pdata = \
828                         platform_get_drvdata(priv->ch->dev);
829                 struct nvhost_get_param_arg *arg =
830                         (struct nvhost_get_param_arg *)buf;
831                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
832                                 || !pdata->waitbases[arg->param])
833                         return -EINVAL;
834                 arg->value = pdata->waitbases[arg->param];
835                 break;
836         }
837         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
838         {
839                 struct nvhost_device_data *pdata = \
840                         platform_get_drvdata(priv->ch->dev);
841                 ((struct nvhost_get_param_args *)buf)->value =
842                         create_mask(pdata->modulemutexes,
843                                         NVHOST_MODULE_MAX_MODMUTEXES);
844                 break;
845         }
846         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
847         {
848                 struct nvhost_device_data *pdata = \
849                         platform_get_drvdata(priv->ch->dev);
850                 struct nvhost_get_param_arg *arg =
851                         (struct nvhost_get_param_arg *)buf;
852                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
853                                 || !pdata->modulemutexes[arg->param])
854                         return -EINVAL;
855                 arg->value = pdata->modulemutexes[arg->param];
856                 break;
857         }
858         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
859         {
860                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
861                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
862
863                 if (IS_ERR(new_client)) {
864                         err = PTR_ERR(new_client);
865                         break;
866                 }
867                 if (priv->memmgr)
868                         nvhost_memmgr_put_mgr(priv->memmgr);
869
870                 priv->memmgr = new_client;
871
872                 if (priv->hwctx)
873                         priv->hwctx->memmgr = new_client;
874
875                 break;
876         }
877         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
878                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
879                 break;
880         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
881                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
882                 break;
883         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
884                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
885                 break;
886         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
887                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
888                 break;
889         case NVHOST_IOCTL_CHANNEL_WAIT:
890                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
891                 break;
892         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
893                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
894                 break;
895 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
896         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
897                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
898                 break;
899 #endif
900         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
901                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
902                 break;
903         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
904         {
905                 struct nvhost_clk_rate_args *arg =
906                                 (struct nvhost_clk_rate_args *)buf;
907
908                 err = nvhost_ioctl_channel_get_rate(priv,
909                                 arg->moduleid, &arg->rate);
910                 break;
911         }
912         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
913         {
914                 struct nvhost_clk_rate_args *arg =
915                                 (struct nvhost_clk_rate_args *)buf;
916
917                 err = nvhost_ioctl_channel_set_rate(priv, arg);
918                 break;
919         }
920         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
921                 priv->timeout =
922                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
923                 dev_dbg(&priv->ch->dev->dev,
924                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
925                         __func__, priv->timeout, priv);
926                 break;
927         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
928                 ((struct nvhost_get_param_args *)buf)->value =
929                                 priv->hwctx->has_timedout;
930                 break;
931         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
932                 priv->priority =
933                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
934                 break;
935         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
936                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
937                 break;
938         case NVHOST_IOCTL_CHANNEL_SUBMIT:
939                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
940                 break;
941         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
942                 priv->timeout = (u32)
943                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
944                 priv->timeout_debug_dump = !((u32)
945                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
946                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
947                 dev_dbg(&priv->ch->dev->dev,
948                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
949                         __func__, priv->timeout, priv);
950                 break;
951         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
952                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
953                 break;
954         default:
955                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
956                 err = -ENOTTY;
957                 break;
958         }
959
960         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
961                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
962
963         return err;
964 }
965
/* File operations backing the per-channel device node (/dev/nvhost-<dev>) */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.unlocked_ioctl = nvhost_channelctl
};
972
973 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
974 {
975         struct nvhost_channel_userctx *userctx;
976         struct file *f = fget(fd);
977         if (!f)
978                 return 0;
979
980         if (f->f_op != &nvhost_channelops) {
981                 fput(f);
982                 return 0;
983         }
984
985         userctx = (struct nvhost_channel_userctx *)f->private_data;
986         fput(f);
987         return userctx->hwctx;
988 }
989
990
/* File operations backing the address-space node (/dev/nvhost-as-<dev>) */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
997
/*
 * Maps a host1x class id to the short name used when building the
 * device node name.  Consulted first by get_device_name_for_dev().
 */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1010
/*
 * Maps an nvhost module id to a short device-node name.  Used as the
 * second choice by get_device_name_for_dev() when no class id matches.
 */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1023
1024 static const char *get_device_name_for_dev(struct platform_device *dev)
1025 {
1026         int i;
1027         /* first choice is to use the class id if specified */
1028         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1029                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1030                 if (pdata->class == class_id_dev_name_map[i].class_id)
1031                         return class_id_dev_name_map[i].dev_name;
1032         }
1033
1034         /* second choice is module name if specified */
1035         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1036                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1037                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1038                         return module_id_dev_name_map[i].dev_name;
1039         }
1040
1041         /* last choice is to just use the given dev name */
1042         return dev->name;
1043 }
1044
1045 static struct device *nvhost_client_device_create(
1046         struct platform_device *pdev, struct cdev *cdev,
1047         const char *cdev_name, int devno,
1048         const struct file_operations *ops)
1049 {
1050         struct nvhost_master *host = nvhost_get_host(pdev);
1051         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1052         const char *use_dev_name;
1053         struct device *dev;
1054         int err;
1055
1056         nvhost_dbg_fn("");
1057
1058         BUG_ON(!host);
1059
1060         cdev_init(cdev, ops);
1061         cdev->owner = THIS_MODULE;
1062
1063         err = cdev_add(cdev, devno, 1);
1064         if (err < 0) {
1065                 dev_err(&pdev->dev,
1066                         "failed to add chan %i cdev\n", pdata->index);
1067                 return NULL;
1068         }
1069         use_dev_name = get_device_name_for_dev(pdev);
1070
1071         dev = device_create(host->nvhost_class,
1072                         NULL, devno, NULL,
1073                         (pdev->id <= 0) ?
1074                         IFACE_NAME "-%s%s" :
1075                         IFACE_NAME "-%s%s.%d",
1076                         cdev_name, use_dev_name, pdev->id);
1077
1078         if (IS_ERR(dev)) {
1079                 err = PTR_ERR(dev);
1080                 dev_err(&pdev->dev,
1081                         "failed to create %s %s device for %s\n",
1082                         use_dev_name, cdev_name, pdev->name);
1083                 return NULL;
1084         }
1085
1086         return dev;
1087 }
1088
1089 int nvhost_client_user_init(struct platform_device *dev)
1090 {
1091         int err, devno;
1092         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1093         struct nvhost_channel *ch = pdata->channel;
1094
1095         BUG_ON(!ch);
1096         // reserve 3 minor #s for <dev> and as-<dev> and ctrl-<dev>
1097
1098         err = alloc_chrdev_region(&devno, 0, 3, IFACE_NAME);
1099         if (err < 0) {
1100                 dev_err(&dev->dev, "failed to allocate devno\n");
1101                 goto fail;
1102         }
1103
1104         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1105                                 "", devno, &nvhost_channelops);
1106         if (ch->node == NULL)
1107                 goto fail;
1108         ++devno;
1109         ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1110                                 "as-", devno, &nvhost_asops);
1111         if (ch->as_node == NULL)
1112                 goto fail;
1113
1114         if (pdata->ctrl_ops) {
1115                 ++devno;
1116                 pdata->ctrl_node = nvhost_client_device_create(dev,
1117                                         &pdata->ctrl_cdev, "ctrl-",
1118                                         devno, pdata->ctrl_ops);
1119                 if (pdata->ctrl_node == NULL)
1120                         goto fail;
1121         }
1122
1123         return 0;
1124 fail:
1125         return err;
1126 }
1127
/*
 * nvhost_client_device_init - bring up a client engine device
 * @dev: client platform device
 *
 * Allocates a channel, creates debugfs and user-space nodes, registers
 * the device in the nvhost device list, resets the engine's syncpoints
 * and sets up DMA parameters.  Returns 0 on success or a negative errno.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	/* optional per-chip tick counter hook */
	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	/* optional devfreq/scaling hook */
	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	/* NOTE(review): the slave device registered here does not appear
	   to be unregistered in nvhost_client_device_release() - confirm
	   who owns its teardown */
	if (pdata->slave) {
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
	}

	return 0;

fail:
	/* Add clean-up */
	/* NOTE(review): only the channel is freed here; nodes created by
	   nvhost_client_user_init() are not torn down - verify against
	   the release path */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1187
/*
 * nvhost_client_device_release - tear down a client engine device
 * @dev: client platform device
 *
 * Reverses nvhost_client_device_init(): deinits the module, removes
 * the device from the nvhost list, destroys the main channel node and
 * frees the channel.  Always returns 0.
 */
int nvhost_client_device_release(struct platform_device *dev)
{
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = pdata->channel;

	/* Release nvhost module resources */
	nvhost_module_deinit(dev);

	/* Remove from nvhost device list */
	nvhost_device_list_remove(dev);

	/* Release chardev and device node for user space */
	/* NOTE(review): only the main cdev is destroyed; the as- and
	   ctrl- nodes created in nvhost_client_user_init() are not
	   cleaned up here, nor is the chrdev region released - confirm
	   whether that is handled elsewhere */
	device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
	cdev_del(&ch->cdev);

	/* Free nvhost channel */
	nvhost_free_channel(ch);

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
1212
1213 int nvhost_client_device_suspend(struct device *dev)
1214 {
1215         int ret = 0;
1216         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1217
1218         ret = nvhost_module_suspend(dev);
1219         if (ret)
1220                 return ret;
1221
1222         ret = nvhost_channel_suspend(pdata->channel);
1223         if (ret)
1224                 return ret;
1225
1226         dev_info(dev, "suspend status: %d\n", ret);
1227
1228         return ret;
1229 }
1230 EXPORT_SYMBOL(nvhost_client_device_suspend);
1231
/*
 * nvhost_client_device_resume - PM resume hook for a client engine
 * @dev: device being resumed
 *
 * Resumes the module and reports it; cannot fail.
 */
int nvhost_client_device_resume(struct device *dev)
{
	nvhost_module_resume(dev);

	dev_info(dev, "resuming\n");

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_resume);
1239
1240 int nvhost_client_device_get_resources(struct platform_device *dev)
1241 {
1242         int i;
1243         void __iomem *regs = NULL;
1244         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1245
1246         for (i = 0; i < dev->num_resources; i++) {
1247                 struct resource *r = NULL;
1248
1249                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1250                 /* We've run out of mem resources */
1251                 if (!r)
1252                         break;
1253
1254                 regs = devm_request_and_ioremap(&dev->dev, r);
1255                 if (!regs)
1256                         goto fail;
1257
1258                 pdata->aperture[i] = regs;
1259         }
1260
1261         return 0;
1262
1263 fail:
1264         dev_err(&dev->dev, "failed to get register memory\n");
1265
1266         return -ENXIO;
1267 }
1268 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1269
1270 /* This is a simple wrapper around request_firmware that takes
1271  * 'fw_name' and if available applies a SOC relative path prefix to it.
1272  * The caller is responsible for calling release_firmware later.
1273  */
1274 const struct firmware *
1275 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1276 {
1277         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1278         const struct firmware *fw;
1279         char *fw_path = NULL;
1280         int path_len, err;
1281
1282         /* This field is NULL when calling from SYS_EXIT.
1283            Add a check here to prevent crash in request_firmware */
1284         if (!current->fs) {
1285                 BUG();
1286                 return NULL;
1287         }
1288
1289         if (!fw_name)
1290                 return NULL;
1291
1292         if (op->soc_name) {
1293                 path_len = strlen(fw_name) + strlen(op->soc_name);
1294                 path_len += 2; /* for the path separator and zero terminator*/
1295
1296                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1297                                      GFP_KERNEL);
1298                 if (!fw_path)
1299                         return NULL;
1300
1301                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1302                 fw_name = fw_path;
1303         }
1304
1305         err = request_firmware(&fw, fw_name, &dev->dev);
1306         kfree(fw_path);
1307         if (err) {
1308                 dev_err(&dev->dev, "failed to get firmware\n");
1309                 return NULL;
1310         }
1311
1312         /* note: caller must release_firmware */
1313         return fw;
1314 }