ARM: tegra: Move platform detect from <mach/hardware.h> to <linux/tegra-soc.h>
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * drivers/video/tegra/host/bus_client.c
3  *
4  * Tegra Graphics Host Client Module
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/spinlock.h>
24 #include <linux/fs.h>
25 #include <linux/cdev.h>
26 #include <linux/uaccess.h>
27 #include <linux/file.h>
28 #include <linux/clk.h>
29 #include <linux/hrtimer.h>
30 #include <linux/export.h>
31 #include <linux/firmware.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/tegra-soc.h>
34
35 #include <trace/events/nvhost.h>
36
37 #include <linux/io.h>
38 #include <linux/string.h>
39
40 #include <linux/nvhost.h>
41 #include <linux/nvhost_ioctl.h>
42
43 #include <mach/gpufuse.h>
44
45 #include "debug.h"
46 #include "bus_client.h"
47 #include "dev.h"
48 #include "class_ids.h"
49 #include "nvhost_as.h"
50 #include "nvhost_memmgr.h"
51 #include "chip_support.h"
52 #include "nvhost_acm.h"
53
54 #include "nvhost_syncpt.h"
55 #include "nvhost_channel.h"
56 #include "nvhost_job.h"
57 #include "nvhost_hwctx.h"
58 #include "user_hwctx.h"
59
60 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
61 {
62         int err = 0;
63         struct resource *r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
64         if (!r) {
65                 dev_err(&ndev->dev, "failed to get memory resource\n");
66                 return -ENODEV;
67         }
68
69         if (offset + 4 * count > resource_size(r)
70                         || (offset + 4 * count < offset))
71                 err = -EPERM;
72
73         return err;
74 }
75
76 int nvhost_read_module_regs(struct platform_device *ndev,
77                         u32 offset, int count, u32 *values)
78 {
79         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
80         void __iomem *p = pdata->aperture[0] + offset;
81         int err;
82
83         if (!pdata->aperture[0])
84                 return -ENODEV;
85
86         /* verify offset */
87         err = validate_reg(ndev, offset, count);
88         if (err)
89                 return err;
90
91         nvhost_module_busy(ndev);
92         while (count--) {
93                 *(values++) = readl(p);
94                 p += 4;
95         }
96         rmb();
97         nvhost_module_idle(ndev);
98
99         return 0;
100 }
101
102 int nvhost_write_module_regs(struct platform_device *ndev,
103                         u32 offset, int count, const u32 *values)
104 {
105         void __iomem *p;
106         int err;
107         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
108
109         if (!pdata->aperture[0])
110                 return -ENODEV;
111
112         p = pdata->aperture[0] + offset;
113
114         /* verify offset */
115         err = validate_reg(ndev, offset, count);
116         if (err)
117                 return err;
118
119         nvhost_module_busy(ndev);
120         while (count--) {
121                 writel(*(values++), p);
122                 p += 4;
123         }
124         wmb();
125         nvhost_module_idle(ndev);
126
127         return 0;
128 }
129
130 void nvhost_client_writel(struct platform_device *pdev, u32 val, u32 reg)
131 {
132         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
133         writel(val, pdata->aperture[0] + reg * 4);
134 }
135
136 u32 nvhost_client_readl(struct platform_device *pdev, u32 reg)
137 {
138         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
139         return readl(pdata->aperture[0] + reg * 4);
140 }
141
/* Per-open-file-descriptor state for a channel device node. Created in
 * nvhost_channelopen() and torn down in nvhost_channelrelease(). */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;	/* hw context; NULL when the channel
					 * has no ctxhandler */
	struct nvhost_job *job;		/* job reference, released on close */
	struct mem_mgr *memmgr;		/* memory manager set via
					 * SET_NVMAP_FD ioctl; may be NULL */
	u32 timeout;			/* submit timeout (assumed ms — TODO
					 * confirm); 0 = wait forever */
	u32 priority;			/* submit priority (NVHOST_PRIORITY_*) */
	int clientid;			/* unique id taken from the host's
					 * clientid counter */
	bool timeout_debug_dump;	/* dump channel state when a submit
					 * times out */
};
152
/*
 * Release a channel file descriptor: undo, in reverse order, everything
 * nvhost_channelopen() set up for this client context.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		/* If this hwctx is still the channel's current context,
		 * detach it under submitlock so no submit can restore
		 * from a context that is about to be freed. */
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch);

	/* NOTE(review): presumably tolerates a NULL memmgr (fd closed
	 * before SET_NVMAP_FD) — confirm in nvhost_memmgr_put_mgr() */
	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
184
185 static int nvhost_channelopen(struct inode *inode, struct file *filp)
186 {
187         struct nvhost_channel_userctx *priv;
188         struct nvhost_channel *ch;
189         struct nvhost_device_data *pdata;
190
191         ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
192         ch = nvhost_getchannel(ch);
193         if (!ch)
194                 return -ENOMEM;
195         trace_nvhost_channel_open(dev_name(&ch->dev->dev));
196
197         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
198         if (!priv) {
199                 nvhost_putchannel(ch);
200                 return -ENOMEM;
201         }
202         filp->private_data = priv;
203         priv->ch = ch;
204         if(nvhost_module_add_client(ch->dev, priv))
205                 goto fail;
206
207         if (ch->ctxhandler && ch->ctxhandler->alloc) {
208                 nvhost_module_busy(ch->dev);
209                 priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
210                 nvhost_module_idle(ch->dev);
211                 if (!priv->hwctx)
212                         goto fail;
213         }
214         priv->priority = NVHOST_PRIORITY_MEDIUM;
215         priv->clientid = atomic_add_return(1,
216                         &nvhost_get_host(ch->dev)->clientid);
217         pdata = platform_get_drvdata(ch->dev);
218         priv->timeout = pdata->nvhost_timeout_default;
219         priv->timeout_debug_dump = true;
220         if (!tegra_platform_is_silicon())
221                 priv->timeout = 0;
222
223         return 0;
224 fail:
225         nvhost_channelrelease(inode, filp);
226         return -ENOMEM;
227 }
228
229 static int nvhost_ioctl_channel_alloc_obj_ctx(
230         struct nvhost_channel_userctx *ctx,
231         struct nvhost_alloc_obj_ctx_args *args)
232 {
233         int ret;
234
235         BUG_ON(!channel_op().alloc_obj);
236         nvhost_module_busy(ctx->ch->dev);
237         ret = channel_op().alloc_obj(ctx->hwctx, args);
238         nvhost_module_idle(ctx->ch->dev);
239         return ret;
240 }
241
242 static int nvhost_ioctl_channel_free_obj_ctx(
243         struct nvhost_channel_userctx *ctx,
244         struct nvhost_free_obj_ctx_args *args)
245 {
246         int ret;
247
248         BUG_ON(!channel_op().free_obj);
249         nvhost_module_busy(ctx->ch->dev);
250         ret = channel_op().free_obj(ctx->hwctx, args);
251         nvhost_module_idle(ctx->ch->dev);
252         return ret;
253 }
254
255 static int nvhost_ioctl_channel_alloc_gpfifo(
256         struct nvhost_channel_userctx *ctx,
257         struct nvhost_alloc_gpfifo_args *args)
258 {
259         int ret;
260
261         BUG_ON(!channel_op().alloc_gpfifo);
262         nvhost_module_busy(ctx->ch->dev);
263         ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
264         nvhost_module_idle(ctx->ch->dev);
265         return ret;
266 }
267
268 static int nvhost_ioctl_channel_submit_gpfifo(
269         struct nvhost_channel_userctx *ctx,
270         struct nvhost_submit_gpfifo_args *args)
271 {
272         void *gpfifo;
273         u32 size;
274         int ret = 0;
275
276         if (!ctx->hwctx || ctx->hwctx->has_timedout)
277                 return -ETIMEDOUT;
278
279         size = args->num_entries * sizeof(struct nvhost_gpfifo);
280
281         gpfifo = kzalloc(size, GFP_KERNEL);
282         if (!gpfifo)
283                 return -ENOMEM;
284
285         if (copy_from_user(gpfifo,
286                            (void __user *)(uintptr_t)args->gpfifo, size)) {
287                 ret = -EINVAL;
288                 goto clean_up;
289         }
290
291         BUG_ON(!channel_op().submit_gpfifo);
292
293         nvhost_module_busy(ctx->ch->dev);
294         ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
295                         args->num_entries, &args->fence, args->flags);
296         nvhost_module_idle(ctx->ch->dev);
297 clean_up:
298         kfree(gpfifo);
299         return ret;
300 }
301
302 static int nvhost_ioctl_channel_wait(
303         struct nvhost_channel_userctx *ctx,
304         struct nvhost_wait_args *args)
305 {
306         int ret;
307
308         BUG_ON(!channel_op().wait);
309         nvhost_module_busy(ctx->ch->dev);
310         ret = channel_op().wait(ctx->hwctx, args);
311         nvhost_module_idle(ctx->ch->dev);
312         return ret;
313 }
314
315 static int nvhost_ioctl_channel_zcull_bind(
316         struct nvhost_channel_userctx *ctx,
317         struct nvhost_zcull_bind_args *args)
318 {
319         int ret;
320
321         BUG_ON(!channel_zcull_op().bind);
322         nvhost_module_busy(ctx->ch->dev);
323         ret = channel_zcull_op().bind(ctx->hwctx, args);
324         nvhost_module_idle(ctx->ch->dev);
325         return ret;
326 }
327
328 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
329                 struct nvhost_submit_args *args)
330 {
331         struct nvhost_job *job;
332         int num_cmdbufs = args->num_cmdbufs;
333         int num_relocs = args->num_relocs;
334         int num_waitchks = args->num_waitchks;
335         int num_syncpt_incrs = args->num_syncpt_incrs;
336         struct nvhost_cmdbuf __user *cmdbufs =
337                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
338         struct nvhost_reloc __user *relocs =
339                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
340         struct nvhost_reloc_shift __user *reloc_shifts =
341                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
342         struct nvhost_waitchk __user *waitchks =
343                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
344         struct nvhost_syncpt_incr __user *syncpt_incrs =
345                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
346         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
347         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
348
349         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
350         u32 *local_waitbases = NULL;
351         int err, i, hwctx_syncpt_idx = -1;
352
353         if (num_syncpt_incrs > host->info.nb_pts)
354                 return -EINVAL;
355
356         job = nvhost_job_alloc(ctx->ch,
357                         ctx->hwctx,
358                         num_cmdbufs,
359                         num_relocs,
360                         num_waitchks,
361                         num_syncpt_incrs,
362                         ctx->memmgr);
363         if (!job)
364                 return -ENOMEM;
365
366         job->num_relocs = args->num_relocs;
367         job->num_waitchk = args->num_waitchks;
368         job->num_syncpts = args->num_syncpt_incrs;
369         job->priority = ctx->priority;
370         job->clientid = ctx->clientid;
371
372         while (num_cmdbufs) {
373                 struct nvhost_cmdbuf cmdbuf;
374                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
375                 if (err)
376                         goto fail;
377                 nvhost_job_add_gather(job,
378                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
379                 num_cmdbufs--;
380                 cmdbufs++;
381         }
382
383         err = copy_from_user(job->relocarray,
384                         relocs, sizeof(*relocs) * num_relocs);
385         if (err)
386                 goto fail;
387
388         err = copy_from_user(job->relocshiftarray,
389                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
390         if (err)
391                 goto fail;
392
393         err = copy_from_user(job->waitchk,
394                         waitchks, sizeof(*waitchks) * num_waitchks);
395         if (err)
396                 goto fail;
397
398         /* mass copy waitbases */
399         if (args->waitbases) {
400                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
401                         GFP_KERNEL);
402                 if (!local_waitbases) {
403                         err = -ENOMEM;
404                         goto fail;
405                 }
406
407                 err = copy_from_user(local_waitbases, waitbases,
408                         sizeof(u32) * num_syncpt_incrs);
409                 if (err) {
410                         err = -EINVAL;
411                         goto fail;
412                 }
413         }
414
415         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
416         if (!ctx->hwctx)
417                 hwctx_syncpt_idx = 0;
418
419         /*
420          * Go through each syncpoint from userspace. Here we:
421          * - Copy syncpoint information
422          * - Validate each syncpoint
423          * - Determine waitbase for each syncpoint
424          * - Determine the index of hwctx syncpoint in the table
425          */
426
427         for (i = 0; i < num_syncpt_incrs; ++i) {
428                 u32 waitbase;
429                 struct nvhost_syncpt_incr sp;
430
431                 /* Copy */
432                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
433                 if (err)
434                         goto fail;
435
436                 /* Validate */
437                 if (sp.syncpt_id > host->info.nb_pts) {
438                         err = -EINVAL;
439                         goto fail;
440                 }
441
442                 /* Determine waitbase */
443                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
444                         waitbase = local_waitbases[i];
445                 else
446                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
447                                 sp.syncpt_id);
448
449                 /* Store */
450                 job->sp[i].id = sp.syncpt_id;
451                 job->sp[i].incrs = sp.syncpt_incrs;
452                 job->sp[i].waitbase = waitbase;
453
454                 /* Find hwctx syncpoint */
455                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
456                         hwctx_syncpt_idx = i;
457         }
458
459         /* not needed anymore */
460         kfree(local_waitbases);
461         local_waitbases = NULL;
462
463         /* Is hwctx_syncpt_idx valid? */
464         if (hwctx_syncpt_idx == -1) {
465                 err = -EINVAL;
466                 goto fail;
467         }
468
469         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
470
471         trace_nvhost_channel_submit(ctx->ch->dev->name,
472                 job->num_gathers, job->num_relocs, job->num_waitchk,
473                 job->sp[job->hwctx_syncpt_idx].id,
474                 job->sp[job->hwctx_syncpt_idx].incrs);
475
476         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
477         if (err)
478                 goto fail;
479
480         if (args->timeout)
481                 job->timeout = min(ctx->timeout, args->timeout);
482         else
483                 job->timeout = ctx->timeout;
484         job->timeout_debug_dump = ctx->timeout_debug_dump;
485
486         err = nvhost_channel_submit(job);
487         if (err)
488                 goto fail_submit;
489
490         /* Deliver multiple fences back to the userspace */
491         if (fences)
492                 for (i = 0; i < num_syncpt_incrs; ++i) {
493                         u32 fence = job->sp[i].fence;
494                         err = copy_to_user(fences, &fence, sizeof(u32));
495                         if (err)
496                                 break;
497                         fences++;
498                 }
499
500         /* Deliver the fence using the old mechanism _only_ if a single
501          * syncpoint is used. */
502
503         if (num_syncpt_incrs == 1)
504                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
505         else
506                 args->fence = 0;
507
508         nvhost_job_put(job);
509
510         return 0;
511
512 fail_submit:
513         nvhost_job_unpin(job);
514 fail:
515         nvhost_job_put(job);
516         kfree(local_waitbases);
517         return err;
518 }
519
520 static int nvhost_ioctl_channel_set_ctxswitch(
521                 struct nvhost_channel_userctx *ctx,
522                 struct nvhost_set_ctxswitch_args *args)
523 {
524         struct nvhost_cmdbuf cmdbuf_save;
525         struct nvhost_cmdbuf cmdbuf_restore;
526         struct nvhost_syncpt_incr save_incr, restore_incr;
527         u32 save_waitbase, restore_waitbase;
528         struct nvhost_reloc reloc;
529         struct nvhost_hwctx_handler *ctxhandler = NULL;
530         struct nvhost_hwctx *nhwctx = NULL;
531         struct user_hwctx *hwctx;
532         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
533         int err;
534
535         /* Only channels with context support */
536         if (!ctx->hwctx)
537                 return -EFAULT;
538
539         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
540         if (args->num_cmdbufs_save != 1
541                         || args->num_cmdbufs_restore != 1
542                         || args->num_save_incrs != 1
543                         || args->num_restore_incrs != 1
544                         || args->num_relocs != 1)
545                 return -EINVAL;
546
547         err = copy_from_user(&cmdbuf_save,
548                         (void *)(uintptr_t)args->cmdbuf_save,
549                         sizeof(cmdbuf_save));
550         if (err)
551                 goto fail;
552
553         err = copy_from_user(&cmdbuf_restore,
554                         (void *)(uintptr_t)args->cmdbuf_restore,
555                         sizeof(cmdbuf_restore));
556         if (err)
557                 goto fail;
558
559         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
560                         sizeof(reloc));
561         if (err)
562                 goto fail;
563
564         err = copy_from_user(&save_incr,
565                         (void *)(uintptr_t)args->save_incrs,
566                         sizeof(save_incr));
567         if (err)
568                 goto fail;
569         err = copy_from_user(&save_waitbase,
570                         (void *)(uintptr_t)args->save_waitbases,
571                         sizeof(save_waitbase));
572
573         err = copy_from_user(&restore_incr,
574                         (void *)(uintptr_t)args->restore_incrs,
575                         sizeof(restore_incr));
576         if (err)
577                 goto fail;
578         err = copy_from_user(&restore_waitbase,
579                         (void *)(uintptr_t)args->restore_waitbases,
580                         sizeof(restore_waitbase));
581
582         if (save_incr.syncpt_id != pdata->syncpts[0]
583                         || restore_incr.syncpt_id != pdata->syncpts[0]
584                         || save_waitbase != pdata->waitbases[0]
585                         || restore_waitbase != pdata->waitbases[0]) {
586                 err = -EINVAL;
587                 goto fail;
588         }
589         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
590                         save_waitbase, ctx->ch);
591         if (!ctxhandler) {
592                 err = -ENOMEM;
593                 goto fail;
594         }
595
596         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
597         if (!nhwctx) {
598                 err = -ENOMEM;
599                 goto fail_hwctx;
600         }
601         hwctx = to_user_hwctx(nhwctx);
602
603         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
604                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
605                         cmdbuf_restore.mem, cmdbuf_restore.offset,
606                         cmdbuf_restore.words,
607                         pdata->syncpts[0], pdata->waitbases[0],
608                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
609
610         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
611         if (!nhwctx->memmgr)
612                 goto fail_set_restore;
613
614         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
615                         cmdbuf_restore.offset, cmdbuf_restore.words);
616         if (err)
617                 goto fail_set_restore;
618
619         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
620                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
621         if (err)
622                 goto fail_set_save;
623
624         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
625         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
626
627         /* Free old context */
628         ctx->hwctx->h->put(ctx->hwctx);
629         ctx->hwctx = nhwctx;
630
631         return 0;
632
633 fail_set_save:
634 fail_set_restore:
635         ctxhandler->put(&hwctx->hwctx);
636 fail_hwctx:
637         user_ctxhandler_free(ctxhandler);
638 fail:
639         return err;
640 }
641
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
/* CYCLE_STATS ioctl: pass straight through to the chip-specific op.
 * No busy/idle bracketing here, matching the original behavior. */
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	BUG_ON(!channel_op().cycle_stats);
	return channel_op().cycle_stats(ctx->hwctx, args);
}
#endif
653
654 static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
655         struct nvhost_read_3d_reg_args *args)
656 {
657         return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
658                         args->offset, &args->value);
659 }
660
661 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
662 {
663         int i;
664         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
665
666         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
667                 if (pdata->clocks[i].moduleid == moduleid)
668                         return i;
669         }
670
671         /* Old user space is sending a random number in args. Return clock
672          * zero in these cases. */
673         return 0;
674 }
675
676 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
677         struct nvhost_clk_rate_args *arg)
678 {
679         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
680                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
681         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
682                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
683         int index = moduleid ?
684                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
685
686         return nvhost_module_set_rate(ctx->ch->dev,
687                         ctx, arg->rate, index, attr);
688 }
689
690 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
691         u32 moduleid, u32 *rate)
692 {
693         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
694
695         return nvhost_module_get_rate(ctx->ch->dev,
696                         (unsigned long *)rate, index);
697 }
698
699 static int nvhost_ioctl_channel_module_regrdwr(
700         struct nvhost_channel_userctx *ctx,
701         struct nvhost_ctrl_module_regrdwr_args *args)
702 {
703         u32 num_offsets = args->num_offsets;
704         u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
705         u32 __user *values = (u32 *)(uintptr_t)args->values;
706         u32 vals[64];
707         struct platform_device *ndev;
708
709         trace_nvhost_ioctl_channel_module_regrdwr(args->id,
710                 args->num_offsets, args->write);
711
712         /* Check that there is something to read and that block size is
713          * u32 aligned */
714         if (num_offsets == 0 || args->block_size & 3)
715                 return -EINVAL;
716
717         ndev = ctx->ch->dev;
718
719         while (num_offsets--) {
720                 int err;
721                 u32 offs;
722                 int remaining = args->block_size >> 2;
723
724                 if (get_user(offs, offsets))
725                         return -EFAULT;
726
727                 offsets++;
728                 while (remaining) {
729                         int batch = min(remaining, 64);
730                         if (args->write) {
731                                 if (copy_from_user(vals, values,
732                                                 batch * sizeof(u32)))
733                                         return -EFAULT;
734
735                                 err = nvhost_write_module_regs(ndev,
736                                         offs, batch, vals);
737                                 if (err)
738                                         return err;
739                         } else {
740                                 err = nvhost_read_module_regs(ndev,
741                                                 offs, batch, vals);
742                                 if (err)
743                                         return err;
744
745                                 if (copy_to_user(values, vals,
746                                                 batch * sizeof(u32)))
747                                         return -EFAULT;
748                         }
749
750                         remaining -= batch;
751                         offs += batch * sizeof(u32);
752                         values += batch;
753                 }
754         }
755
756         return 0;
757 }
758
759 static u32 create_mask(u32 *words, int num)
760 {
761         int i;
762         u32 word = 0;
763         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
764                 word |= BIT(words[i]);
765
766         return word;
767 }
768
769 static long nvhost_channelctl(struct file *filp,
770         unsigned int cmd, unsigned long arg)
771 {
772         struct nvhost_channel_userctx *priv = filp->private_data;
773         struct device *dev = &priv->ch->dev->dev;
774         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
775         int err = 0;
776
777         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
778                 (_IOC_NR(cmd) == 0) ||
779                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
780                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
781                 return -EFAULT;
782
783         if (_IOC_DIR(cmd) & _IOC_WRITE) {
784                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
785                         return -EFAULT;
786         }
787
788         switch (cmd) {
789         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
790         {
791                 struct nvhost_device_data *pdata = \
792                         platform_get_drvdata(priv->ch->dev);
793                 ((struct nvhost_get_param_args *)buf)->value =
794                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
795                 break;
796         }
797         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
798         {
799                 struct nvhost_device_data *pdata = \
800                         platform_get_drvdata(priv->ch->dev);
801                 struct nvhost_get_param_arg *arg =
802                         (struct nvhost_get_param_arg *)buf;
803                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
804                                 || !pdata->syncpts[arg->param])
805                         return -EINVAL;
806                 arg->value = pdata->syncpts[arg->param];
807                 break;
808         }
809         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
810         {
811                 struct nvhost_device_data *pdata = \
812                         platform_get_drvdata(priv->ch->dev);
813                 ((struct nvhost_get_param_args *)buf)->value =
814                         create_mask(pdata->waitbases,
815                                         NVHOST_MODULE_MAX_WAITBASES);
816                 break;
817         }
818         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
819         {
820                 struct nvhost_device_data *pdata = \
821                         platform_get_drvdata(priv->ch->dev);
822                 struct nvhost_get_param_arg *arg =
823                         (struct nvhost_get_param_arg *)buf;
824                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
825                                 || !pdata->waitbases[arg->param])
826                         return -EINVAL;
827                 arg->value = pdata->waitbases[arg->param];
828                 break;
829         }
830         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
831         {
832                 struct nvhost_device_data *pdata = \
833                         platform_get_drvdata(priv->ch->dev);
834                 ((struct nvhost_get_param_args *)buf)->value =
835                         create_mask(pdata->modulemutexes,
836                                         NVHOST_MODULE_MAX_MODMUTEXES);
837                 break;
838         }
839         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
840         {
841                 struct nvhost_device_data *pdata = \
842                         platform_get_drvdata(priv->ch->dev);
843                 struct nvhost_get_param_arg *arg =
844                         (struct nvhost_get_param_arg *)buf;
845                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
846                                 || !pdata->modulemutexes[arg->param])
847                         return -EINVAL;
848                 arg->value = pdata->modulemutexes[arg->param];
849                 break;
850         }
851         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
852         {
853                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
854                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
855
856                 if (IS_ERR(new_client)) {
857                         err = PTR_ERR(new_client);
858                         break;
859                 }
860                 if (priv->memmgr)
861                         nvhost_memmgr_put_mgr(priv->memmgr);
862
863                 priv->memmgr = new_client;
864
865                 if (priv->hwctx)
866                         priv->hwctx->memmgr = new_client;
867
868                 break;
869         }
870         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
871                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
872                 break;
873         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
874                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
875                 break;
876         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
877                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
878                 break;
879         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
880                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
881                 break;
882         case NVHOST_IOCTL_CHANNEL_WAIT:
883                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
884                 break;
885         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
886                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
887                 break;
888 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
889         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
890                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
891                 break;
892 #endif
893         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
894                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
895                 break;
896         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
897         {
898                 struct nvhost_clk_rate_args *arg =
899                                 (struct nvhost_clk_rate_args *)buf;
900
901                 err = nvhost_ioctl_channel_get_rate(priv,
902                                 arg->moduleid, &arg->rate);
903                 break;
904         }
905         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
906         {
907                 struct nvhost_clk_rate_args *arg =
908                                 (struct nvhost_clk_rate_args *)buf;
909
910                 err = nvhost_ioctl_channel_set_rate(priv, arg);
911                 break;
912         }
913         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
914                 priv->timeout =
915                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
916                 dev_dbg(&priv->ch->dev->dev,
917                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
918                         __func__, priv->timeout, priv);
919                 break;
920         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
921                 ((struct nvhost_get_param_args *)buf)->value =
922                                 priv->hwctx->has_timedout;
923                 break;
924         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
925                 priv->priority =
926                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
927                 break;
928         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
929                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
930                 break;
931         case NVHOST_IOCTL_CHANNEL_SUBMIT:
932                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
933                 break;
934         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
935                 priv->timeout = (u32)
936                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
937                 priv->timeout_debug_dump = !((u32)
938                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
939                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
940                 dev_dbg(&priv->ch->dev->dev,
941                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
942                         __func__, priv->timeout, priv);
943                 break;
944         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
945                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
946                 break;
947         default:
948                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
949                 err = -ENOTTY;
950                 break;
951         }
952
953         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
954                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
955
956         return err;
957 }
958
/* File operations for per-channel device nodes (/dev/nvhost-<name>). */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.unlocked_ioctl = nvhost_channelctl
};
965
966 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
967 {
968         struct nvhost_channel_userctx *userctx;
969         struct file *f = fget(fd);
970         if (!f)
971                 return 0;
972
973         if (f->f_op != &nvhost_channelops) {
974                 fput(f);
975                 return 0;
976         }
977
978         userctx = (struct nvhost_channel_userctx *)f->private_data;
979         fput(f);
980         return userctx->hwctx;
981 }
982
983
/* File operations for the address-space ("as-") device nodes. */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
990
/*
 * Maps a host1x engine class id to the name used when creating its
 * device node; first lookup choice in get_device_name_for_dev().
 */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1003
/*
 * Maps an nvhost module id to a device-node name; consulted when the
 * class id is not found in class_id_dev_name_map.
 */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1016
1017 static const char *get_device_name_for_dev(struct platform_device *dev)
1018 {
1019         int i;
1020         /* first choice is to use the class id if specified */
1021         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1022                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1023                 if (pdata->class == class_id_dev_name_map[i].class_id)
1024                         return class_id_dev_name_map[i].dev_name;
1025         }
1026
1027         /* second choice is module name if specified */
1028         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1029                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1030                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1031                         return module_id_dev_name_map[i].dev_name;
1032         }
1033
1034         /* last choice is to just use the given dev name */
1035         return dev->name;
1036 }
1037
1038 static struct device *nvhost_client_device_create(
1039         struct platform_device *pdev, struct cdev *cdev,
1040         const char *cdev_name, int devno,
1041         const struct file_operations *ops)
1042 {
1043         struct nvhost_master *host = nvhost_get_host(pdev);
1044         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1045         const char *use_dev_name;
1046         struct device *dev;
1047         int err;
1048
1049         nvhost_dbg_fn("");
1050
1051         BUG_ON(!host);
1052
1053         cdev_init(cdev, ops);
1054         cdev->owner = THIS_MODULE;
1055
1056         err = cdev_add(cdev, devno, 1);
1057         if (err < 0) {
1058                 dev_err(&pdev->dev,
1059                         "failed to add chan %i cdev\n", pdata->index);
1060                 return NULL;
1061         }
1062         use_dev_name = get_device_name_for_dev(pdev);
1063
1064         dev = device_create(host->nvhost_class,
1065                         NULL, devno, NULL,
1066                         (pdev->id <= 0) ?
1067                         IFACE_NAME "-%s%s" :
1068                         IFACE_NAME "-%s%s.%d",
1069                         cdev_name, use_dev_name, pdev->id);
1070
1071         if (IS_ERR(dev)) {
1072                 err = PTR_ERR(dev);
1073                 dev_err(&pdev->dev,
1074                         "failed to create %s %s device for %s\n",
1075                         use_dev_name, cdev_name, pdev->name);
1076                 return NULL;
1077         }
1078
1079         return dev;
1080 }
1081
1082 int nvhost_client_user_init(struct platform_device *dev)
1083 {
1084         int err, devno;
1085         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1086         struct nvhost_channel *ch = pdata->channel;
1087
1088         BUG_ON(!ch);
1089         // reserve 3 minor #s for <dev> and as-<dev> and ctrl-<dev>
1090
1091         err = alloc_chrdev_region(&devno, 0, 3, IFACE_NAME);
1092         if (err < 0) {
1093                 dev_err(&dev->dev, "failed to allocate devno\n");
1094                 goto fail;
1095         }
1096
1097         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1098                                 "", devno, &nvhost_channelops);
1099         if (ch->node == NULL)
1100                 goto fail;
1101         ++devno;
1102         ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1103                                 "as-", devno, &nvhost_asops);
1104         if (ch->as_node == NULL)
1105                 goto fail;
1106
1107         if (pdata->ctrl_ops) {
1108                 ++devno;
1109                 pdata->ctrl_node = nvhost_client_device_create(dev,
1110                                         &pdata->ctrl_cdev, "ctrl-",
1111                                         devno, pdata->ctrl_ops);
1112                 if (pdata->ctrl_node == NULL)
1113                         goto fail;
1114         }
1115
1116         return 0;
1117 fail:
1118         return err;
1119 }
1120
/*
 * Bring up a client device: allocate and initialize its channel, create
 * debugfs entries and user-space nodes, register it in the device list,
 * reset its syncpoints and set DMA parameters.
 *
 * Returns 0 on success, negative errno on failure. NOTE(review): the
 * failure path only frees the channel — resources acquired by the
 * intermediate steps (chrdev region, device-list entry) are not undone
 * here; see the "Add clean-up" marker below.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	/* tick control is optional per chip; only call if provided */
	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	/* devfreq/scaling hook is optional per device */
	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	/* register any companion ("slave") device under the same parent */
	if (pdata->slave) {
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
	}

	return 0;

fail:
	/* Add clean-up */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1180
1181 int nvhost_client_device_release(struct platform_device *dev)
1182 {
1183         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1184         struct nvhost_channel *ch;
1185         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1186
1187         ch = pdata->channel;
1188
1189         /* Release nvhost module resources */
1190         nvhost_module_deinit(dev);
1191
1192         /* Remove from nvhost device list */
1193         nvhost_device_list_remove(dev);
1194
1195         /* Release chardev and device node for user space */
1196         device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
1197         cdev_del(&ch->cdev);
1198
1199         /* Free nvhost channel */
1200         nvhost_free_channel(ch);
1201
1202         return 0;
1203 }
1204 EXPORT_SYMBOL(nvhost_client_device_release);
1205
1206 int nvhost_client_device_suspend(struct device *dev)
1207 {
1208         int ret = 0;
1209         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1210
1211         ret = nvhost_module_suspend(dev);
1212         if (ret)
1213                 return ret;
1214
1215         ret = nvhost_channel_suspend(pdata->channel);
1216         if (ret)
1217                 return ret;
1218
1219         dev_info(dev, "suspend status: %d\n", ret);
1220
1221         return ret;
1222 }
1223 EXPORT_SYMBOL(nvhost_client_device_suspend);
1224
/*
 * Resume a client device after suspend. Always returns 0; the module
 * resume itself reports no status here.
 */
int nvhost_client_device_resume(struct device *dev)
{
	nvhost_module_resume(dev);
	dev_info(dev, "resuming\n");
	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_resume);
1232
1233 int nvhost_client_device_get_resources(struct platform_device *dev)
1234 {
1235         int i;
1236         void __iomem *regs = NULL;
1237         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1238
1239         for (i = 0; i < dev->num_resources; i++) {
1240                 struct resource *r = NULL;
1241
1242                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1243                 /* We've run out of mem resources */
1244                 if (!r)
1245                         break;
1246
1247                 regs = devm_request_and_ioremap(&dev->dev, r);
1248                 if (!regs)
1249                         goto fail;
1250
1251                 pdata->aperture[i] = regs;
1252         }
1253
1254         return 0;
1255
1256 fail:
1257         dev_err(&dev->dev, "failed to get register memory\n");
1258
1259         return -ENXIO;
1260 }
1261 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1262
1263 /* This is a simple wrapper around request_firmware that takes
1264  * 'fw_name' and if available applies a SOC relative path prefix to it.
1265  * The caller is responsible for calling release_firmware later.
1266  */
1267 const struct firmware *
1268 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1269 {
1270         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1271         const struct firmware *fw;
1272         char *fw_path = NULL;
1273         int path_len, err;
1274
1275         /* This field is NULL when calling from SYS_EXIT.
1276            Add a check here to prevent crash in request_firmware */
1277         if (!current->fs) {
1278                 BUG();
1279                 return NULL;
1280         }
1281
1282         if (!fw_name)
1283                 return NULL;
1284
1285         if (op->soc_name) {
1286                 path_len = strlen(fw_name) + strlen(op->soc_name);
1287                 path_len += 2; /* for the path separator and zero terminator*/
1288
1289                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1290                                      GFP_KERNEL);
1291                 if (!fw_path)
1292                         return NULL;
1293
1294                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1295                 fw_name = fw_path;
1296         }
1297
1298         err = request_firmware(&fw, fw_name, &dev->dev);
1299         kfree(fw_path);
1300         if (err) {
1301                 dev_err(&dev->dev, "failed to get firmware\n");
1302                 return NULL;
1303         }
1304
1305         /* note: caller must release_firmware */
1306         return fw;
1307 }