video: tegra: host: Fix race in debug spew
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * drivers/video/tegra/host/bus_client.c
3  *
4  * Tegra Graphics Host Client Module
5  *
6  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify it
9  * under the terms and conditions of the GNU General Public License,
10  * version 2, as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope it will be useful, but WITHOUT
13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
15  * more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20
21 #include <linux/slab.h>
22 #include <linux/string.h>
23 #include <linux/spinlock.h>
24 #include <linux/fs.h>
25 #include <linux/cdev.h>
26 #include <linux/uaccess.h>
27 #include <linux/file.h>
28 #include <linux/clk.h>
29 #include <linux/hrtimer.h>
30 #include <linux/export.h>
31 #include <linux/firmware.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/tegra-soc.h>
34
35 #include <trace/events/nvhost.h>
36
37 #include <linux/io.h>
38 #include <linux/string.h>
39
40 #include <linux/nvhost.h>
41 #include <linux/nvhost_ioctl.h>
42
43 #include <mach/gpufuse.h>
44
45 #include "debug.h"
46 #include "bus_client.h"
47 #include "dev.h"
48 #include "class_ids.h"
49 #include "nvhost_as.h"
50 #include "nvhost_memmgr.h"
51 #include "chip_support.h"
52 #include "nvhost_acm.h"
53
54 #include "nvhost_syncpt.h"
55 #include "nvhost_channel.h"
56 #include "nvhost_job.h"
57 #include "nvhost_hwctx.h"
58 #include "user_hwctx.h"
59
60 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
61 {
62         int err = 0;
63         struct resource *r;
64         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
65
66         r = platform_get_resource(pdata->master ? pdata->master : ndev,
67                         IORESOURCE_MEM, 0);
68         if (!r) {
69                 dev_err(&ndev->dev, "failed to get memory resource\n");
70                 return -ENODEV;
71         }
72
73         if (offset + 4 * count > resource_size(r)
74                         || (offset + 4 * count < offset))
75                 err = -EPERM;
76
77         return err;
78 }
79
80 static __iomem void *get_aperture(struct platform_device *pdev)
81 {
82         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
83
84         if (pdata->master)
85                 pdata = platform_get_drvdata(pdata->master);
86
87         return pdata->aperture[0];
88 }
89
90 int nvhost_read_module_regs(struct platform_device *ndev,
91                         u32 offset, int count, u32 *values)
92 {
93         void __iomem *p = get_aperture(ndev);
94         int err;
95
96         if (!p)
97                 return -ENODEV;
98
99         /* verify offset */
100         err = validate_reg(ndev, offset, count);
101         if (err)
102                 return err;
103
104         nvhost_module_busy(ndev);
105         p += offset;
106         while (count--) {
107                 *(values++) = readl(p);
108                 p += 4;
109         }
110         rmb();
111         nvhost_module_idle(ndev);
112
113         return 0;
114 }
115
116 int nvhost_write_module_regs(struct platform_device *ndev,
117                         u32 offset, int count, const u32 *values)
118 {
119         int err;
120         void __iomem *p = get_aperture(ndev);
121
122         if (!p)
123                 return -ENODEV;
124
125         /* verify offset */
126         err = validate_reg(ndev, offset, count);
127         if (err)
128                 return err;
129
130         nvhost_module_busy(ndev);
131         p += offset;
132         while (count--) {
133                 writel(*(values++), p);
134                 p += 4;
135         }
136         wmb();
137         nvhost_module_idle(ndev);
138
139         return 0;
140 }
141
142 bool nvhost_client_can_writel(struct platform_device *pdev)
143 {
144         return !!get_aperture(pdev);
145 }
146 EXPORT_SYMBOL(nvhost_client_can_writel);
147
/*
 * Write one word to the client's aperture. 'reg' is a word index, not a
 * byte offset. NOTE(review): no aperture-NULL, range, or power checking
 * is done here — callers appear expected to gate on
 * nvhost_client_can_writel() and hold the module powered; verify at
 * call sites.
 */
void nvhost_client_writel(struct platform_device *pdev, u32 val, u32 reg)
{
	writel(val, get_aperture(pdev) + reg * 4);
}
152
/*
 * Read one word from the client's aperture. 'reg' is a word index, not
 * a byte offset. NOTE(review): like nvhost_client_writel(), this does
 * no aperture-NULL, range, or power checking — verify at call sites.
 */
u32 nvhost_client_readl(struct platform_device *pdev, u32 reg)
{
	return readl(get_aperture(pdev) + reg * 4);
}
157
/*
 * Per-open-file state for a channel device node; allocated in
 * nvhost_channelopen() and torn down in nvhost_channelrelease().
 */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel, reference held for lifetime */
	struct nvhost_hwctx *hwctx;	/* hw context, if the channel handler provides one */
	struct nvhost_job *job;
	struct mem_mgr *memmgr;		/* memory manager set via SET_NVMAP_FD ioctl */
	u32 timeout;			/* job timeout; forced to 0 on non-silicon platforms */
	u32 priority;			/* job priority, defaults to NVHOST_PRIORITY_MEDIUM */
	int clientid;			/* unique id from the host's clientid counter */
	bool timeout_debug_dump;	/* whether to dump debug state on job timeout */
};
168
/*
 * Release a channel file descriptor: unregister the module client,
 * detach and put the hardware context, drop any pending job reference,
 * and release the channel and memory-manager references taken at open
 * time. Also used as the error-unwind path by nvhost_channelopen(), so
 * it must tolerate partially initialised state.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		/* clear cur_ctx under submitlock so a concurrent submit
		 * cannot keep using the context we are about to put */
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch);

	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
200
/*
 * Open a channel device node: take a reference on the channel, allocate
 * per-file state, register as a module client and, when the channel has
 * a context handler, allocate a hardware context. On failure the partly
 * initialised state is unwound via nvhost_channelrelease() and -ENOMEM
 * is returned (note: even when the underlying failure was not an
 * allocation).
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch, false);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(dev_name(&ch->dev->dev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	if(nvhost_module_add_client(ch->dev, priv))
		goto fail;

	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		/* the context allocator may touch hardware; keep the
		 * module powered around the call */
		nvhost_module_busy(ch->dev);
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		nvhost_module_idle(ch->dev);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);
	pdata = platform_get_drvdata(ch->dev);
	priv->timeout = pdata->nvhost_timeout_default;
	priv->timeout_debug_dump = true;
	/* simulation/emulation platforms run slowly; disable job timeout */
	if (!tegra_platform_is_silicon())
		priv->timeout = 0;

	return 0;
fail:
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
244
245 static int nvhost_ioctl_channel_alloc_obj_ctx(
246         struct nvhost_channel_userctx *ctx,
247         struct nvhost_alloc_obj_ctx_args *args)
248 {
249         int ret;
250
251         BUG_ON(!channel_op().alloc_obj);
252         nvhost_module_busy(ctx->ch->dev);
253         ret = channel_op().alloc_obj(ctx->hwctx, args);
254         nvhost_module_idle(ctx->ch->dev);
255         return ret;
256 }
257
258 static int nvhost_ioctl_channel_free_obj_ctx(
259         struct nvhost_channel_userctx *ctx,
260         struct nvhost_free_obj_ctx_args *args)
261 {
262         int ret;
263
264         BUG_ON(!channel_op().free_obj);
265         nvhost_module_busy(ctx->ch->dev);
266         ret = channel_op().free_obj(ctx->hwctx, args);
267         nvhost_module_idle(ctx->ch->dev);
268         return ret;
269 }
270
271 static int nvhost_ioctl_channel_alloc_gpfifo(
272         struct nvhost_channel_userctx *ctx,
273         struct nvhost_alloc_gpfifo_args *args)
274 {
275         int ret;
276
277         BUG_ON(!channel_op().alloc_gpfifo);
278         nvhost_module_busy(ctx->ch->dev);
279         ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
280         nvhost_module_idle(ctx->ch->dev);
281         return ret;
282 }
283
284 static int nvhost_ioctl_channel_submit_gpfifo(
285         struct nvhost_channel_userctx *ctx,
286         struct nvhost_submit_gpfifo_args *args)
287 {
288         void *gpfifo;
289         u32 size;
290         int ret = 0;
291
292         if (!ctx->hwctx || ctx->hwctx->has_timedout)
293                 return -ETIMEDOUT;
294
295         size = args->num_entries * sizeof(struct nvhost_gpfifo);
296
297         gpfifo = kzalloc(size, GFP_KERNEL);
298         if (!gpfifo)
299                 return -ENOMEM;
300
301         if (copy_from_user(gpfifo,
302                            (void __user *)(uintptr_t)args->gpfifo, size)) {
303                 ret = -EINVAL;
304                 goto clean_up;
305         }
306
307         BUG_ON(!channel_op().submit_gpfifo);
308
309         nvhost_module_busy(ctx->ch->dev);
310         ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
311                         args->num_entries, &args->fence, args->flags);
312         nvhost_module_idle(ctx->ch->dev);
313 clean_up:
314         kfree(gpfifo);
315         return ret;
316 }
317
318 static int nvhost_ioctl_channel_wait(
319         struct nvhost_channel_userctx *ctx,
320         struct nvhost_wait_args *args)
321 {
322         int ret;
323
324         BUG_ON(!channel_op().wait);
325         nvhost_module_busy(ctx->ch->dev);
326         ret = channel_op().wait(ctx->hwctx, args);
327         nvhost_module_idle(ctx->ch->dev);
328         return ret;
329 }
330
331 static int nvhost_ioctl_channel_zcull_bind(
332         struct nvhost_channel_userctx *ctx,
333         struct nvhost_zcull_bind_args *args)
334 {
335         int ret;
336
337         BUG_ON(!channel_zcull_op().bind);
338         nvhost_module_busy(ctx->ch->dev);
339         ret = channel_zcull_op().bind(ctx->hwctx, args);
340         nvhost_module_idle(ctx->ch->dev);
341         return ret;
342 }
343
344 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
345                 struct nvhost_submit_args *args)
346 {
347         struct nvhost_job *job;
348         int num_cmdbufs = args->num_cmdbufs;
349         int num_relocs = args->num_relocs;
350         int num_waitchks = args->num_waitchks;
351         int num_syncpt_incrs = args->num_syncpt_incrs;
352         struct nvhost_cmdbuf __user *cmdbufs =
353                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
354         struct nvhost_reloc __user *relocs =
355                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
356         struct nvhost_reloc_shift __user *reloc_shifts =
357                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
358         struct nvhost_waitchk __user *waitchks =
359                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
360         struct nvhost_syncpt_incr __user *syncpt_incrs =
361                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
362         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
363         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
364
365         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
366         u32 *local_waitbases = NULL;
367         int err, i, hwctx_syncpt_idx = -1;
368
369         if (num_syncpt_incrs > host->info.nb_pts)
370                 return -EINVAL;
371
372         job = nvhost_job_alloc(ctx->ch,
373                         ctx->hwctx,
374                         num_cmdbufs,
375                         num_relocs,
376                         num_waitchks,
377                         num_syncpt_incrs,
378                         ctx->memmgr);
379         if (!job)
380                 return -ENOMEM;
381
382         job->num_relocs = args->num_relocs;
383         job->num_waitchk = args->num_waitchks;
384         job->num_syncpts = args->num_syncpt_incrs;
385         job->priority = ctx->priority;
386         job->clientid = ctx->clientid;
387
388         while (num_cmdbufs) {
389                 struct nvhost_cmdbuf cmdbuf;
390                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
391                 if (err)
392                         goto fail;
393                 nvhost_job_add_gather(job,
394                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
395                 num_cmdbufs--;
396                 cmdbufs++;
397         }
398
399         err = copy_from_user(job->relocarray,
400                         relocs, sizeof(*relocs) * num_relocs);
401         if (err)
402                 goto fail;
403
404         err = copy_from_user(job->relocshiftarray,
405                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
406         if (err)
407                 goto fail;
408
409         err = copy_from_user(job->waitchk,
410                         waitchks, sizeof(*waitchks) * num_waitchks);
411         if (err)
412                 goto fail;
413
414         /* mass copy waitbases */
415         if (args->waitbases) {
416                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
417                         GFP_KERNEL);
418                 if (!local_waitbases) {
419                         err = -ENOMEM;
420                         goto fail;
421                 }
422
423                 err = copy_from_user(local_waitbases, waitbases,
424                         sizeof(u32) * num_syncpt_incrs);
425                 if (err) {
426                         err = -EINVAL;
427                         goto fail;
428                 }
429         }
430
431         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
432         if (!ctx->hwctx)
433                 hwctx_syncpt_idx = 0;
434
435         /*
436          * Go through each syncpoint from userspace. Here we:
437          * - Copy syncpoint information
438          * - Validate each syncpoint
439          * - Determine waitbase for each syncpoint
440          * - Determine the index of hwctx syncpoint in the table
441          */
442
443         for (i = 0; i < num_syncpt_incrs; ++i) {
444                 u32 waitbase;
445                 struct nvhost_syncpt_incr sp;
446
447                 /* Copy */
448                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
449                 if (err)
450                         goto fail;
451
452                 /* Validate */
453                 if (sp.syncpt_id > host->info.nb_pts) {
454                         err = -EINVAL;
455                         goto fail;
456                 }
457
458                 /* Determine waitbase */
459                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
460                         waitbase = local_waitbases[i];
461                 else
462                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
463                                 sp.syncpt_id);
464
465                 /* Store */
466                 job->sp[i].id = sp.syncpt_id;
467                 job->sp[i].incrs = sp.syncpt_incrs;
468                 job->sp[i].waitbase = waitbase;
469
470                 /* Find hwctx syncpoint */
471                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
472                         hwctx_syncpt_idx = i;
473         }
474
475         /* not needed anymore */
476         kfree(local_waitbases);
477         local_waitbases = NULL;
478
479         /* Is hwctx_syncpt_idx valid? */
480         if (hwctx_syncpt_idx == -1) {
481                 err = -EINVAL;
482                 goto fail;
483         }
484
485         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
486
487         trace_nvhost_channel_submit(ctx->ch->dev->name,
488                 job->num_gathers, job->num_relocs, job->num_waitchk,
489                 job->sp[job->hwctx_syncpt_idx].id,
490                 job->sp[job->hwctx_syncpt_idx].incrs);
491
492         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
493         if (err)
494                 goto fail;
495
496         if (args->timeout)
497                 job->timeout = min(ctx->timeout, args->timeout);
498         else
499                 job->timeout = ctx->timeout;
500         job->timeout_debug_dump = ctx->timeout_debug_dump;
501
502         err = nvhost_channel_submit(job);
503         if (err)
504                 goto fail_submit;
505
506         /* Deliver multiple fences back to the userspace */
507         if (fences)
508                 for (i = 0; i < num_syncpt_incrs; ++i) {
509                         u32 fence = job->sp[i].fence;
510                         err = copy_to_user(fences, &fence, sizeof(u32));
511                         if (err)
512                                 break;
513                         fences++;
514                 }
515
516         /* Deliver the fence using the old mechanism _only_ if a single
517          * syncpoint is used. */
518
519         if (num_syncpt_incrs == 1)
520                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
521         else
522                 args->fence = 0;
523
524         nvhost_job_put(job);
525
526         return 0;
527
528 fail_submit:
529         nvhost_job_unpin(job);
530 fail:
531         nvhost_job_put(job);
532         kfree(local_waitbases);
533         return err;
534 }
535
536 static int nvhost_ioctl_channel_set_ctxswitch(
537                 struct nvhost_channel_userctx *ctx,
538                 struct nvhost_set_ctxswitch_args *args)
539 {
540         struct nvhost_cmdbuf cmdbuf_save;
541         struct nvhost_cmdbuf cmdbuf_restore;
542         struct nvhost_syncpt_incr save_incr, restore_incr;
543         u32 save_waitbase, restore_waitbase;
544         struct nvhost_reloc reloc;
545         struct nvhost_hwctx_handler *ctxhandler = NULL;
546         struct nvhost_hwctx *nhwctx = NULL;
547         struct user_hwctx *hwctx;
548         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
549         int err;
550
551         /* Only channels with context support */
552         if (!ctx->hwctx)
553                 return -EFAULT;
554
555         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
556         if (args->num_cmdbufs_save != 1
557                         || args->num_cmdbufs_restore != 1
558                         || args->num_save_incrs != 1
559                         || args->num_restore_incrs != 1
560                         || args->num_relocs != 1)
561                 return -EINVAL;
562
563         err = copy_from_user(&cmdbuf_save,
564                         (void *)(uintptr_t)args->cmdbuf_save,
565                         sizeof(cmdbuf_save));
566         if (err)
567                 goto fail;
568
569         err = copy_from_user(&cmdbuf_restore,
570                         (void *)(uintptr_t)args->cmdbuf_restore,
571                         sizeof(cmdbuf_restore));
572         if (err)
573                 goto fail;
574
575         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
576                         sizeof(reloc));
577         if (err)
578                 goto fail;
579
580         err = copy_from_user(&save_incr,
581                         (void *)(uintptr_t)args->save_incrs,
582                         sizeof(save_incr));
583         if (err)
584                 goto fail;
585         err = copy_from_user(&save_waitbase,
586                         (void *)(uintptr_t)args->save_waitbases,
587                         sizeof(save_waitbase));
588
589         err = copy_from_user(&restore_incr,
590                         (void *)(uintptr_t)args->restore_incrs,
591                         sizeof(restore_incr));
592         if (err)
593                 goto fail;
594         err = copy_from_user(&restore_waitbase,
595                         (void *)(uintptr_t)args->restore_waitbases,
596                         sizeof(restore_waitbase));
597
598         if (save_incr.syncpt_id != pdata->syncpts[0]
599                         || restore_incr.syncpt_id != pdata->syncpts[0]
600                         || save_waitbase != pdata->waitbases[0]
601                         || restore_waitbase != pdata->waitbases[0]) {
602                 err = -EINVAL;
603                 goto fail;
604         }
605         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
606                         save_waitbase, ctx->ch);
607         if (!ctxhandler) {
608                 err = -ENOMEM;
609                 goto fail;
610         }
611
612         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
613         if (!nhwctx) {
614                 err = -ENOMEM;
615                 goto fail_hwctx;
616         }
617         hwctx = to_user_hwctx(nhwctx);
618
619         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
620                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
621                         cmdbuf_restore.mem, cmdbuf_restore.offset,
622                         cmdbuf_restore.words,
623                         pdata->syncpts[0], pdata->waitbases[0],
624                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
625
626         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
627         if (!nhwctx->memmgr)
628                 goto fail_set_restore;
629
630         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
631                         cmdbuf_restore.offset, cmdbuf_restore.words);
632         if (err)
633                 goto fail_set_restore;
634
635         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
636                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
637         if (err)
638                 goto fail_set_save;
639
640         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
641         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
642
643         /* Free old context */
644         ctx->hwctx->h->put(ctx->hwctx);
645         ctx->hwctx = nhwctx;
646
647         return 0;
648
649 fail_set_save:
650 fail_set_restore:
651         ctxhandler->put(&hwctx->hwctx);
652 fail_hwctx:
653         user_ctxhandler_free(ctxhandler);
654 fail:
655         return err;
656 }
657
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
/*
 * Forward the cycle-stats ioctl to the chip-specific implementation.
 * Note: unlike the other channel ioctl helpers in this file, this one
 * is not wrapped in nvhost_module_busy()/idle().
 */
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	int ret;
	BUG_ON(!channel_op().cycle_stats);
	ret = channel_op().cycle_stats(ctx->hwctx, args);
	return ret;
}
#endif
669
/*
 * Read a single 3D-unit register through the channel, using the
 * caller's hardware context; the value is returned in args->value.
 */
static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}
676
677 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
678 {
679         int i;
680         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
681
682         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
683                 if (pdata->clocks[i].moduleid == moduleid)
684                         return i;
685         }
686
687         /* Old user space is sending a random number in args. Return clock
688          * zero in these cases. */
689         return 0;
690 }
691
692 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
693         struct nvhost_clk_rate_args *arg)
694 {
695         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
696                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
697         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
698                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
699         int index = moduleid ?
700                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
701
702         return nvhost_module_set_rate(ctx->ch->dev,
703                         ctx, arg->rate, index, attr);
704 }
705
706 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
707         u32 moduleid, u32 *rate)
708 {
709         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
710
711         return nvhost_module_get_rate(ctx->ch->dev,
712                         (unsigned long *)rate, index);
713 }
714
/*
 * Read or write register blocks on behalf of userspace. args->offsets
 * points to num_offsets register byte offsets; at each offset a block
 * of block_size bytes (must be a multiple of 4) is transferred to/from
 * args->values. Data is chunked through a 64-word stack bounce buffer;
 * range validation happens inside nvhost_read/write_module_regs().
 * Returns 0 on success, -EINVAL on bad arguments, -EFAULT on copy
 * failure, or the register-access error.
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];	/* bounce buffer: at most 64 words per copy */
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		/* number of 32-bit words remaining in this block */
		int remaining = args->block_size >> 2;

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
774
775 static u32 create_mask(u32 *words, int num)
776 {
777         int i;
778         u32 word = 0;
779         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
780                 word |= BIT(words[i]);
781
782         return word;
783 }
784
785 static long nvhost_channelctl(struct file *filp,
786         unsigned int cmd, unsigned long arg)
787 {
788         struct nvhost_channel_userctx *priv = filp->private_data;
789         struct device *dev = &priv->ch->dev->dev;
790         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
791         int err = 0;
792
793         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
794                 (_IOC_NR(cmd) == 0) ||
795                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
796                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
797                 return -EFAULT;
798
799         if (_IOC_DIR(cmd) & _IOC_WRITE) {
800                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
801                         return -EFAULT;
802         }
803
804         switch (cmd) {
805         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
806         {
807                 struct nvhost_device_data *pdata = \
808                         platform_get_drvdata(priv->ch->dev);
809                 ((struct nvhost_get_param_args *)buf)->value =
810                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
811                 break;
812         }
813         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
814         {
815                 struct nvhost_device_data *pdata = \
816                         platform_get_drvdata(priv->ch->dev);
817                 struct nvhost_get_param_arg *arg =
818                         (struct nvhost_get_param_arg *)buf;
819                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
820                                 || !pdata->syncpts[arg->param])
821                         return -EINVAL;
822                 arg->value = pdata->syncpts[arg->param];
823                 break;
824         }
825         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
826         {
827                 struct nvhost_device_data *pdata = \
828                         platform_get_drvdata(priv->ch->dev);
829                 ((struct nvhost_get_param_args *)buf)->value =
830                         create_mask(pdata->waitbases,
831                                         NVHOST_MODULE_MAX_WAITBASES);
832                 break;
833         }
834         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
835         {
836                 struct nvhost_device_data *pdata = \
837                         platform_get_drvdata(priv->ch->dev);
838                 struct nvhost_get_param_arg *arg =
839                         (struct nvhost_get_param_arg *)buf;
840                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
841                                 || !pdata->waitbases[arg->param])
842                         return -EINVAL;
843                 arg->value = pdata->waitbases[arg->param];
844                 break;
845         }
846         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
847         {
848                 struct nvhost_device_data *pdata = \
849                         platform_get_drvdata(priv->ch->dev);
850                 ((struct nvhost_get_param_args *)buf)->value =
851                         create_mask(pdata->modulemutexes,
852                                         NVHOST_MODULE_MAX_MODMUTEXES);
853                 break;
854         }
855         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
856         {
857                 struct nvhost_device_data *pdata = \
858                         platform_get_drvdata(priv->ch->dev);
859                 struct nvhost_get_param_arg *arg =
860                         (struct nvhost_get_param_arg *)buf;
861                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
862                                 || !pdata->modulemutexes[arg->param])
863                         return -EINVAL;
864                 arg->value = pdata->modulemutexes[arg->param];
865                 break;
866         }
867         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
868         {
869                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
870                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
871
872                 if (IS_ERR(new_client)) {
873                         err = PTR_ERR(new_client);
874                         break;
875                 }
876                 if (priv->memmgr)
877                         nvhost_memmgr_put_mgr(priv->memmgr);
878
879                 priv->memmgr = new_client;
880
881                 if (priv->hwctx)
882                         priv->hwctx->memmgr = new_client;
883
884                 break;
885         }
886         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
887                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
888                 break;
889         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
890                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
891                 break;
892         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
893                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
894                 break;
895         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
896                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
897                 break;
898         case NVHOST_IOCTL_CHANNEL_WAIT:
899                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
900                 break;
901         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
902                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
903                 break;
904 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
905         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
906                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
907                 break;
908 #endif
909         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
910                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
911                 break;
912         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
913         {
914                 struct nvhost_clk_rate_args *arg =
915                                 (struct nvhost_clk_rate_args *)buf;
916
917                 err = nvhost_ioctl_channel_get_rate(priv,
918                                 arg->moduleid, &arg->rate);
919                 break;
920         }
921         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
922         {
923                 struct nvhost_clk_rate_args *arg =
924                                 (struct nvhost_clk_rate_args *)buf;
925
926                 err = nvhost_ioctl_channel_set_rate(priv, arg);
927                 break;
928         }
929         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
930                 priv->timeout =
931                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
932                 dev_dbg(&priv->ch->dev->dev,
933                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
934                         __func__, priv->timeout, priv);
935                 break;
936         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
937                 ((struct nvhost_get_param_args *)buf)->value =
938                                 priv->hwctx->has_timedout;
939                 break;
940         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
941                 priv->priority =
942                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
943                 break;
944         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
945                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
946                 break;
947         case NVHOST_IOCTL_CHANNEL_SUBMIT:
948                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
949                 break;
950         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
951                 priv->timeout = (u32)
952                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
953                 priv->timeout_debug_dump = !((u32)
954                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
955                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
956                 dev_dbg(&priv->ch->dev->dev,
957                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
958                         __func__, priv->timeout, priv);
959                 break;
960         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
961                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
962                 break;
963         default:
964                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
965                 err = -ENOTTY;
966                 break;
967         }
968
969         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
970                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
971
972         return err;
973 }
974
/* File operations for the per-channel device node (IFACE_NAME "-<dev>").
 * Also used as an identity check in nvhost_channel_get_file_hwctx(). */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.unlocked_ioctl = nvhost_channelctl
};
981
982 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
983 {
984         struct nvhost_channel_userctx *userctx;
985         struct file *f = fget(fd);
986         if (!f)
987                 return 0;
988
989         if (f->f_op != &nvhost_channelops) {
990                 fput(f);
991                 return 0;
992         }
993
994         userctx = (struct nvhost_channel_userctx *)f->private_data;
995         fput(f);
996         return userctx->hwctx;
997 }
998
999
/* File operations for the address-space device node (IFACE_NAME "-as-<dev>"). */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
1006
/* Maps a host1x class id to the short name used in the /dev node.
 * Consulted first by get_device_name_for_dev(). */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1019
/* Maps an nvhost module id to the short name used in the /dev node.
 * Fallback consulted by get_device_name_for_dev() when no class id matches. */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1032
1033 static const char *get_device_name_for_dev(struct platform_device *dev)
1034 {
1035         int i;
1036         /* first choice is to use the class id if specified */
1037         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1038                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1039                 if (pdata->class == class_id_dev_name_map[i].class_id)
1040                         return class_id_dev_name_map[i].dev_name;
1041         }
1042
1043         /* second choice is module name if specified */
1044         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1045                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1046                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1047                         return module_id_dev_name_map[i].dev_name;
1048         }
1049
1050         /* last choice is to just use the given dev name */
1051         return dev->name;
1052 }
1053
1054 static struct device *nvhost_client_device_create(
1055         struct platform_device *pdev, struct cdev *cdev,
1056         const char *cdev_name, int devno,
1057         const struct file_operations *ops)
1058 {
1059         struct nvhost_master *host = nvhost_get_host(pdev);
1060         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1061         const char *use_dev_name;
1062         struct device *dev;
1063         int err;
1064
1065         nvhost_dbg_fn("");
1066
1067         BUG_ON(!host);
1068
1069         cdev_init(cdev, ops);
1070         cdev->owner = THIS_MODULE;
1071
1072         err = cdev_add(cdev, devno, 1);
1073         if (err < 0) {
1074                 dev_err(&pdev->dev,
1075                         "failed to add chan %i cdev\n", pdata->index);
1076                 return NULL;
1077         }
1078         use_dev_name = get_device_name_for_dev(pdev);
1079
1080         dev = device_create(host->nvhost_class,
1081                         NULL, devno, NULL,
1082                         (pdev->id <= 0) ?
1083                         IFACE_NAME "-%s%s" :
1084                         IFACE_NAME "-%s%s.%d",
1085                         cdev_name, use_dev_name, pdev->id);
1086
1087         if (IS_ERR(dev)) {
1088                 err = PTR_ERR(dev);
1089                 dev_err(&pdev->dev,
1090                         "failed to create %s %s device for %s\n",
1091                         use_dev_name, cdev_name, pdev->name);
1092                 return NULL;
1093         }
1094
1095         return dev;
1096 }
1097
1098 int nvhost_client_user_init(struct platform_device *dev)
1099 {
1100         int err, devno;
1101         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1102         struct nvhost_channel *ch = pdata->channel;
1103
1104         BUG_ON(!ch);
1105         // reserve 3 minor #s for <dev> and as-<dev> and ctrl-<dev>
1106
1107         err = alloc_chrdev_region(&devno, 0, 3, IFACE_NAME);
1108         if (err < 0) {
1109                 dev_err(&dev->dev, "failed to allocate devno\n");
1110                 goto fail;
1111         }
1112
1113         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1114                                 "", devno, &nvhost_channelops);
1115         if (ch->node == NULL)
1116                 goto fail;
1117         ++devno;
1118         ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1119                                 "as-", devno, &nvhost_asops);
1120         if (ch->as_node == NULL)
1121                 goto fail;
1122
1123         if (pdata->ctrl_ops) {
1124                 ++devno;
1125                 pdata->ctrl_node = nvhost_client_device_create(dev,
1126                                         &pdata->ctrl_cdev, "ctrl-",
1127                                         devno, pdata->ctrl_ops);
1128                 if (pdata->ctrl_node == NULL)
1129                         goto fail;
1130         }
1131
1132         return 0;
1133 fail:
1134         return err;
1135 }
1136
/*
 * Bring up an nvhost client device: allocate and initialize its channel,
 * create debugfs and user-space nodes, register it in the device list,
 * reset its syncpoints and configure DMA parameters.
 *
 * Returns 0 on success or a negative errno.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	/* the host must be powered while touching syncpoint registers */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	if (pdata->slave) {
		/* slave registered under the same parent; registration
		 * failure is deliberately not checked here */
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
	}

	return 0;

fail:
	/* Add clean-up */
	/* NOTE(review): only the channel is freed on this path — chardev
	 * nodes and the chrdev region created by nvhost_client_user_init()
	 * are not undone if a later step fails; TODO confirm and add the
	 * missing teardown. */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1196
1197 int nvhost_client_device_release(struct platform_device *dev)
1198 {
1199         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1200         struct nvhost_channel *ch;
1201         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1202
1203         ch = pdata->channel;
1204
1205         /* Release nvhost module resources */
1206         nvhost_module_deinit(dev);
1207
1208         /* Remove from nvhost device list */
1209         nvhost_device_list_remove(dev);
1210
1211         /* Release chardev and device node for user space */
1212         device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
1213         cdev_del(&ch->cdev);
1214
1215         /* Free nvhost channel */
1216         nvhost_free_channel(ch);
1217
1218         return 0;
1219 }
1220 EXPORT_SYMBOL(nvhost_client_device_release);
1221
1222 int nvhost_client_device_suspend(struct device *dev)
1223 {
1224         int ret = 0;
1225         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1226
1227         ret = nvhost_module_suspend(dev);
1228         if (ret)
1229                 return ret;
1230
1231         ret = nvhost_channel_suspend(pdata->channel);
1232         if (ret)
1233                 return ret;
1234
1235         dev_info(dev, "suspend status: %d\n", ret);
1236
1237         return ret;
1238 }
1239 EXPORT_SYMBOL(nvhost_client_device_suspend);
1240
/*
 * Resume an nvhost client device.  Powers the module back up via
 * nvhost_module_resume() and logs the transition.  Always returns 0.
 */
int nvhost_client_device_resume(struct device *dev)
{
	nvhost_module_resume(dev);

	dev_info(dev, "resuming\n");

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_resume);
1248
1249 int nvhost_client_device_get_resources(struct platform_device *dev)
1250 {
1251         int i;
1252         void __iomem *regs = NULL;
1253         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1254
1255         for (i = 0; i < dev->num_resources; i++) {
1256                 struct resource *r = NULL;
1257
1258                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1259                 /* We've run out of mem resources */
1260                 if (!r)
1261                         break;
1262
1263                 regs = devm_request_and_ioremap(&dev->dev, r);
1264                 if (!regs)
1265                         goto fail;
1266
1267                 pdata->aperture[i] = regs;
1268         }
1269
1270         return 0;
1271
1272 fail:
1273         dev_err(&dev->dev, "failed to get register memory\n");
1274
1275         return -ENXIO;
1276 }
1277 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1278
1279 /* This is a simple wrapper around request_firmware that takes
1280  * 'fw_name' and if available applies a SOC relative path prefix to it.
1281  * The caller is responsible for calling release_firmware later.
1282  */
1283 const struct firmware *
1284 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1285 {
1286         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1287         const struct firmware *fw;
1288         char *fw_path = NULL;
1289         int path_len, err;
1290
1291         /* This field is NULL when calling from SYS_EXIT.
1292            Add a check here to prevent crash in request_firmware */
1293         if (!current->fs) {
1294                 BUG();
1295                 return NULL;
1296         }
1297
1298         if (!fw_name)
1299                 return NULL;
1300
1301         if (op->soc_name) {
1302                 path_len = strlen(fw_name) + strlen(op->soc_name);
1303                 path_len += 2; /* for the path separator and zero terminator*/
1304
1305                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1306                                      GFP_KERNEL);
1307                 if (!fw_path)
1308                         return NULL;
1309
1310                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1311                 fw_name = fw_path;
1312         }
1313
1314         err = request_firmware(&fw, fw_name, &dev->dev);
1315         kfree(fw_path);
1316         if (err) {
1317                 dev_err(&dev->dev, "failed to get firmware\n");
1318                 return NULL;
1319         }
1320
1321         /* note: caller must release_firmware */
1322         return fw;
1323 }