video: tegra: host: Clear new args before filling it
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * Tegra Graphics Host Client Module
3  *
4  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spinlock.h>
22 #include <linux/fs.h>
23 #include <linux/cdev.h>
24 #include <linux/uaccess.h>
25 #include <linux/file.h>
26 #include <linux/clk.h>
27 #include <linux/hrtimer.h>
28 #include <linux/export.h>
29 #include <linux/firmware.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/tegra-soc.h>
32
33 #include <trace/events/nvhost.h>
34
35 #include <linux/io.h>
36 #include <linux/string.h>
37
38 #include <linux/nvhost.h>
39 #include <linux/nvhost_ioctl.h>
40
41 #include <mach/gpufuse.h>
42
43 #include "debug.h"
44 #include "bus_client.h"
45 #include "dev.h"
46 #include "class_ids.h"
47 #include "nvhost_as.h"
48 #include "nvhost_memmgr.h"
49 #include "chip_support.h"
50 #include "nvhost_acm.h"
51
52 #include "nvhost_syncpt.h"
53 #include "nvhost_channel.h"
54 #include "nvhost_job.h"
55 #include "nvhost_hwctx.h"
56 #include "user_hwctx.h"
57 #include "nvhost_sync.h"
58
59 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
60 {
61         int err = 0;
62         struct resource *r;
63         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
64
65         r = platform_get_resource(pdata->master ? pdata->master : ndev,
66                         IORESOURCE_MEM, 0);
67         if (!r) {
68                 dev_err(&ndev->dev, "failed to get memory resource\n");
69                 return -ENODEV;
70         }
71
72         if (offset + 4 * count > resource_size(r)
73                         || (offset + 4 * count < offset))
74                 err = -EPERM;
75
76         return err;
77 }
78
79 static __iomem void *get_aperture(struct platform_device *pdev)
80 {
81         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
82
83         if (pdata->master)
84                 pdata = platform_get_drvdata(pdata->master);
85
86         return pdata->aperture[0];
87 }
88
89 void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
90 {
91         void __iomem *addr = get_aperture(pdev) + r;
92         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
93         writel(v, addr);
94 }
95 EXPORT_SYMBOL_GPL(host1x_writel);
96
97 u32 host1x_readl(struct platform_device *pdev, u32 r)
98 {
99         void __iomem *addr = get_aperture(pdev) + r;
100         u32 v = readl(addr);
101         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
102         return v;
103 }
104 EXPORT_SYMBOL_GPL(host1x_readl);
105
106 int nvhost_read_module_regs(struct platform_device *ndev,
107                         u32 offset, int count, u32 *values)
108 {
109         void __iomem *p = get_aperture(ndev);
110         int err;
111
112         if (!p)
113                 return -ENODEV;
114
115         /* verify offset */
116         err = validate_reg(ndev, offset, count);
117         if (err)
118                 return err;
119
120         nvhost_module_busy(ndev);
121         p += offset;
122         while (count--) {
123                 *(values++) = readl(p);
124                 p += 4;
125         }
126         rmb();
127         nvhost_module_idle(ndev);
128
129         return 0;
130 }
131
132 int nvhost_write_module_regs(struct platform_device *ndev,
133                         u32 offset, int count, const u32 *values)
134 {
135         int err;
136         void __iomem *p = get_aperture(ndev);
137
138         if (!p)
139                 return -ENODEV;
140
141         /* verify offset */
142         err = validate_reg(ndev, offset, count);
143         if (err)
144                 return err;
145
146         nvhost_module_busy(ndev);
147         p += offset;
148         while (count--) {
149                 writel(*(values++), p);
150                 p += 4;
151         }
152         wmb();
153         nvhost_module_idle(ndev);
154
155         return 0;
156 }
157
/* Per-open-fd state for an nvhost channel device node. */
struct nvhost_channel_userctx {
        struct nvhost_channel *ch;      /* channel this fd operates on */
        struct nvhost_hwctx *hwctx;     /* hw context, if the class allocates one */
        struct nvhost_job *job;         /* released (put) on close if still set */
        struct mem_mgr *memmgr;         /* memory manager ref, put on close */
        u32 timeout;                    /* job timeout; forced to 0 on pre-silicon */
        u32 priority;                   /* NVHOST_PRIORITY_* submit priority */
        int clientid;                   /* unique per-open id used in jobs/tracing */
        bool timeout_debug_dump;        /* copied into each job's debug-dump flag */
};
168
/*
 * Release an open channel fd: drop the module client registration, detach
 * and put the hardware context, drop any pending job, and release the
 * channel and memory-manager references taken at open time.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_channel_userctx *priv = filp->private_data;

        trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

        filp->private_data = NULL;

        nvhost_module_remove_client(priv->ch->dev, priv);

        if (priv->hwctx) {
                struct nvhost_channel *ch = priv->ch;
                struct nvhost_hwctx *ctx = priv->hwctx;

                /* If this context is still current on the channel, clear it
                 * under submitlock so a concurrent submit cannot race with
                 * the teardown. */
                mutex_lock(&ch->submitlock);
                if (ch->cur_ctx == ctx)
                        ch->cur_ctx = NULL;
                mutex_unlock(&ch->submitlock);

                priv->hwctx->h->put(priv->hwctx);
        }

        if (priv->job)
                nvhost_job_put(priv->job);

        nvhost_putchannel(priv->ch);

        nvhost_memmgr_put_mgr(priv->memmgr);
        kfree(priv);
        return 0;
}
200
/*
 * Open a channel device node: take a channel reference, allocate per-fd
 * state, register as a module client and, when the class has a context
 * handler, allocate a hardware context.
 *
 * NOTE(review): every failure after priv is allocated is reported as
 * -ENOMEM, even when nvhost_module_add_client() fails for another reason.
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
        struct nvhost_channel_userctx *priv;
        struct nvhost_channel *ch;
        struct nvhost_device_data *pdata;

        ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
        ch = nvhost_getchannel(ch, false);
        if (!ch)
                return -ENOMEM;
        trace_nvhost_channel_open(dev_name(&ch->dev->dev));

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                nvhost_putchannel(ch);
                return -ENOMEM;
        }
        /* private_data must be set before the failure paths below, since
         * nvhost_channelrelease() retrieves the context through it. */
        filp->private_data = priv;
        priv->ch = ch;
        if (nvhost_module_add_client(ch->dev, priv))
                goto fail;

        if (ch->ctxhandler && ch->ctxhandler->alloc) {
                nvhost_module_busy(ch->dev);
                priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
                nvhost_module_idle(ch->dev);
                if (!priv->hwctx)
                        goto fail;
        }
        priv->priority = NVHOST_PRIORITY_MEDIUM;
        priv->clientid = atomic_add_return(1,
                        &nvhost_get_host(ch->dev)->clientid);
        pdata = dev_get_drvdata(ch->dev->dev.parent);
        priv->timeout = pdata->nvhost_timeout_default;
        priv->timeout_debug_dump = true;
        /* Disable job timeouts on pre-silicon platforms (simulators/FPGA
         * run far slower than real hardware). */
        if (!tegra_platform_is_silicon())
                priv->timeout = 0;

        return 0;
fail:
        nvhost_channelrelease(inode, filp);
        return -ENOMEM;
}
244
245 static int nvhost_ioctl_channel_alloc_obj_ctx(
246         struct nvhost_channel_userctx *ctx,
247         struct nvhost_alloc_obj_ctx_args *args)
248 {
249         int ret;
250
251         BUG_ON(!channel_op(ctx->ch).alloc_obj);
252         nvhost_module_busy(ctx->ch->dev);
253         ret = channel_op(ctx->ch).alloc_obj(ctx->hwctx, args);
254         nvhost_module_idle(ctx->ch->dev);
255         return ret;
256 }
257
258 static int nvhost_ioctl_channel_free_obj_ctx(
259         struct nvhost_channel_userctx *ctx,
260         struct nvhost_free_obj_ctx_args *args)
261 {
262         int ret;
263
264         BUG_ON(!channel_op(ctx->ch).free_obj);
265         nvhost_module_busy(ctx->ch->dev);
266         ret = channel_op(ctx->ch).free_obj(ctx->hwctx, args);
267         nvhost_module_idle(ctx->ch->dev);
268         return ret;
269 }
270
271 static int nvhost_ioctl_channel_alloc_gpfifo(
272         struct nvhost_channel_userctx *ctx,
273         struct nvhost_alloc_gpfifo_args *args)
274 {
275         int ret;
276
277         BUG_ON(!channel_op(ctx->ch).alloc_gpfifo);
278         nvhost_module_busy(ctx->ch->dev);
279         ret = channel_op(ctx->ch).alloc_gpfifo(ctx->hwctx, args);
280         nvhost_module_idle(ctx->ch->dev);
281         return ret;
282 }
283
284 static int nvhost_ioctl_channel_set_error_notifier(
285         struct nvhost_channel_userctx *ctx,
286         struct nvhost_set_error_notifier *args)
287 {
288         int ret;
289         BUG_ON(!channel_op(ctx->ch).set_error_notifier);
290         ret = channel_op(ctx->ch).set_error_notifier(ctx->hwctx, args);
291         return ret;
292 }
293
294 static int nvhost_ioctl_channel_submit_gpfifo(
295         struct nvhost_channel_userctx *ctx,
296         struct nvhost_submit_gpfifo_args *args)
297 {
298         void *gpfifo;
299         u32 size;
300         int ret = 0;
301
302         if (!ctx->hwctx || ctx->hwctx->has_timedout)
303                 return -ETIMEDOUT;
304
305         size = args->num_entries * sizeof(struct nvhost_gpfifo);
306
307         gpfifo = kzalloc(size, GFP_KERNEL);
308         if (!gpfifo)
309                 return -ENOMEM;
310
311         if (copy_from_user(gpfifo,
312                            (void __user *)(uintptr_t)args->gpfifo, size)) {
313                 ret = -EINVAL;
314                 goto clean_up;
315         }
316
317         BUG_ON(!channel_op(ctx->ch).submit_gpfifo);
318
319         nvhost_module_busy(ctx->ch->dev);
320         ret = channel_op(ctx->ch).submit_gpfifo(ctx->hwctx, gpfifo,
321                         args->num_entries, &args->fence, args->flags);
322         nvhost_module_idle(ctx->ch->dev);
323 clean_up:
324         kfree(gpfifo);
325         return ret;
326 }
327
328 static int nvhost_ioctl_channel_wait(
329         struct nvhost_channel_userctx *ctx,
330         struct nvhost_wait_args *args)
331 {
332         int ret;
333
334         BUG_ON(!channel_op(ctx->ch).wait);
335         nvhost_module_busy(ctx->ch->dev);
336         ret = channel_op(ctx->ch).wait(ctx->hwctx, args);
337         nvhost_module_idle(ctx->ch->dev);
338         return ret;
339 }
340
341 static int nvhost_ioctl_channel_set_priority(
342         struct nvhost_channel_userctx *ctx,
343         struct nvhost_set_priority_args *args)
344 {
345         int ret = 0;
346         if (channel_op(ctx->ch).set_priority) {
347                 nvhost_module_busy(ctx->ch->dev);
348                 ret = channel_op(ctx->ch).set_priority(ctx->hwctx, args);
349                 nvhost_module_idle(ctx->ch->dev);
350         }
351         return ret;
352 }
353
354 static int nvhost_ioctl_channel_zcull_bind(
355         struct nvhost_channel_userctx *ctx,
356         struct nvhost_zcull_bind_args *args)
357 {
358         int ret;
359
360         BUG_ON(!channel_zcull_op(ctx->ch).bind);
361         nvhost_module_busy(ctx->ch->dev);
362         ret = channel_zcull_op(ctx->ch).bind(ctx->hwctx, args);
363         nvhost_module_idle(ctx->ch->dev);
364         return ret;
365 }
366
367 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
368                 struct nvhost_submit_args *args)
369 {
370         struct nvhost_job *job;
371         int num_cmdbufs = args->num_cmdbufs;
372         int num_relocs = args->num_relocs;
373         int num_waitchks = args->num_waitchks;
374         int num_syncpt_incrs = args->num_syncpt_incrs;
375         struct nvhost_cmdbuf __user *cmdbufs =
376                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
377         struct nvhost_cmdbuf __user *cmdbuf_exts =
378                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbuf_exts;
379         struct nvhost_reloc __user *relocs =
380                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
381         struct nvhost_reloc_shift __user *reloc_shifts =
382                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
383         struct nvhost_waitchk __user *waitchks =
384                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
385         struct nvhost_syncpt_incr __user *syncpt_incrs =
386                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
387         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
388         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
389         u32 __user *class_ids = (u32 *)(uintptr_t)args->class_ids;
390
391         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
392         u32 *local_waitbases = NULL, *local_class_ids = NULL;
393         int err, i, hwctx_syncpt_idx = -1;
394
395         if (num_syncpt_incrs > host->info.nb_pts)
396                 return -EINVAL;
397
398         job = nvhost_job_alloc(ctx->ch,
399                         ctx->hwctx,
400                         num_cmdbufs,
401                         num_relocs,
402                         num_waitchks,
403                         num_syncpt_incrs,
404                         ctx->memmgr);
405         if (!job)
406                 return -ENOMEM;
407
408         job->num_relocs = args->num_relocs;
409         job->num_waitchk = args->num_waitchks;
410         job->num_syncpts = args->num_syncpt_incrs;
411         job->priority = ctx->priority;
412         job->clientid = ctx->clientid;
413
414         /* mass copy class_ids */
415         if (args->class_ids) {
416                 local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
417                         GFP_KERNEL);
418                 if (!local_class_ids) {
419                         err = -ENOMEM;
420                         goto fail;
421                 }
422                 err = copy_from_user(local_class_ids, class_ids,
423                         sizeof(u32) * num_cmdbufs);
424                 if (err) {
425                         err = -EINVAL;
426                         goto fail;
427                 }
428         }
429
430         for (i = 0; i < num_cmdbufs; ++i) {
431                 struct nvhost_cmdbuf cmdbuf;
432                 struct nvhost_cmdbuf_ext cmdbuf_ext;
433                 u32 class_id = class_ids ? local_class_ids[i] : 0;
434
435                 err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
436                 if (err)
437                         goto fail;
438
439                 err = copy_from_user(&cmdbuf_ext,
440                                 cmdbuf_exts + i, sizeof(cmdbuf_ext));
441                 if (err)
442                         cmdbuf_ext.pre_fence = -1;
443
444                 nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
445                                       cmdbuf.offset, class_id,
446                                       cmdbuf_ext.pre_fence);
447         }
448
449         kfree(local_class_ids);
450         local_class_ids = NULL;
451
452         err = copy_from_user(job->relocarray,
453                         relocs, sizeof(*relocs) * num_relocs);
454         if (err)
455                 goto fail;
456
457         err = copy_from_user(job->relocshiftarray,
458                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
459         if (err)
460                 goto fail;
461
462         err = copy_from_user(job->waitchk,
463                         waitchks, sizeof(*waitchks) * num_waitchks);
464         if (err)
465                 goto fail;
466
467         /* mass copy waitbases */
468         if (args->waitbases) {
469                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
470                         GFP_KERNEL);
471                 if (!local_waitbases) {
472                         err = -ENOMEM;
473                         goto fail;
474                 }
475
476                 err = copy_from_user(local_waitbases, waitbases,
477                         sizeof(u32) * num_syncpt_incrs);
478                 if (err) {
479                         err = -EINVAL;
480                         goto fail;
481                 }
482         }
483
484         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
485         if (!ctx->hwctx)
486                 hwctx_syncpt_idx = 0;
487
488         /*
489          * Go through each syncpoint from userspace. Here we:
490          * - Copy syncpoint information
491          * - Validate each syncpoint
492          * - Determine waitbase for each syncpoint
493          * - Determine the index of hwctx syncpoint in the table
494          */
495
496         for (i = 0; i < num_syncpt_incrs; ++i) {
497                 u32 waitbase;
498                 struct nvhost_syncpt_incr sp;
499
500                 /* Copy */
501                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
502                 if (err)
503                         goto fail;
504
505                 /* Validate */
506                 if (sp.syncpt_id > host->info.nb_pts) {
507                         err = -EINVAL;
508                         goto fail;
509                 }
510
511                 /* Determine waitbase */
512                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
513                         waitbase = local_waitbases[i];
514                 else
515                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
516                                 sp.syncpt_id);
517
518                 /* Store */
519                 job->sp[i].id = sp.syncpt_id;
520                 job->sp[i].incrs = sp.syncpt_incrs;
521                 job->sp[i].waitbase = waitbase;
522
523                 /* Find hwctx syncpoint */
524                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
525                         hwctx_syncpt_idx = i;
526         }
527
528         /* not needed anymore */
529         kfree(local_waitbases);
530         local_waitbases = NULL;
531
532         /* Is hwctx_syncpt_idx valid? */
533         if (hwctx_syncpt_idx == -1) {
534                 err = -EINVAL;
535                 goto fail;
536         }
537
538         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
539
540         trace_nvhost_channel_submit(ctx->ch->dev->name,
541                 job->num_gathers, job->num_relocs, job->num_waitchk,
542                 job->sp[job->hwctx_syncpt_idx].id,
543                 job->sp[job->hwctx_syncpt_idx].incrs);
544
545         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
546         if (err)
547                 goto fail;
548
549         if (args->timeout)
550                 job->timeout = min(ctx->timeout, args->timeout);
551         else
552                 job->timeout = ctx->timeout;
553         job->timeout_debug_dump = ctx->timeout_debug_dump;
554
555         err = nvhost_channel_submit(job);
556         if (err)
557                 goto fail_submit;
558
559         /* Deliver multiple fences back to the userspace */
560         if (fences)
561                 for (i = 0; i < num_syncpt_incrs; ++i) {
562                         u32 fence = job->sp[i].fence;
563                         err = copy_to_user(fences, &fence, sizeof(u32));
564                         if (err)
565                                 break;
566                         fences++;
567                 }
568
569         /* Deliver the fence using the old mechanism _only_ if a single
570          * syncpoint is used. */
571
572         if (args->flags & BIT(NVHOST_SUBMIT_FLAG_SYNC_FENCE_FD)) {
573                 struct nvhost_ctrl_sync_fence_info pts[num_syncpt_incrs];
574
575                 for (i = 0; i < num_syncpt_incrs; i++) {
576                         pts[i].id = job->sp[i].id;
577                         pts[i].thresh = job->sp[i].fence;
578                 }
579
580                 err = nvhost_sync_create_fence(
581                                 &nvhost_get_host(ctx->ch->dev)->syncpt,
582                                 pts, num_syncpt_incrs, "fence", &args->fence);
583                 if (err)
584                         goto fail;
585         } else if (num_syncpt_incrs == 1)
586                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
587         else
588                 args->fence = 0;
589
590         nvhost_job_put(job);
591
592         return 0;
593
594 fail_submit:
595         nvhost_job_unpin(job);
596 fail:
597         nvhost_job_put(job);
598         kfree(local_class_ids);
599         kfree(local_waitbases);
600         return err;
601 }
602
603 static int nvhost_ioctl_channel_set_ctxswitch(
604                 struct nvhost_channel_userctx *ctx,
605                 struct nvhost_set_ctxswitch_args *args)
606 {
607         struct nvhost_cmdbuf cmdbuf_save;
608         struct nvhost_cmdbuf cmdbuf_restore;
609         struct nvhost_syncpt_incr save_incr, restore_incr;
610         u32 save_waitbase, restore_waitbase;
611         struct nvhost_reloc reloc;
612         struct nvhost_hwctx_handler *ctxhandler = NULL;
613         struct nvhost_hwctx *nhwctx = NULL;
614         struct user_hwctx *hwctx;
615         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
616         int err;
617
618         /* Only channels with context support */
619         if (!ctx->hwctx)
620                 return -EFAULT;
621
622         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
623         if (args->num_cmdbufs_save != 1
624                         || args->num_cmdbufs_restore != 1
625                         || args->num_save_incrs != 1
626                         || args->num_restore_incrs != 1
627                         || args->num_relocs != 1)
628                 return -EINVAL;
629
630         err = copy_from_user(&cmdbuf_save,
631                         (void *)(uintptr_t)args->cmdbuf_save,
632                         sizeof(cmdbuf_save));
633         if (err)
634                 goto fail;
635
636         err = copy_from_user(&cmdbuf_restore,
637                         (void *)(uintptr_t)args->cmdbuf_restore,
638                         sizeof(cmdbuf_restore));
639         if (err)
640                 goto fail;
641
642         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
643                         sizeof(reloc));
644         if (err)
645                 goto fail;
646
647         err = copy_from_user(&save_incr,
648                         (void *)(uintptr_t)args->save_incrs,
649                         sizeof(save_incr));
650         if (err)
651                 goto fail;
652         err = copy_from_user(&save_waitbase,
653                         (void *)(uintptr_t)args->save_waitbases,
654                         sizeof(save_waitbase));
655
656         err = copy_from_user(&restore_incr,
657                         (void *)(uintptr_t)args->restore_incrs,
658                         sizeof(restore_incr));
659         if (err)
660                 goto fail;
661         err = copy_from_user(&restore_waitbase,
662                         (void *)(uintptr_t)args->restore_waitbases,
663                         sizeof(restore_waitbase));
664
665         if (save_incr.syncpt_id != pdata->syncpts[0]
666                         || restore_incr.syncpt_id != pdata->syncpts[0]
667                         || save_waitbase != pdata->waitbases[0]
668                         || restore_waitbase != pdata->waitbases[0]) {
669                 err = -EINVAL;
670                 goto fail;
671         }
672         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
673                         save_waitbase, ctx->ch);
674         if (!ctxhandler) {
675                 err = -ENOMEM;
676                 goto fail;
677         }
678
679         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
680         if (!nhwctx) {
681                 err = -ENOMEM;
682                 goto fail_hwctx;
683         }
684         hwctx = to_user_hwctx(nhwctx);
685
686         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
687                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
688                         cmdbuf_restore.mem, cmdbuf_restore.offset,
689                         cmdbuf_restore.words,
690                         pdata->syncpts[0], pdata->waitbases[0],
691                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
692
693         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
694         if (!nhwctx->memmgr)
695                 goto fail_set_restore;
696
697         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
698                         cmdbuf_restore.offset, cmdbuf_restore.words);
699         if (err)
700                 goto fail_set_restore;
701
702         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
703                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
704         if (err)
705                 goto fail_set_save;
706
707         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
708         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
709
710         /* Free old context */
711         ctx->hwctx->h->put(ctx->hwctx);
712         ctx->hwctx = nhwctx;
713
714         return 0;
715
716 fail_set_save:
717 fail_set_restore:
718         ctxhandler->put(&hwctx->hwctx);
719 fail_hwctx:
720         user_ctxhandler_free(ctxhandler);
721 fail:
722         return err;
723 }
724
725 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
726 static int nvhost_ioctl_channel_cycle_stats(
727         struct nvhost_channel_userctx *ctx,
728         struct nvhost_cycle_stats_args *args)
729 {
730         int ret;
731         BUG_ON(!channel_op(ctx->ch).cycle_stats);
732         ret = channel_op(ctx->ch).cycle_stats(ctx->hwctx, args);
733         return ret;
734 }
735 #endif
736
737 static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
738         struct nvhost_read_3d_reg_args *args)
739 {
740         return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
741                         args->offset, &args->value);
742 }
743
744 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
745 {
746         int i;
747         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
748
749         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
750                 if (pdata->clocks[i].moduleid == moduleid)
751                         return i;
752         }
753
754         /* Old user space is sending a random number in args. Return clock
755          * zero in these cases. */
756         return 0;
757 }
758
759 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
760         struct nvhost_clk_rate_args *arg)
761 {
762         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
763                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
764         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
765                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
766         int index = moduleid ?
767                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
768
769         return nvhost_module_set_rate(ctx->ch->dev,
770                         ctx, arg->rate, index, attr);
771 }
772
773 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
774         u32 moduleid, u32 *rate)
775 {
776         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
777
778         return nvhost_module_get_rate(ctx->ch->dev,
779                         (unsigned long *)rate, index);
780 }
781
/*
 * MODULE_REGRDWR: read or write a series of register blocks. Userspace
 * supplies num_offsets starting offsets; for each one, block_size bytes
 * are transferred in batches of at most 64 words through the vals[]
 * bounce buffer. The values stream advances continuously across all
 * blocks. Range validation is performed per batch by
 * nvhost_read/write_module_regs() via validate_reg().
 */
static int nvhost_ioctl_channel_module_regrdwr(
        struct nvhost_channel_userctx *ctx,
        struct nvhost_ctrl_module_regrdwr_args *args)
{
        u32 num_offsets = args->num_offsets;
        u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
        u32 __user *values = (u32 *)(uintptr_t)args->values;
        u32 vals[64];   /* on-stack bounce buffer, 64 words per batch */
        struct platform_device *ndev;

        trace_nvhost_ioctl_channel_module_regrdwr(args->id,
                args->num_offsets, args->write);

        /* Check that there is something to read and that block size is
         * u32 aligned */
        if (num_offsets == 0 || args->block_size & 3)
                return -EINVAL;

        ndev = ctx->ch->dev;

        while (num_offsets--) {
                int err;
                u32 offs;
                /* words remaining in the current block */
                int remaining = args->block_size >> 2;

                if (get_user(offs, offsets))
                        return -EFAULT;

                offsets++;
                while (remaining) {
                        int batch = min(remaining, 64);
                        if (args->write) {
                                if (copy_from_user(vals, values,
                                                batch * sizeof(u32)))
                                        return -EFAULT;

                                err = nvhost_write_module_regs(ndev,
                                        offs, batch, vals);
                                if (err)
                                        return err;
                        } else {
                                err = nvhost_read_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;

                                if (copy_to_user(values, vals,
                                                batch * sizeof(u32)))
                                        return -EFAULT;
                        }

                        remaining -= batch;
                        offs += batch * sizeof(u32);
                        /* values is NOT reset per block: it advances
                         * continuously across all offsets */
                        values += batch;
                }
        }

        return 0;
}
841
842 static u32 create_mask(u32 *words, int num)
843 {
844         int i;
845         u32 word = 0;
846         for (i = 0; i < num && words[i] && words[i] < 32; i++)
847                 word |= BIT(words[i]);
848
849         return word;
850 }
851
852 static long nvhost_channelctl(struct file *filp,
853         unsigned int cmd, unsigned long arg)
854 {
855         struct nvhost_channel_userctx *priv = filp->private_data;
856         struct device *dev = &priv->ch->dev->dev;
857         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
858         int err = 0;
859
860         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
861                 (_IOC_NR(cmd) == 0) ||
862                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
863                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
864                 return -EFAULT;
865
866         if (_IOC_DIR(cmd) & _IOC_WRITE) {
867                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
868                         return -EFAULT;
869         }
870
871         switch (cmd) {
872         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
873         {
874                 struct nvhost_device_data *pdata = \
875                         platform_get_drvdata(priv->ch->dev);
876                 ((struct nvhost_get_param_args *)buf)->value =
877                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
878                 break;
879         }
880         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
881         {
882                 struct nvhost_device_data *pdata = \
883                         platform_get_drvdata(priv->ch->dev);
884                 struct nvhost_get_param_arg *arg =
885                         (struct nvhost_get_param_arg *)buf;
886                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
887                                 || !pdata->syncpts[arg->param])
888                         return -EINVAL;
889                 arg->value = pdata->syncpts[arg->param];
890                 break;
891         }
892         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
893         {
894                 struct nvhost_device_data *pdata = \
895                         platform_get_drvdata(priv->ch->dev);
896                 ((struct nvhost_get_param_args *)buf)->value =
897                         create_mask(pdata->waitbases,
898                                         NVHOST_MODULE_MAX_WAITBASES);
899                 break;
900         }
901         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
902         {
903                 struct nvhost_device_data *pdata = \
904                         platform_get_drvdata(priv->ch->dev);
905                 struct nvhost_get_param_arg *arg =
906                         (struct nvhost_get_param_arg *)buf;
907                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
908                                 || !pdata->waitbases[arg->param])
909                         return -EINVAL;
910                 arg->value = pdata->waitbases[arg->param];
911                 break;
912         }
913         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
914         {
915                 struct nvhost_device_data *pdata = \
916                         platform_get_drvdata(priv->ch->dev);
917                 ((struct nvhost_get_param_args *)buf)->value =
918                         create_mask(pdata->modulemutexes,
919                                         NVHOST_MODULE_MAX_MODMUTEXES);
920                 break;
921         }
922         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
923         {
924                 struct nvhost_device_data *pdata = \
925                         platform_get_drvdata(priv->ch->dev);
926                 struct nvhost_get_param_arg *arg =
927                         (struct nvhost_get_param_arg *)buf;
928                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
929                                 || !pdata->modulemutexes[arg->param])
930                         return -EINVAL;
931                 arg->value = pdata->modulemutexes[arg->param];
932                 break;
933         }
934         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
935         {
936                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
937                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
938
939                 if (IS_ERR(new_client)) {
940                         err = PTR_ERR(new_client);
941                         break;
942                 }
943                 if (priv->memmgr)
944                         nvhost_memmgr_put_mgr(priv->memmgr);
945
946                 priv->memmgr = new_client;
947
948                 if (priv->hwctx)
949                         priv->hwctx->memmgr = new_client;
950
951                 break;
952         }
953         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
954                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
955                 break;
956         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
957                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
958                 break;
959         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
960                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
961                 break;
962         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
963                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
964                 break;
965         case NVHOST_IOCTL_CHANNEL_WAIT:
966                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
967                 break;
968         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
969                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
970                 break;
971         case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
972                 err = nvhost_ioctl_channel_set_error_notifier(priv,
973                         (void *)buf);
974                 break;
975 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
976         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
977                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
978                 break;
979 #endif
980         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
981                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
982                 break;
983         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
984         {
985                 struct nvhost_clk_rate_args *arg =
986                                 (struct nvhost_clk_rate_args *)buf;
987
988                 err = nvhost_ioctl_channel_get_rate(priv,
989                                 arg->moduleid, &arg->rate);
990                 break;
991         }
992         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
993         {
994                 struct nvhost_clk_rate_args *arg =
995                                 (struct nvhost_clk_rate_args *)buf;
996
997                 err = nvhost_ioctl_channel_set_rate(priv, arg);
998                 break;
999         }
1000         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1001                 priv->timeout =
1002                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1003                 dev_dbg(&priv->ch->dev->dev,
1004                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1005                         __func__, priv->timeout, priv);
1006                 break;
1007         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1008                 ((struct nvhost_get_param_args *)buf)->value =
1009                                 priv->hwctx->has_timedout;
1010                 break;
1011         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1012                 nvhost_ioctl_channel_set_priority(priv, (void *)buf);
1013                 priv->priority =
1014                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
1015                 break;
1016         case NVHOST32_IOCTL_CHANNEL_MODULE_REGRDWR:
1017         {
1018                 struct nvhost32_ctrl_module_regrdwr_args *args32 =
1019                         (struct nvhost32_ctrl_module_regrdwr_args *)buf;
1020                 struct nvhost_ctrl_module_regrdwr_args args;
1021                 args.id = args32->id;
1022                 args.num_offsets = args32->num_offsets;
1023                 args.block_size = args32->block_size;
1024                 args.offsets = args32->offsets;
1025                 args.values = args32->values;
1026                 args.write = args32->write;
1027                 err = nvhost_ioctl_channel_module_regrdwr(priv, &args);
1028                 break;
1029         }
1030         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1031                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
1032                 break;
1033         case NVHOST32_IOCTL_CHANNEL_SUBMIT:
1034         {
1035                 struct nvhost32_submit_args *args32 = (void *)buf;
1036                 struct nvhost_submit_args args;
1037
1038                 memset(&args, 0, sizeof(args));
1039                 args.submit_version = args32->submit_version;
1040                 args.num_syncpt_incrs = args32->num_syncpt_incrs;
1041                 args.num_cmdbufs = args32->num_cmdbufs;
1042                 args.num_relocs = args32->num_relocs;
1043                 args.num_waitchks = args32->num_waitchks;
1044                 args.timeout = args32->timeout;
1045                 args.syncpt_incrs = args32->syncpt_incrs;
1046                 args.fence = args32->fence;
1047
1048                 args.cmdbufs = args32->cmdbufs;
1049                 args.relocs = args32->relocs;
1050                 args.reloc_shifts = args32->reloc_shifts;
1051                 args.waitchks = args32->waitchks;
1052                 args.waitbases = args32->waitbases;
1053                 args.class_ids = args32->class_ids;
1054                 args.fences = args32->fences;
1055
1056                 err = nvhost_ioctl_channel_submit(priv, &args);
1057                 args32->fence = args.fence;
1058                 break;
1059         }
1060         case NVHOST_IOCTL_CHANNEL_SUBMIT:
1061                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1062                 break;
1063         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1064                 priv->timeout = (u32)
1065                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
1066                 priv->timeout_debug_dump = !((u32)
1067                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1068                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1069                 dev_dbg(&priv->ch->dev->dev,
1070                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1071                         __func__, priv->timeout, priv);
1072                 break;
1073         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
1074                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
1075                 break;
1076         default:
1077                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
1078                 err = -ENOTTY;
1079                 break;
1080         }
1081
1082         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1083                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1084
1085         return err;
1086 }
1087
/* File operations for the per-channel device nodes; the same ioctl
 * handler serves both native and 32-bit compat callers. */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_channelctl,
#endif
	.unlocked_ioctl = nvhost_channelctl
};
1097
1098 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
1099 {
1100         struct nvhost_channel_userctx *userctx;
1101         struct file *f = fget(fd);
1102         if (!f)
1103                 return 0;
1104
1105         if (f->f_op != &nvhost_channelops) {
1106                 fput(f);
1107                 return 0;
1108         }
1109
1110         userctx = (struct nvhost_channel_userctx *)f->private_data;
1111         fput(f);
1112         return userctx->hwctx;
1113 }
1114
1115
/* File operations for the address-space ("as-") device nodes; the same
 * ioctl handler serves both native and 32-bit compat callers. */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_as_dev_ctl,
#endif
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
1125
/* Maps a host1x class id to the device-node name suffix used when
 * creating /dev entries (first choice in get_device_name_for_dev()). */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1138
/* Maps an nvhost module id to the device-node name suffix; consulted
 * when no class-id match is found (second choice in
 * get_device_name_for_dev()). */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1151
1152 static const char *get_device_name_for_dev(struct platform_device *dev)
1153 {
1154         int i;
1155         /* first choice is to use the class id if specified */
1156         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1157                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1158                 if (pdata->class == class_id_dev_name_map[i].class_id)
1159                         return class_id_dev_name_map[i].dev_name;
1160         }
1161
1162         /* second choice is module name if specified */
1163         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1164                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1165                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1166                         return module_id_dev_name_map[i].dev_name;
1167         }
1168
1169         /* last choice is to just use the given dev name */
1170         return dev->name;
1171 }
1172
1173 static struct device *nvhost_client_device_create(
1174         struct platform_device *pdev, struct cdev *cdev,
1175         const char *cdev_name, int devno,
1176         const struct file_operations *ops)
1177 {
1178         struct nvhost_master *host = nvhost_get_host(pdev);
1179         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1180         const char *use_dev_name;
1181         struct device *dev;
1182         int err;
1183
1184         nvhost_dbg_fn("");
1185
1186         BUG_ON(!host);
1187
1188         cdev_init(cdev, ops);
1189         cdev->owner = THIS_MODULE;
1190
1191         err = cdev_add(cdev, devno, 1);
1192         if (err < 0) {
1193                 dev_err(&pdev->dev,
1194                         "failed to add chan %i cdev\n", pdata->index);
1195                 return NULL;
1196         }
1197         use_dev_name = get_device_name_for_dev(pdev);
1198
1199         dev = device_create(host->nvhost_class,
1200                         NULL, devno, NULL,
1201                         (pdev->id <= 0) ?
1202                         IFACE_NAME "-%s%s" :
1203                         IFACE_NAME "-%s%s.%d",
1204                         cdev_name, use_dev_name, pdev->id);
1205
1206         if (IS_ERR(dev)) {
1207                 err = PTR_ERR(dev);
1208                 dev_err(&pdev->dev,
1209                         "failed to create %s %s device for %s\n",
1210                         use_dev_name, cdev_name, pdev->name);
1211                 return NULL;
1212         }
1213
1214         return dev;
1215 }
1216
1217 int nvhost_client_user_init(struct platform_device *dev)
1218 {
1219         int err, devno;
1220         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1221         struct nvhost_channel *ch = pdata->channel;
1222
1223         BUG_ON(!ch);
1224         /* reserve 5 minor #s for <dev> and as-<dev>, ctrl-<dev>,
1225          * dbg-<dev> and prof-<dev> */
1226
1227         err = alloc_chrdev_region(&devno, 0, 5, IFACE_NAME);
1228         if (err < 0) {
1229                 dev_err(&dev->dev, "failed to allocate devno\n");
1230                 goto fail;
1231         }
1232
1233         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1234                                 "", devno, &nvhost_channelops);
1235         if (ch->node == NULL)
1236                 goto fail;
1237         if (pdata->as_ops) {
1238                 ++devno;
1239                 ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1240                                         "as-", devno, &nvhost_asops);
1241                 if (ch->as_node == NULL)
1242                         goto fail;
1243         }
1244
1245         /* module control (npn-channel based, global) interface */
1246         if (pdata->ctrl_ops) {
1247                 ++devno;
1248                 pdata->ctrl_node = nvhost_client_device_create(dev,
1249                                         &pdata->ctrl_cdev, "ctrl-",
1250                                         devno, pdata->ctrl_ops);
1251                 if (pdata->ctrl_node == NULL)
1252                         goto fail;
1253         }
1254
1255         /* module debugger interface (per channel and global) */
1256         if (pdata->dbg_ops) {
1257                 ++devno;
1258                 pdata->dbg_node = nvhost_client_device_create(dev,
1259                                         &pdata->dbg_cdev, "dbg-",
1260                                         devno, pdata->dbg_ops);
1261                 if (pdata->dbg_node == NULL)
1262                         goto fail;
1263         }
1264
1265         /* module profiler interface (per channel and global) */
1266         if (pdata->prof_ops) {
1267                 ++devno;
1268                 pdata->prof_node = nvhost_client_device_create(dev,
1269                                         &pdata->prof_cdev, "prof-",
1270                                         devno, pdata->prof_ops);
1271                 if (pdata->prof_node == NULL)
1272                         goto fail;
1273         }
1274
1275
1276
1277         return 0;
1278 fail:
1279         return err;
1280 }
1281
1282 void nvhost_client_user_deinit(struct platform_device *dev)
1283 {
1284         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1285         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1286         struct nvhost_channel *ch = pdata->channel;
1287
1288         BUG_ON(!ch);
1289
1290         if (ch->node) {
1291                 device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
1292                 cdev_del(&ch->cdev);
1293         }
1294
1295         if (ch->as_node) {
1296                 device_destroy(nvhost_master->nvhost_class, ch->as_cdev.dev);
1297                 cdev_del(&ch->as_cdev);
1298         }
1299
1300         if (pdata->ctrl_node) {
1301                 device_destroy(nvhost_master->nvhost_class,
1302                                pdata->ctrl_cdev.dev);
1303                 cdev_del(&pdata->ctrl_cdev);
1304         }
1305
1306         if (pdata->dbg_node) {
1307                 device_destroy(nvhost_master->nvhost_class,
1308                                pdata->dbg_cdev.dev);
1309                 cdev_del(&pdata->dbg_cdev);
1310         }
1311
1312         if (pdata->prof_node) {
1313                 device_destroy(nvhost_master->nvhost_class,
1314                                pdata->prof_cdev.dev);
1315                 cdev_del(&pdata->prof_cdev);
1316         }
1317 }
1318
/*
 * Bring up one nvhost client device: allocate its channel, create
 * debugfs and user-space nodes, register it in the device list, reset
 * its syncpoints and set up DMA parameters.
 *
 * Returns 0 on success or a negative errno.  On failure, resources are
 * unwound through the fail/fail1 labels in reverse acquisition order.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail1;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	/* register the slave device (once) with this device as master */
	if (pdata->slave && !pdata->slave_initialized) {
		struct nvhost_device_data *slave_pdata =
					pdata->slave->dev.platform_data;
		slave_pdata->master = dev;
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
		pdata->slave_initialized = 1;
	}

	return 0;

fail:
	/* Add clean-up */
	dev_err(&dev->dev, "failed to init client device\n");
	nvhost_client_user_deinit(dev);
fail1:
	nvhost_device_debug_deinit(dev);
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1386
1387 int nvhost_client_device_release(struct platform_device *dev)
1388 {
1389         struct nvhost_channel *ch;
1390         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1391
1392         ch = pdata->channel;
1393
1394         /* Release nvhost module resources */
1395         nvhost_module_deinit(dev);
1396
1397         /* Remove from nvhost device list */
1398         nvhost_device_list_remove(dev);
1399
1400         /* Release chardev and device node for user space */
1401         nvhost_client_user_deinit(dev);
1402
1403         /* Remove debugFS */
1404         nvhost_device_debug_deinit(dev);
1405
1406         /* Free nvhost channel */
1407         nvhost_free_channel(ch);
1408
1409         return 0;
1410 }
1411 EXPORT_SYMBOL(nvhost_client_device_release);
1412
1413 int nvhost_client_device_get_resources(struct platform_device *dev)
1414 {
1415         int i;
1416         void __iomem *regs = NULL;
1417         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1418
1419         for (i = 0; i < dev->num_resources; i++) {
1420                 struct resource *r = NULL;
1421
1422                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1423                 /* We've run out of mem resources */
1424                 if (!r)
1425                         break;
1426
1427                 regs = devm_request_and_ioremap(&dev->dev, r);
1428                 if (!regs)
1429                         goto fail;
1430
1431                 pdata->aperture[i] = regs;
1432         }
1433
1434         return 0;
1435
1436 fail:
1437         dev_err(&dev->dev, "failed to get register memory\n");
1438
1439         return -ENXIO;
1440 }
1441 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1442
1443 /* This is a simple wrapper around request_firmware that takes
1444  * 'fw_name' and if available applies a SOC relative path prefix to it.
1445  * The caller is responsible for calling release_firmware later.
1446  */
1447 const struct firmware *
1448 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1449 {
1450         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1451         const struct firmware *fw;
1452         char *fw_path = NULL;
1453         int path_len, err;
1454
1455         /* This field is NULL when calling from SYS_EXIT.
1456            Add a check here to prevent crash in request_firmware */
1457         if (!current->fs) {
1458                 BUG();
1459                 return NULL;
1460         }
1461
1462         if (!fw_name)
1463                 return NULL;
1464
1465         if (op->soc_name) {
1466                 path_len = strlen(fw_name) + strlen(op->soc_name);
1467                 path_len += 2; /* for the path separator and zero terminator*/
1468
1469                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1470                                      GFP_KERNEL);
1471                 if (!fw_path)
1472                         return NULL;
1473
1474                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1475                 fw_name = fw_path;
1476         }
1477
1478         err = request_firmware(&fw, fw_name, &dev->dev);
1479         kfree(fw_path);
1480         if (err) {
1481                 dev_err(&dev->dev, "failed to get firmware\n");
1482                 return NULL;
1483         }
1484
1485         /* note: caller must release_firmware */
1486         return fw;
1487 }