/* drivers/video/tegra/host/bus_client.c */
/*
 * Tegra Graphics Host Client Module
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <linux/tegra-soc.h>

#include <trace/events/nvhost.h>

#include <linux/io.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include <mach/gpufuse.h>

#include "debug.h"
#include "bus_client.h"
#include "dev.h"
#include "class_ids.h"
#include "nvhost_as.h"
#include "nvhost_memmgr.h"
#include "chip_support.h"
#include "nvhost_acm.h"

#include "nvhost_syncpt.h"
#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "nvhost_hwctx.h"
#include "user_hwctx.h"

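/*
 * Overview: this file implements the per-client side of the nvhost driver.
 * It provides
 * - MMIO helpers (nvhost_read_module_regs()/nvhost_write_module_regs())
 *   that bracket register access with nvhost_module_busy()/idle(),
 * - the per-channel character device (open/release/ioctl) through which
 *   user space submits work, and
 * - helpers that create the /dev nodes and claim the device's register
 *   apertures.
 */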
static int validate_reg(struct platform_device *ndev, u32 offset, int count)
{
	int err = 0;
	struct resource *r;
	struct nvhost_device_data *pdata = platform_get_drvdata(ndev);

	r = platform_get_resource(pdata->master ? pdata->master : ndev,
			IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&ndev->dev, "failed to get memory resource\n");
		return -ENODEV;
	}

	if (offset + 4 * count > resource_size(r)
			|| (offset + 4 * count < offset))
		err = -EPERM;

	return err;
}

static void __iomem *get_aperture(struct platform_device *pdev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(pdev);

	if (pdata->master)
		pdata = platform_get_drvdata(pdata->master);

	return pdata->aperture[0];
}

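/*
 * Both register accessors below validate the requested window against the
 * device's first IORESOURCE_MEM resource (validate_reg() also rejects
 * offset + 4 * count wraparound) and keep the module powered for the
 * duration of the access via nvhost_module_busy()/nvhost_module_idle().
 */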
int nvhost_read_module_regs(struct platform_device *ndev,
			u32 offset, int count, u32 *values)
{
	void __iomem *p = get_aperture(ndev);
	int err;

	if (!p)
		return -ENODEV;

	/* verify offset */
	err = validate_reg(ndev, offset, count);
	if (err)
		return err;

	nvhost_module_busy(ndev);
	p += offset;
	while (count--) {
		*(values++) = readl(p);
		p += 4;
	}
	rmb();
	nvhost_module_idle(ndev);

	return 0;
}

int nvhost_write_module_regs(struct platform_device *ndev,
			u32 offset, int count, const u32 *values)
{
	int err;
	void __iomem *p = get_aperture(ndev);

	if (!p)
		return -ENODEV;

	/* verify offset */
	err = validate_reg(ndev, offset, count);
	if (err)
		return err;

	nvhost_module_busy(ndev);
	p += offset;
	while (count--) {
		writel(*(values++), p);
		p += 4;
	}
	wmb();
	nvhost_module_idle(ndev);

	return 0;
}

bool nvhost_client_can_writel(struct platform_device *pdev)
{
	return !!get_aperture(pdev);
}
EXPORT_SYMBOL(nvhost_client_can_writel);

void nvhost_client_writel(struct platform_device *pdev, u32 val, u32 reg)
{
	writel(val, get_aperture(pdev) + reg * 4);
}
EXPORT_SYMBOL(nvhost_client_writel);

u32 nvhost_client_readl(struct platform_device *pdev, u32 reg)
{
	return readl(get_aperture(pdev) + reg * 4);
}
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;
	struct nvhost_hwctx *hwctx;
	struct nvhost_job *job;
	struct mem_mgr *memmgr;
	u32 timeout;
	u32 priority;
	int clientid;
	bool timeout_debug_dump;
};

static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch);

	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}

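/*
 * Open defaults: medium priority, the platform's default submit timeout
 * (forced to 0, i.e. no timeout, on pre-silicon platforms), and debug dump
 * on timeout enabled. A hardware context is allocated here only for
 * channels whose ctxhandler provides an alloc op.
 */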
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch, false);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(dev_name(&ch->dev->dev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	if (nvhost_module_add_client(ch->dev, priv))
		goto fail;

	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		nvhost_module_busy(ch->dev);
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		nvhost_module_idle(ch->dev);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);
	pdata = dev_get_drvdata(ch->dev->dev.parent);
	priv->timeout = pdata->nvhost_timeout_default;
	priv->timeout_debug_dump = true;
	if (!tegra_platform_is_silicon())
		priv->timeout = 0;

	return 0;
fail:
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}

static int nvhost_ioctl_channel_alloc_obj_ctx(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_alloc_obj_ctx_args *args)
{
	int ret;

	BUG_ON(!channel_op(ctx->ch).alloc_obj);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op(ctx->ch).alloc_obj(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_free_obj_ctx(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_free_obj_ctx_args *args)
{
	int ret;

	BUG_ON(!channel_op(ctx->ch).free_obj);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op(ctx->ch).free_obj(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_alloc_gpfifo(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_alloc_gpfifo_args *args)
{
	int ret;

	BUG_ON(!channel_op(ctx->ch).alloc_gpfifo);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op(ctx->ch).alloc_gpfifo(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_set_error_notifier(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_set_error_notifier *args)
{
	int ret;

	BUG_ON(!channel_op(ctx->ch).set_error_notifier);
	ret = channel_op(ctx->ch).set_error_notifier(ctx->hwctx, args);
	return ret;
}

static int nvhost_ioctl_channel_submit_gpfifo(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_submit_gpfifo_args *args)
{
	void *gpfifo;
	u32 size;
	int ret = 0;

	if (!ctx->hwctx || ctx->hwctx->has_timedout)
		return -ETIMEDOUT;

	size = args->num_entries * sizeof(struct nvhost_gpfifo);

	gpfifo = kzalloc(size, GFP_KERNEL);
	if (!gpfifo)
		return -ENOMEM;

	if (copy_from_user(gpfifo,
			   (void __user *)(uintptr_t)args->gpfifo, size)) {
		ret = -EINVAL;
		goto clean_up;
	}

	BUG_ON(!channel_op(ctx->ch).submit_gpfifo);

	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op(ctx->ch).submit_gpfifo(ctx->hwctx, gpfifo,
			args->num_entries, &args->fence, args->flags);
	nvhost_module_idle(ctx->ch->dev);
clean_up:
	kfree(gpfifo);
	return ret;
}

static int nvhost_ioctl_channel_wait(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_wait_args *args)
{
	int ret;

	BUG_ON(!channel_op(ctx->ch).wait);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_op(ctx->ch).wait(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

static int nvhost_ioctl_channel_set_priority(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_set_priority_args *args)
{
	int ret = 0;

	if (channel_op(ctx->ch).set_priority) {
		nvhost_module_busy(ctx->ch->dev);
		ret = channel_op(ctx->ch).set_priority(ctx->hwctx, args);
		nvhost_module_idle(ctx->ch->dev);
	}
	return ret;
}

static int nvhost_ioctl_channel_zcull_bind(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_zcull_bind_args *args)
{
	int ret;

	BUG_ON(!channel_zcull_op(ctx->ch).bind);
	nvhost_module_busy(ctx->ch->dev);
	ret = channel_zcull_op(ctx->ch).bind(ctx->hwctx, args);
	nvhost_module_idle(ctx->ch->dev);
	return ret;
}

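/*
 * NVHOST_IOCTL_CHANNEL_SUBMIT handler. All user pointers arrive as 64-bit
 * values in the args struct and are cast through uintptr_t. The flow is:
 * allocate a job, copy in the per-cmdbuf class ids, gathers, relocs and
 * waitchks, resolve a waitbase for every syncpoint increment, pin the
 * job's memory, submit, then copy the resulting fences back to user space.
 */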
static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
		struct nvhost_submit_args *args)
{
	struct nvhost_job *job;
	int num_cmdbufs = args->num_cmdbufs;
	int num_relocs = args->num_relocs;
	int num_waitchks = args->num_waitchks;
	int num_syncpt_incrs = args->num_syncpt_incrs;
	struct nvhost_cmdbuf __user *cmdbufs =
		(struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
	struct nvhost_reloc __user *relocs =
		(struct nvhost_reloc *)(uintptr_t)args->relocs;
	struct nvhost_reloc_shift __user *reloc_shifts =
		(struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
	struct nvhost_waitchk __user *waitchks =
		(struct nvhost_waitchk *)(uintptr_t)args->waitchks;
	struct nvhost_syncpt_incr __user *syncpt_incrs =
		(struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
	u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
	u32 __user *fences = (u32 *)(uintptr_t)args->fences;
	u32 __user *class_ids = (u32 *)(uintptr_t)args->class_ids;

	struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
	u32 *local_waitbases = NULL, *local_class_ids = NULL;
	int err, i, hwctx_syncpt_idx = -1;

	if (num_syncpt_incrs > host->info.nb_pts)
		return -EINVAL;

	job = nvhost_job_alloc(ctx->ch,
			ctx->hwctx,
			num_cmdbufs,
			num_relocs,
			num_waitchks,
			num_syncpt_incrs,
			ctx->memmgr);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->num_syncpts = args->num_syncpt_incrs;
	job->priority = ctx->priority;
	job->clientid = ctx->clientid;

	/* mass copy class_ids */
	if (args->class_ids) {
		local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
			GFP_KERNEL);
		if (!local_class_ids) {
			err = -ENOMEM;
			goto fail;
		}
		err = copy_from_user(local_class_ids, class_ids,
			sizeof(u32) * num_cmdbufs);
		if (err) {
			err = -EINVAL;
			goto fail;
		}
	}

	for (i = 0; i < num_cmdbufs; ++i) {
		struct nvhost_cmdbuf cmdbuf;
		u32 class_id = class_ids ? local_class_ids[i] : 0;

		err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
		if (err)
			goto fail;

		nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
				cmdbuf.offset, class_id);
	}

	kfree(local_class_ids);
	local_class_ids = NULL;

	err = copy_from_user(job->relocarray,
			relocs, sizeof(*relocs) * num_relocs);
	if (err)
		goto fail;

	err = copy_from_user(job->relocshiftarray,
			reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
	if (err)
		goto fail;

	err = copy_from_user(job->waitchk,
			waitchks, sizeof(*waitchks) * num_waitchks);
	if (err)
		goto fail;

	/* mass copy waitbases */
	if (args->waitbases) {
		local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
			GFP_KERNEL);
		if (!local_waitbases) {
			err = -ENOMEM;
			goto fail;
		}

		err = copy_from_user(local_waitbases, waitbases,
			sizeof(u32) * num_syncpt_incrs);
		if (err) {
			err = -EINVAL;
			goto fail;
		}
	}

	/* set valid id for hwctx_syncpt_idx if no hwctx is present */
	if (!ctx->hwctx)
		hwctx_syncpt_idx = 0;

	/*
	 * Go through each syncpoint from userspace. Here we:
	 * - Copy syncpoint information
	 * - Validate each syncpoint
	 * - Determine waitbase for each syncpoint
	 * - Determine the index of hwctx syncpoint in the table
	 */

	for (i = 0; i < num_syncpt_incrs; ++i) {
		u32 waitbase;
		struct nvhost_syncpt_incr sp;

		/* Copy */
		err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
		if (err)
			goto fail;

		/* Validate: valid ids are 0..nb_pts - 1 */
		if (sp.syncpt_id >= host->info.nb_pts) {
			err = -EINVAL;
			goto fail;
		}

		/* Determine waitbase */
		if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
			waitbase = local_waitbases[i];
		else
			waitbase = nvhost_syncpt_get_waitbase(job->ch,
				sp.syncpt_id);

		/* Store */
		job->sp[i].id = sp.syncpt_id;
		job->sp[i].incrs = sp.syncpt_incrs;
		job->sp[i].waitbase = waitbase;

		/* Find hwctx syncpoint */
		if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
			hwctx_syncpt_idx = i;
	}

	/* not needed anymore */
	kfree(local_waitbases);
	local_waitbases = NULL;

	/* Is hwctx_syncpt_idx valid? */
	if (hwctx_syncpt_idx == -1) {
		err = -EINVAL;
		goto fail;
	}

	job->hwctx_syncpt_idx = hwctx_syncpt_idx;

	trace_nvhost_channel_submit(ctx->ch->dev->name,
		job->num_gathers, job->num_relocs, job->num_waitchk,
		job->sp[job->hwctx_syncpt_idx].id,
		job->sp[job->hwctx_syncpt_idx].incrs);

	err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
	if (err)
		goto fail;

	if (args->timeout)
		job->timeout = min(ctx->timeout, args->timeout);
	else
		job->timeout = ctx->timeout;
	job->timeout_debug_dump = ctx->timeout_debug_dump;

	err = nvhost_channel_submit(job);
	if (err)
		goto fail_submit;

	/* Deliver multiple fences back to the userspace */
	if (fences)
		for (i = 0; i < num_syncpt_incrs; ++i) {
			u32 fence = job->sp[i].fence;

			err = copy_to_user(fences, &fence, sizeof(u32));
			if (err)
				break;
			fences++;
		}

	/*
	 * Deliver the fence using the old mechanism _only_ if a single
	 * syncpoint is used.
	 */
	if (num_syncpt_incrs == 1)
		args->fence = job->sp[job->hwctx_syncpt_idx].fence;
	else
		args->fence = 0;

	nvhost_job_put(job);

	return 0;

fail_submit:
	nvhost_job_unpin(job);
fail:
	nvhost_job_put(job);
	kfree(local_class_ids);
	kfree(local_waitbases);
	return err;
}

static int nvhost_ioctl_channel_set_ctxswitch(
		struct nvhost_channel_userctx *ctx,
		struct nvhost_set_ctxswitch_args *args)
{
	struct nvhost_cmdbuf cmdbuf_save;
	struct nvhost_cmdbuf cmdbuf_restore;
	struct nvhost_syncpt_incr save_incr, restore_incr;
	u32 save_waitbase, restore_waitbase;
	struct nvhost_reloc reloc;
	struct nvhost_hwctx_handler *ctxhandler = NULL;
	struct nvhost_hwctx *nhwctx = NULL;
	struct user_hwctx *hwctx;
	struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
	int err;

	/* Only channels with context support */
	if (!ctx->hwctx)
		return -EFAULT;

	/* We don't yet support more than one of each buffer per submit */
	if (args->num_cmdbufs_save != 1
			|| args->num_cmdbufs_restore != 1
			|| args->num_save_incrs != 1
			|| args->num_restore_incrs != 1
			|| args->num_relocs != 1)
		return -EINVAL;

	err = copy_from_user(&cmdbuf_save,
			(void *)(uintptr_t)args->cmdbuf_save,
			sizeof(cmdbuf_save));
	if (err)
		goto fail;

	err = copy_from_user(&cmdbuf_restore,
			(void *)(uintptr_t)args->cmdbuf_restore,
			sizeof(cmdbuf_restore));
	if (err)
		goto fail;

	err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
			sizeof(reloc));
	if (err)
		goto fail;

	err = copy_from_user(&save_incr,
			(void *)(uintptr_t)args->save_incrs,
			sizeof(save_incr));
	if (err)
		goto fail;
	err = copy_from_user(&save_waitbase,
			(void *)(uintptr_t)args->save_waitbases,
			sizeof(save_waitbase));
	if (err)
		goto fail;

	err = copy_from_user(&restore_incr,
			(void *)(uintptr_t)args->restore_incrs,
			sizeof(restore_incr));
	if (err)
		goto fail;
	err = copy_from_user(&restore_waitbase,
			(void *)(uintptr_t)args->restore_waitbases,
			sizeof(restore_waitbase));
	if (err)
		goto fail;

	if (save_incr.syncpt_id != pdata->syncpts[0]
			|| restore_incr.syncpt_id != pdata->syncpts[0]
			|| save_waitbase != pdata->waitbases[0]
			|| restore_waitbase != pdata->waitbases[0]) {
		err = -EINVAL;
		goto fail;
	}
	ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
			save_waitbase, ctx->ch);
	if (!ctxhandler) {
		err = -ENOMEM;
		goto fail;
	}

	nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
	if (!nhwctx) {
		err = -ENOMEM;
		goto fail_hwctx;
	}
	hwctx = to_user_hwctx(nhwctx);

	trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
			cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
			cmdbuf_restore.mem, cmdbuf_restore.offset,
			cmdbuf_restore.words,
			pdata->syncpts[0], pdata->waitbases[0],
			save_incr.syncpt_incrs, restore_incr.syncpt_incrs);

	nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
	if (!nhwctx->memmgr) {
		err = -ENOMEM;
		goto fail_set_restore;
	}

	err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
			cmdbuf_restore.offset, cmdbuf_restore.words);
	if (err)
		goto fail_set_restore;

	err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
			cmdbuf_save.offset, cmdbuf_save.words, &reloc);
	if (err)
		goto fail_set_save;

	hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
	hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;

	/* Free old context */
	ctx->hwctx->h->put(ctx->hwctx);
	ctx->hwctx = nhwctx;

	return 0;

fail_set_save:
fail_set_restore:
	ctxhandler->put(&hwctx->hwctx);
fail_hwctx:
	user_ctxhandler_free(ctxhandler);
fail:
	return err;
}

#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	int ret;

	BUG_ON(!channel_op(ctx->ch).cycle_stats);
	ret = channel_op(ctx->ch).cycle_stats(ctx->hwctx, args);
	return ret;
}
#endif

static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}

static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
{
	int i;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
		if (pdata->clocks[i].moduleid == moduleid)
			return i;
	}

	/*
	 * Old user space may send a random number in args. Fall back to
	 * clock zero in that case.
	 */
	return 0;
}

static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
	struct nvhost_clk_rate_args *arg)
{
	u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
			& ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
	u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
			& ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
	int index = moduleid ?
			moduleid_to_index(ctx->ch->dev, moduleid) : 0;

	return nvhost_module_set_rate(ctx->ch->dev,
			ctx, arg->rate, index, attr);
}

static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
	u32 moduleid, u32 *rate)
{
	int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;

	return nvhost_module_get_rate(ctx->ch->dev,
			(unsigned long *)rate, index);
}

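/*
 * NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR handler: for each user-supplied
 * offset, reads or writes block_size bytes of registers, staged through a
 * fixed 64-word on-stack buffer in batches of at most 64 registers.
 */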
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/*
	 * Check that there is something to read and that the block size
	 * is u32 aligned.
	 */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			int batch = min(remaining, 64);

			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}

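/*
 * Build a bitmask from a table of word indices; the scan stops at the
 * first zero entry or any entry >= 32. For example, syncpt ids {4, 9}
 * yield BIT(4) | BIT(9) == 0x210.
 */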
static u32 create_mask(u32 *words, int num)
{
	int i;
	u32 word = 0;

	for (i = 0; i < num && words[i] && words[i] < 32; i++)
		word |= BIT(words[i]);

	return word;
}

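/*
 * Top-level channel ioctl dispatcher. _IOC_WRITE arguments are copied into
 * a stack buffer of NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE bytes before the
 * switch, and _IOC_READ results are copied back out afterwards, so the
 * per-command handlers only ever see kernel memory.
 */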
static long nvhost_channelctl(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	struct device *dev = &priv->ch->dev->dev;
	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
	int err = 0;

	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
		(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
		return -EFAULT;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	switch (cmd) {
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;

		if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
				|| !pdata->syncpts[arg->param])
			return -EINVAL;
		arg->value = pdata->syncpts[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->waitbases,
					NVHOST_MODULE_MAX_WAITBASES);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;

		if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
				|| !pdata->waitbases[arg->param])
			return -EINVAL;
		arg->value = pdata->waitbases[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->modulemutexes,
					NVHOST_MODULE_MAX_MODMUTEXES);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->ch->dev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;

		if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
				|| !pdata->modulemutexes[arg->param])
			return -EINVAL;
		arg->value = pdata->modulemutexes[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
	{
		int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
		struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);

		if (IS_ERR(new_client)) {
			err = PTR_ERR(new_client);
			break;
		}
		if (priv->memmgr)
			nvhost_memmgr_put_mgr(priv->memmgr);

		priv->memmgr = new_client;

		if (priv->hwctx)
			priv->hwctx->memmgr = new_client;

		break;
	}
	case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
		err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
		err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
		err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
		err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_WAIT:
		err = nvhost_ioctl_channel_wait(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
		err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
		err = nvhost_ioctl_channel_set_error_notifier(priv,
			(void *)buf);
		break;
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
	case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
		err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
		break;
#endif
	case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
		err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_ioctl_channel_get_rate(priv,
				arg->moduleid, &arg->rate);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_ioctl_channel_set_rate(priv, arg);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
		priv->timeout =
			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
		dev_dbg(&priv->ch->dev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
		/* guard against channels that have no hardware context */
		((struct nvhost_get_param_args *)buf)->value =
				priv->hwctx ? priv->hwctx->has_timedout : false;
		break;
	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
		nvhost_ioctl_channel_set_priority(priv, (void *)buf);
		priv->priority =
			(u32)((struct nvhost_set_priority_args *)buf)->priority;
		break;
	case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
		err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT:
		err = nvhost_ioctl_channel_submit(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
		priv->timeout = (u32)
			((struct nvhost_set_timeout_ex_args *)buf)->timeout;
		priv->timeout_debug_dump = !((u32)
			((struct nvhost_set_timeout_ex_args *)buf)->flags &
			(1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
		dev_dbg(&priv->ch->dev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
		err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
		break;
	default:
		nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
		break;
	}

	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));

	return err;
}

static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_channelctl,
#endif
	.unlocked_ioctl = nvhost_channelctl
};

struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
{
	struct nvhost_channel_userctx *userctx;
	struct file *f = fget(fd);

	if (!f)
		return NULL;

	if (f->f_op != &nvhost_channelops) {
		fput(f);
		return NULL;
	}

	userctx = (struct nvhost_channel_userctx *)f->private_data;
	fput(f);
	return userctx->hwctx;
}


static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_as_dev_ctl,
#endif
	.unlocked_ioctl = nvhost_as_dev_ctl,
};

static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};

static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};

static const char *get_device_name_for_dev(struct platform_device *dev)
{
	int i;
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);

	/* first choice is to use the class id if specified */
	for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
		if (pdata->class == class_id_dev_name_map[i].class_id)
			return class_id_dev_name_map[i].dev_name;
	}

	/* second choice is the module id if specified */
	for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
		if (pdata->moduleid == module_id_dev_name_map[i].module_id)
			return module_id_dev_name_map[i].dev_name;
	}

	/* last choice is to just use the given dev name */
	return dev->name;
}

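/*
 * Creates one character device node named
 * IFACE_NAME "-<cdev_name><dev_name>", with a ".<id>" instance suffix
 * appended when pdev->id > 0. cdev_name is one of "", "as-", "ctrl-",
 * "dbg-" or "prof-" (see nvhost_client_user_init() below) and dev_name
 * comes from get_device_name_for_dev(), e.g. /dev/nvhost-ctrl-gpu
 * assuming IFACE_NAME is "nvhost".
 */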
static struct device *nvhost_client_device_create(
	struct platform_device *pdev, struct cdev *cdev,
	const char *cdev_name, dev_t devno,
	const struct file_operations *ops)
{
	struct nvhost_master *host = nvhost_get_host(pdev);
	struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
	const char *use_dev_name;
	struct device *dev;
	int err;

	nvhost_dbg_fn("");

	BUG_ON(!host);

	cdev_init(cdev, ops);
	cdev->owner = THIS_MODULE;

	err = cdev_add(cdev, devno, 1);
	if (err < 0) {
		dev_err(&pdev->dev,
			"failed to add chan %i cdev\n", pdata->index);
		return NULL;
	}
	use_dev_name = get_device_name_for_dev(pdev);

	dev = device_create(host->nvhost_class,
			NULL, devno, NULL,
			(pdev->id <= 0) ?
			IFACE_NAME "-%s%s" :
			IFACE_NAME "-%s%s.%d",
			cdev_name, use_dev_name, pdev->id);

	if (IS_ERR(dev)) {
		err = PTR_ERR(dev);
		dev_err(&pdev->dev,
			"failed to create %s %s device for %s\n",
			use_dev_name, cdev_name, pdev->name);
		return NULL;
	}

	return dev;
}

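/*
 * Allocates a block of five consecutive minor numbers and creates the
 * channel node plus the optional as-, ctrl-, dbg- and prof- nodes, in that
 * order, for devices whose pdata provides the corresponding ops.
 */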
int nvhost_client_user_init(struct platform_device *dev)
{
	int err;
	dev_t devno;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	struct nvhost_channel *ch = pdata->channel;

	BUG_ON(!ch);

	/*
	 * Reserve 5 minor numbers for <dev>, as-<dev>, ctrl-<dev>,
	 * dbg-<dev> and prof-<dev>.
	 */
	err = alloc_chrdev_region(&devno, 0, 5, IFACE_NAME);
	if (err < 0) {
		dev_err(&dev->dev, "failed to allocate devno\n");
		goto fail;
	}

	ch->node = nvhost_client_device_create(dev, &ch->cdev,
				"", devno, &nvhost_channelops);
	if (ch->node == NULL) {
		err = -ENOMEM;
		goto fail;
	}
	if (pdata->as_ops) {
		++devno;
		ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
					"as-", devno, &nvhost_asops);
		if (ch->as_node == NULL) {
			err = -ENOMEM;
			goto fail;
		}
	}

	/* module control (non-channel based, global) interface */
	if (pdata->ctrl_ops) {
		++devno;
		pdata->ctrl_node = nvhost_client_device_create(dev,
					&pdata->ctrl_cdev, "ctrl-",
					devno, pdata->ctrl_ops);
		if (pdata->ctrl_node == NULL) {
			err = -ENOMEM;
			goto fail;
		}
	}

	/* module debugger interface (per channel and global) */
	if (pdata->dbg_ops) {
		++devno;
		pdata->dbg_node = nvhost_client_device_create(dev,
					&pdata->dbg_cdev, "dbg-",
					devno, pdata->dbg_ops);
		if (pdata->dbg_node == NULL) {
			err = -ENOMEM;
			goto fail;
		}
	}

	/* module profiler interface (per channel and global) */
	if (pdata->prof_ops) {
		++devno;
		pdata->prof_node = nvhost_client_device_create(dev,
					&pdata->prof_cdev, "prof-",
					devno, pdata->prof_ops);
		if (pdata->prof_node == NULL) {
			err = -ENOMEM;
			goto fail;
		}
	}

	return 0;
fail:
	return err;
}

void nvhost_client_user_deinit(struct platform_device *dev)
{
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	struct nvhost_channel *ch = pdata->channel;

	BUG_ON(!ch);

	if (ch->node) {
		device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
		cdev_del(&ch->cdev);
	}

	if (ch->as_node) {
		device_destroy(nvhost_master->nvhost_class, ch->as_cdev.dev);
		cdev_del(&ch->as_cdev);
	}

	if (pdata->ctrl_node) {
		device_destroy(nvhost_master->nvhost_class,
			       pdata->ctrl_cdev.dev);
		cdev_del(&pdata->ctrl_cdev);
	}

	if (pdata->dbg_node) {
		device_destroy(nvhost_master->nvhost_class,
			       pdata->dbg_cdev.dev);
		cdev_del(&pdata->dbg_cdev);
	}

	if (pdata->prof_node) {
		device_destroy(nvhost_master->nvhost_class,
			       pdata->prof_cdev.dev);
		cdev_del(&pdata->prof_cdev);
	}
}

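/*
 * Client device bring-up: allocate and initialize the channel, create the
 * debugfs directory and the user-space device nodes, register the device,
 * reset its syncpoints, set the DMA segment limit, and finally register a
 * slave device (with this device as its master) if one is attached to the
 * pdata.
 */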
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	if (pdata->slave && !pdata->slave_initialized) {
		struct nvhost_device_data *slave_pdata =
					pdata->slave->dev.platform_data;

		slave_pdata->master = dev;
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
		pdata->slave_initialized = 1;
	}

	return 0;

fail:
	/* Add clean-up */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);

int nvhost_client_device_release(struct platform_device *dev)
{
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = pdata->channel;

	/* Release nvhost module resources */
	nvhost_module_deinit(dev);

	/* Remove from nvhost device list */
	nvhost_device_list_remove(dev);

	/* Release chardev and device node for user space */
	nvhost_client_user_deinit(dev);

	/* Free nvhost channel */
	nvhost_free_channel(ch);

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);

int nvhost_client_device_get_resources(struct platform_device *dev)
{
	int i;
	void __iomem *regs = NULL;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	for (i = 0; i < dev->num_resources; i++) {
		struct resource *r = NULL;

		r = platform_get_resource(dev, IORESOURCE_MEM, i);
		/* We've run out of mem resources */
		if (!r)
			break;

		regs = devm_request_and_ioremap(&dev->dev, r);
		if (!regs)
			goto fail;

		pdata->aperture[i] = regs;
	}

	return 0;

fail:
	dev_err(&dev->dev, "failed to get register memory\n");

	return -ENXIO;
}
EXPORT_SYMBOL(nvhost_client_device_get_resources);

/*
 * This is a simple wrapper around request_firmware that takes 'fw_name'
 * and, if available, applies an SoC-relative path prefix to it. The caller
 * is responsible for calling release_firmware later.
 */
const struct firmware *
nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
{
	struct nvhost_chip_support *op = nvhost_get_chip_ops();
	const struct firmware *fw;
	char *fw_path = NULL;
	int path_len, err;

	/*
	 * current->fs is NULL when this is called from SYS_EXIT.
	 * Check for it here to prevent a crash in request_firmware.
	 */
	if (!current->fs) {
		BUG();
		return NULL;
	}

	if (!fw_name)
		return NULL;

	if (op->soc_name) {
		path_len = strlen(fw_name) + strlen(op->soc_name);
		path_len += 2; /* for the path separator and zero terminator */

		fw_path = kzalloc(sizeof(*fw_path) * path_len,
				     GFP_KERNEL);
		if (!fw_path)
			return NULL;

		sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
		fw_name = fw_path;
	}

	err = request_firmware(&fw, fw_name, &dev->dev);
	kfree(fw_path);
	if (err) {
		dev_err(&dev->dev, "failed to get firmware\n");
		return NULL;
	}

	/* note: caller must release_firmware */
	return fw;
}
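
/*
 * Usage sketch (illustrative only; pdata->firmware_name is a hypothetical
 * field, not part of this driver): a client module might load and release
 * its microcode like this:
 *
 *	const struct firmware *fw;
 *
 *	fw = nvhost_client_request_firmware(pdev, pdata->firmware_name);
 *	if (!fw)
 *		return -ENOENT;
 *	// ... copy fw->data (fw->size bytes) into the engine's memory ...
 *	release_firmware(fw);
 */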