030e28cfc49f04c513a21ada72892e9905c2f269
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * Tegra Graphics Host Client Module
3  *
4  * Copyright (c) 2010-2014, NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spinlock.h>
22 #include <linux/fs.h>
23 #include <linux/cdev.h>
24 #include <linux/uaccess.h>
25 #include <linux/file.h>
26 #include <linux/clk.h>
27 #include <linux/hrtimer.h>
28 #include <linux/export.h>
29 #include <linux/firmware.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/tegra-soc.h>
32
33 #include <trace/events/nvhost.h>
34
35 #include <linux/io.h>
36 #include <linux/string.h>
37
38 #include <linux/nvhost.h>
39 #include <linux/nvhost_ioctl.h>
40
41 #include <mach/gpufuse.h>
42
43 #include "debug.h"
44 #include "bus_client.h"
45 #include "dev.h"
46 #include "class_ids.h"
47 #include "nvhost_as.h"
48 #include "nvhost_memmgr.h"
49 #include "chip_support.h"
50 #include "nvhost_acm.h"
51
52 #include "nvhost_syncpt.h"
53 #include "nvhost_channel.h"
54 #include "nvhost_job.h"
55 #include "nvhost_hwctx.h"
56 #include "user_hwctx.h"
57 #include "nvhost_sync.h"
58
59 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
60 {
61         int err = 0;
62         struct resource *r;
63         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
64
65         r = platform_get_resource(pdata->master ? pdata->master : ndev,
66                         IORESOURCE_MEM, 0);
67         if (!r) {
68                 dev_err(&ndev->dev, "failed to get memory resource\n");
69                 return -ENODEV;
70         }
71
72         if (offset + 4 * count > resource_size(r)
73                         || (offset + 4 * count < offset))
74                 err = -EPERM;
75
76         return err;
77 }
78
79 static __iomem void *get_aperture(struct platform_device *pdev)
80 {
81         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
82
83         if (pdata->master)
84                 pdata = platform_get_drvdata(pdata->master);
85
86         return pdata->aperture[0];
87 }
88
89 void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
90 {
91         void __iomem *addr = get_aperture(pdev) + r;
92         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
93         writel(v, addr);
94 }
95 EXPORT_SYMBOL_GPL(host1x_writel);
96
97 u32 host1x_readl(struct platform_device *pdev, u32 r)
98 {
99         void __iomem *addr = get_aperture(pdev) + r;
100         u32 v = readl(addr);
101         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
102         return v;
103 }
104 EXPORT_SYMBOL_GPL(host1x_readl);
105
106 int nvhost_read_module_regs(struct platform_device *ndev,
107                         u32 offset, int count, u32 *values)
108 {
109         void __iomem *p = get_aperture(ndev);
110         int err;
111
112         if (!p)
113                 return -ENODEV;
114
115         /* verify offset */
116         err = validate_reg(ndev, offset, count);
117         if (err)
118                 return err;
119
120         nvhost_module_busy(ndev);
121         p += offset;
122         while (count--) {
123                 *(values++) = readl(p);
124                 p += 4;
125         }
126         rmb();
127         nvhost_module_idle(ndev);
128
129         return 0;
130 }
131
132 int nvhost_write_module_regs(struct platform_device *ndev,
133                         u32 offset, int count, const u32 *values)
134 {
135         int err;
136         void __iomem *p = get_aperture(ndev);
137
138         if (!p)
139                 return -ENODEV;
140
141         /* verify offset */
142         err = validate_reg(ndev, offset, count);
143         if (err)
144                 return err;
145
146         nvhost_module_busy(ndev);
147         p += offset;
148         while (count--) {
149                 writel(*(values++), p);
150                 p += 4;
151         }
152         wmb();
153         nvhost_module_idle(ndev);
154
155         return 0;
156 }
157
/* Per-open-file state for a channel device node. Created in
 * nvhost_channelopen(), torn down in nvhost_channelrelease(). */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel ref taken via nvhost_getchannel() */
	struct nvhost_hwctx *hwctx;	/* hw context, if the channel has a ctxhandler */
	struct nvhost_job *job;		/* pending job, released on close if set */
	struct mem_mgr *memmgr;		/* memory manager handle for this client */
	u32 timeout;			/* job timeout; forced to 0 off-silicon */
	u32 priority;			/* defaults to NVHOST_PRIORITY_MEDIUM */
	int clientid;			/* unique id from the host's clientid counter */
	bool timeout_debug_dump;	/* dump debug state when a job times out */
};
168
/*
 * Release handler for the channel device node: undoes everything
 * nvhost_channelopen() set up for this file. Also used as the error
 * cleanup path of nvhost_channelopen(), so it must tolerate partially
 * initialized state (NULL hwctx/job/memmgr).
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		/* clear the channel's current-context pointer under
		 * submitlock so no in-flight submit sees a stale hwctx */
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	if (priv->job)
		nvhost_job_put(priv->job);

	/* drop the channel ref taken in nvhost_channelopen() */
	nvhost_putchannel(priv->ch);

	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
200
/*
 * Open handler for the channel device node. Takes a channel reference,
 * allocates the per-file context, optionally allocates a hw context, and
 * initializes defaults (priority, timeout, clientid).
 *
 * NOTE(review): every failure after priv is allocated funnels through
 * nvhost_channelrelease() and reports -ENOMEM, even when
 * nvhost_module_add_client() may have failed for a different reason.
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch, false);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(dev_name(&ch->dev->dev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	if (nvhost_module_add_client(ch->dev, priv))
		goto fail;

	/* allocate a hw context only for channels that support one;
	 * the module must be powered during the allocation */
	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		nvhost_module_busy(ch->dev);
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		nvhost_module_idle(ch->dev);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);
	pdata = dev_get_drvdata(ch->dev->dev.parent);
	priv->timeout = pdata->nvhost_timeout_default;
	priv->timeout_debug_dump = true;
	/* simulation/FPGA platforms run too slowly for real timeouts */
	if (!tegra_platform_is_silicon())
		priv->timeout = 0;
	return 0;
fail:
	/* release handles partially-initialized priv */
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
243
244 static int nvhost_ioctl_channel_alloc_obj_ctx(
245         struct nvhost_channel_userctx *ctx,
246         struct nvhost_alloc_obj_ctx_args *args)
247 {
248         int ret;
249
250         BUG_ON(!channel_op(ctx->ch).alloc_obj);
251         nvhost_module_busy(ctx->ch->dev);
252         ret = channel_op(ctx->ch).alloc_obj(ctx->hwctx, args);
253         nvhost_module_idle(ctx->ch->dev);
254         return ret;
255 }
256
257 static int nvhost_ioctl_channel_free_obj_ctx(
258         struct nvhost_channel_userctx *ctx,
259         struct nvhost_free_obj_ctx_args *args)
260 {
261         int ret;
262
263         BUG_ON(!channel_op(ctx->ch).free_obj);
264         nvhost_module_busy(ctx->ch->dev);
265         ret = channel_op(ctx->ch).free_obj(ctx->hwctx, args);
266         nvhost_module_idle(ctx->ch->dev);
267         return ret;
268 }
269
270 static int nvhost_ioctl_channel_alloc_gpfifo(
271         struct nvhost_channel_userctx *ctx,
272         struct nvhost_alloc_gpfifo_args *args)
273 {
274         int ret;
275
276         BUG_ON(!channel_op(ctx->ch).alloc_gpfifo);
277         nvhost_module_busy(ctx->ch->dev);
278         ret = channel_op(ctx->ch).alloc_gpfifo(ctx->hwctx, args);
279         nvhost_module_idle(ctx->ch->dev);
280         return ret;
281 }
282
283 static int nvhost_ioctl_channel_set_error_notifier(
284         struct nvhost_channel_userctx *ctx,
285         struct nvhost_set_error_notifier *args)
286 {
287         int ret;
288         BUG_ON(!channel_op(ctx->ch).set_error_notifier);
289         ret = channel_op(ctx->ch).set_error_notifier(ctx->hwctx, args);
290         return ret;
291 }
292
293 static int nvhost_ioctl_channel_submit_gpfifo(
294         struct nvhost_channel_userctx *ctx,
295         struct nvhost_submit_gpfifo_args *args)
296 {
297         void *gpfifo;
298         u32 size;
299         int ret = 0;
300
301         if (!ctx->hwctx || ctx->hwctx->has_timedout)
302                 return -ETIMEDOUT;
303
304         size = args->num_entries * sizeof(struct nvhost_gpfifo);
305
306         gpfifo = kzalloc(size, GFP_KERNEL);
307         if (!gpfifo)
308                 return -ENOMEM;
309
310         if (copy_from_user(gpfifo,
311                            (void __user *)(uintptr_t)args->gpfifo, size)) {
312                 ret = -EINVAL;
313                 goto clean_up;
314         }
315
316         BUG_ON(!channel_op(ctx->ch).submit_gpfifo);
317
318         nvhost_module_busy(ctx->ch->dev);
319         ret = channel_op(ctx->ch).submit_gpfifo(ctx->hwctx, gpfifo,
320                         args->num_entries, &args->fence, args->flags);
321         nvhost_module_idle(ctx->ch->dev);
322 clean_up:
323         kfree(gpfifo);
324         return ret;
325 }
326
327 static int nvhost_ioctl_channel_wait(
328         struct nvhost_channel_userctx *ctx,
329         struct nvhost_wait_args *args)
330 {
331         int ret;
332
333         BUG_ON(!channel_op(ctx->ch).wait);
334         nvhost_module_busy(ctx->ch->dev);
335         ret = channel_op(ctx->ch).wait(ctx->hwctx, args);
336         nvhost_module_idle(ctx->ch->dev);
337         return ret;
338 }
339
340 static int nvhost_ioctl_channel_set_priority(
341         struct nvhost_channel_userctx *ctx,
342         struct nvhost_set_priority_args *args)
343 {
344         int ret = 0;
345         if (channel_op(ctx->ch).set_priority) {
346                 nvhost_module_busy(ctx->ch->dev);
347                 ret = channel_op(ctx->ch).set_priority(ctx->hwctx, args);
348                 nvhost_module_idle(ctx->ch->dev);
349         }
350         return ret;
351 }
352
353 static int nvhost_ioctl_channel_zcull_bind(
354         struct nvhost_channel_userctx *ctx,
355         struct nvhost_zcull_bind_args *args)
356 {
357         int ret;
358
359         BUG_ON(!channel_zcull_op(ctx->ch).bind);
360         nvhost_module_busy(ctx->ch->dev);
361         ret = channel_zcull_op(ctx->ch).bind(ctx->hwctx, args);
362         nvhost_module_idle(ctx->ch->dev);
363         return ret;
364 }
365
366 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
367                 struct nvhost_submit_args *args)
368 {
369         struct nvhost_job *job;
370         int num_cmdbufs = args->num_cmdbufs;
371         int num_relocs = args->num_relocs;
372         int num_waitchks = args->num_waitchks;
373         int num_syncpt_incrs = args->num_syncpt_incrs;
374         struct nvhost_cmdbuf __user *cmdbufs =
375                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
376         struct nvhost_cmdbuf __user *cmdbuf_exts =
377                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbuf_exts;
378         struct nvhost_reloc __user *relocs =
379                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
380         struct nvhost_reloc_shift __user *reloc_shifts =
381                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
382         struct nvhost_waitchk __user *waitchks =
383                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
384         struct nvhost_syncpt_incr __user *syncpt_incrs =
385                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
386         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
387         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
388         u32 __user *class_ids = (u32 *)(uintptr_t)args->class_ids;
389
390         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
391         u32 *local_waitbases = NULL, *local_class_ids = NULL;
392         int err, i, hwctx_syncpt_idx = -1;
393
394         if (num_syncpt_incrs > host->info.nb_pts)
395                 return -EINVAL;
396
397         job = nvhost_job_alloc(ctx->ch,
398                         ctx->hwctx,
399                         num_cmdbufs,
400                         num_relocs,
401                         num_waitchks,
402                         num_syncpt_incrs,
403                         ctx->memmgr);
404         if (!job)
405                 return -ENOMEM;
406
407         job->num_relocs = args->num_relocs;
408         job->num_waitchk = args->num_waitchks;
409         job->num_syncpts = args->num_syncpt_incrs;
410         job->priority = ctx->priority;
411         job->clientid = ctx->clientid;
412
413         /* mass copy class_ids */
414         if (args->class_ids) {
415                 local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
416                         GFP_KERNEL);
417                 if (!local_class_ids) {
418                         err = -ENOMEM;
419                         goto fail;
420                 }
421                 err = copy_from_user(local_class_ids, class_ids,
422                         sizeof(u32) * num_cmdbufs);
423                 if (err) {
424                         err = -EINVAL;
425                         goto fail;
426                 }
427         }
428
429         for (i = 0; i < num_cmdbufs; ++i) {
430                 struct nvhost_cmdbuf cmdbuf;
431                 struct nvhost_cmdbuf_ext cmdbuf_ext;
432                 u32 class_id = class_ids ? local_class_ids[i] : 0;
433
434                 err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
435                 if (err)
436                         goto fail;
437
438                 cmdbuf_ext.pre_fence = -1;
439                 if (cmdbuf_exts)
440                         err = copy_from_user(&cmdbuf_ext,
441                                         cmdbuf_exts + i, sizeof(cmdbuf_ext));
442                 if (err)
443                         cmdbuf_ext.pre_fence = -1;
444
445                 nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
446                                       cmdbuf.offset, class_id,
447                                       cmdbuf_ext.pre_fence);
448         }
449
450         kfree(local_class_ids);
451         local_class_ids = NULL;
452
453         err = copy_from_user(job->relocarray,
454                         relocs, sizeof(*relocs) * num_relocs);
455         if (err)
456                 goto fail;
457
458         err = copy_from_user(job->relocshiftarray,
459                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
460         if (err)
461                 goto fail;
462
463         err = copy_from_user(job->waitchk,
464                         waitchks, sizeof(*waitchks) * num_waitchks);
465         if (err)
466                 goto fail;
467
468         /* mass copy waitbases */
469         if (args->waitbases) {
470                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
471                         GFP_KERNEL);
472                 if (!local_waitbases) {
473                         err = -ENOMEM;
474                         goto fail;
475                 }
476
477                 err = copy_from_user(local_waitbases, waitbases,
478                         sizeof(u32) * num_syncpt_incrs);
479                 if (err) {
480                         err = -EINVAL;
481                         goto fail;
482                 }
483         }
484
485         /* set valid id for hwctx_syncpt_idx if hwctx does not provide one */
486         if (!ctx->hwctx || ctx->hwctx->h->syncpt == NVSYNCPT_INVALID)
487                 hwctx_syncpt_idx = 0;
488
489         /*
490          * Go through each syncpoint from userspace. Here we:
491          * - Copy syncpoint information
492          * - Validate each syncpoint
493          * - Determine waitbase for each syncpoint
494          * - Determine the index of hwctx syncpoint in the table
495          */
496
497         for (i = 0; i < num_syncpt_incrs; ++i) {
498                 u32 waitbase;
499                 struct nvhost_syncpt_incr sp;
500
501                 /* Copy */
502                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
503                 if (err)
504                         goto fail;
505
506                 /* Validate */
507                 if (sp.syncpt_id > host->info.nb_pts) {
508                         err = -EINVAL;
509                         goto fail;
510                 }
511
512                 /* Determine waitbase */
513                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
514                         waitbase = local_waitbases[i];
515                 else
516                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
517                                 sp.syncpt_id);
518
519                 /* Store */
520                 job->sp[i].id = sp.syncpt_id;
521                 job->sp[i].incrs = sp.syncpt_incrs;
522                 job->sp[i].waitbase = waitbase;
523
524                 /* Find hwctx syncpoint */
525                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
526                         hwctx_syncpt_idx = i;
527         }
528
529         /* not needed anymore */
530         kfree(local_waitbases);
531         local_waitbases = NULL;
532
533         /* Is hwctx_syncpt_idx valid? */
534         if (hwctx_syncpt_idx == -1) {
535                 err = -EINVAL;
536                 goto fail;
537         }
538
539         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
540
541         trace_nvhost_channel_submit(ctx->ch->dev->name,
542                 job->num_gathers, job->num_relocs, job->num_waitchk,
543                 job->sp[job->hwctx_syncpt_idx].id,
544                 job->sp[job->hwctx_syncpt_idx].incrs);
545
546         nvhost_module_busy(ctx->ch->dev);
547         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
548         nvhost_module_idle(ctx->ch->dev);
549         if (err)
550                 goto fail;
551
552         if (args->timeout)
553                 job->timeout = min(ctx->timeout, args->timeout);
554         else
555                 job->timeout = ctx->timeout;
556         job->timeout_debug_dump = ctx->timeout_debug_dump;
557
558         err = nvhost_channel_submit(job);
559         if (err)
560                 goto fail_submit;
561
562         /* Deliver multiple fences back to the userspace */
563         if (fences)
564                 for (i = 0; i < num_syncpt_incrs; ++i) {
565                         u32 fence = job->sp[i].fence;
566                         err = copy_to_user(fences, &fence, sizeof(u32));
567                         if (err)
568                                 break;
569                         fences++;
570                 }
571
572         /* Deliver the fence using the old mechanism _only_ if a single
573          * syncpoint is used. */
574
575         if (args->flags & BIT(NVHOST_SUBMIT_FLAG_SYNC_FENCE_FD)) {
576                 struct nvhost_ctrl_sync_fence_info pts[num_syncpt_incrs];
577
578                 for (i = 0; i < num_syncpt_incrs; i++) {
579                         pts[i].id = job->sp[i].id;
580                         pts[i].thresh = job->sp[i].fence;
581                 }
582
583                 err = nvhost_sync_create_fence(
584                                 &nvhost_get_host(ctx->ch->dev)->syncpt,
585                                 pts, num_syncpt_incrs, "fence", &args->fence);
586                 if (err)
587                         goto fail;
588         } else if (num_syncpt_incrs == 1)
589                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
590         else
591                 args->fence = 0;
592
593         nvhost_job_put(job);
594
595         return 0;
596
597 fail_submit:
598         nvhost_job_unpin(job);
599 fail:
600         nvhost_job_put(job);
601         kfree(local_class_ids);
602         kfree(local_waitbases);
603         return err;
604 }
605
606 static int nvhost_ioctl_channel_set_ctxswitch(
607                 struct nvhost_channel_userctx *ctx,
608                 struct nvhost_set_ctxswitch_args *args)
609 {
610         struct nvhost_cmdbuf cmdbuf_save;
611         struct nvhost_cmdbuf cmdbuf_restore;
612         struct nvhost_syncpt_incr save_incr, restore_incr;
613         u32 save_waitbase, restore_waitbase;
614         struct nvhost_reloc reloc;
615         struct nvhost_hwctx_handler *ctxhandler = NULL;
616         struct nvhost_hwctx *nhwctx = NULL;
617         struct user_hwctx *hwctx;
618         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
619         int err;
620
621         /* Only channels with context support */
622         if (!ctx->hwctx)
623                 return -EFAULT;
624
625         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
626         if (args->num_cmdbufs_save != 1
627                         || args->num_cmdbufs_restore != 1
628                         || args->num_save_incrs != 1
629                         || args->num_restore_incrs != 1
630                         || args->num_relocs != 1)
631                 return -EINVAL;
632
633         err = copy_from_user(&cmdbuf_save,
634                         (void *)(uintptr_t)args->cmdbuf_save,
635                         sizeof(cmdbuf_save));
636         if (err)
637                 goto fail;
638
639         err = copy_from_user(&cmdbuf_restore,
640                         (void *)(uintptr_t)args->cmdbuf_restore,
641                         sizeof(cmdbuf_restore));
642         if (err)
643                 goto fail;
644
645         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
646                         sizeof(reloc));
647         if (err)
648                 goto fail;
649
650         err = copy_from_user(&save_incr,
651                         (void *)(uintptr_t)args->save_incrs,
652                         sizeof(save_incr));
653         if (err)
654                 goto fail;
655         err = copy_from_user(&save_waitbase,
656                         (void *)(uintptr_t)args->save_waitbases,
657                         sizeof(save_waitbase));
658
659         err = copy_from_user(&restore_incr,
660                         (void *)(uintptr_t)args->restore_incrs,
661                         sizeof(restore_incr));
662         if (err)
663                 goto fail;
664         err = copy_from_user(&restore_waitbase,
665                         (void *)(uintptr_t)args->restore_waitbases,
666                         sizeof(restore_waitbase));
667
668         if (save_incr.syncpt_id != pdata->syncpts[0]
669                         || restore_incr.syncpt_id != pdata->syncpts[0]
670                         || save_waitbase != pdata->waitbases[0]
671                         || restore_waitbase != pdata->waitbases[0]) {
672                 err = -EINVAL;
673                 goto fail;
674         }
675         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
676                         save_waitbase, ctx->ch);
677         if (!ctxhandler) {
678                 err = -ENOMEM;
679                 goto fail;
680         }
681
682         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
683         if (!nhwctx) {
684                 err = -ENOMEM;
685                 goto fail_hwctx;
686         }
687         hwctx = to_user_hwctx(nhwctx);
688
689         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
690                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
691                         cmdbuf_restore.mem, cmdbuf_restore.offset,
692                         cmdbuf_restore.words,
693                         pdata->syncpts[0], pdata->waitbases[0],
694                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
695
696         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
697         if (!nhwctx->memmgr)
698                 goto fail_set_restore;
699
700         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
701                         cmdbuf_restore.offset, cmdbuf_restore.words);
702         if (err)
703                 goto fail_set_restore;
704
705         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
706                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
707         if (err)
708                 goto fail_set_save;
709
710         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
711         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
712
713         /* Free old context */
714         ctx->hwctx->h->put(ctx->hwctx);
715         ctx->hwctx = nhwctx;
716
717         return 0;
718
719 fail_set_save:
720 fail_set_restore:
721         ctxhandler->put(&hwctx->hwctx);
722 fail_hwctx:
723         user_ctxhandler_free(ctxhandler);
724 fail:
725         return err;
726 }
727
#if defined(CONFIG_GK20A_CYCLE_STATS)
/* IOCTL: forward cycle-stats configuration to the channel's
 * cycle_stats op (compiled only with GK20A cycle stats enabled). */
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	BUG_ON(!channel_op(ctx->ch).cycle_stats);
	return channel_op(ctx->ch).cycle_stats(ctx->hwctx, args);
}
#endif
739
740 static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
741         struct nvhost_read_3d_reg_args *args)
742 {
743         return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
744                         args->offset, &args->value);
745 }
746
747 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
748 {
749         int i;
750         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
751
752         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
753                 if (pdata->clocks[i].moduleid == moduleid)
754                         return i;
755         }
756
757         /* Old user space is sending a random number in args. Return clock
758          * zero in these cases. */
759         return 0;
760 }
761
762 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
763         struct nvhost_clk_rate_args *arg)
764 {
765         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
766                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
767         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
768                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
769         int index = moduleid ?
770                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
771
772         return nvhost_module_set_rate(ctx->ch->dev,
773                         ctx, arg->rate, index, attr);
774 }
775
776 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
777         u32 moduleid, u32 *rate)
778 {
779         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
780
781         return nvhost_module_get_rate(ctx->ch->dev,
782                         (unsigned long *)rate, index);
783 }
784
/*
 * IOCTL: bulk read or write of module registers. For each user-supplied
 * offset, transfers args->block_size bytes (u32-aligned) in batches of
 * at most 64 words through the on-stack vals[] buffer, bouncing between
 * userspace and the module register helpers.
 *
 * Returns 0 on success, -EINVAL for an empty/misaligned request,
 * -EFAULT on a user copy failure, or an error from the register
 * read/write helpers (which also range-check each window).
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];	/* bounce buffer; batch size capped to 64 words */
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;	/* words left */

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				/* write: user -> vals -> registers */
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				/* read: registers -> vals -> user */
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
844
845 static u32 create_mask(u32 *words, int num)
846 {
847         int i;
848         u32 word = 0;
849         for (i = 0; i < num && words[i] && words[i] < 32; i++)
850                 word |= BIT(words[i]);
851
852         return word;
853 }
854
855 static long nvhost_channelctl(struct file *filp,
856         unsigned int cmd, unsigned long arg)
857 {
858         struct nvhost_channel_userctx *priv = filp->private_data;
859         struct device *dev = &priv->ch->dev->dev;
860         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
861         int err = 0;
862
863         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
864                 (_IOC_NR(cmd) == 0) ||
865                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
866                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
867                 return -EFAULT;
868
869         if (_IOC_DIR(cmd) & _IOC_WRITE) {
870                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
871                         return -EFAULT;
872         }
873
874         switch (cmd) {
875         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
876         {
877                 struct nvhost_device_data *pdata = \
878                         platform_get_drvdata(priv->ch->dev);
879                 ((struct nvhost_get_param_args *)buf)->value =
880                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
881                 break;
882         }
883         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
884         {
885                 struct nvhost_device_data *pdata = \
886                         platform_get_drvdata(priv->ch->dev);
887                 struct nvhost_get_param_arg *arg =
888                         (struct nvhost_get_param_arg *)buf;
889                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
890                                 || !pdata->syncpts[arg->param])
891                         return -EINVAL;
892                 arg->value = pdata->syncpts[arg->param];
893                 break;
894         }
895         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
896         {
897                 struct nvhost_device_data *pdata = \
898                         platform_get_drvdata(priv->ch->dev);
899                 ((struct nvhost_get_param_args *)buf)->value =
900                         create_mask(pdata->waitbases,
901                                         NVHOST_MODULE_MAX_WAITBASES);
902                 break;
903         }
904         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
905         {
906                 struct nvhost_device_data *pdata = \
907                         platform_get_drvdata(priv->ch->dev);
908                 struct nvhost_get_param_arg *arg =
909                         (struct nvhost_get_param_arg *)buf;
910                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
911                                 || !pdata->waitbases[arg->param])
912                         return -EINVAL;
913                 arg->value = pdata->waitbases[arg->param];
914                 break;
915         }
916         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
917         {
918                 struct nvhost_device_data *pdata = \
919                         platform_get_drvdata(priv->ch->dev);
920                 ((struct nvhost_get_param_args *)buf)->value =
921                         create_mask(pdata->modulemutexes,
922                                         NVHOST_MODULE_MAX_MODMUTEXES);
923                 break;
924         }
925         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
926         {
927                 struct nvhost_device_data *pdata = \
928                         platform_get_drvdata(priv->ch->dev);
929                 struct nvhost_get_param_arg *arg =
930                         (struct nvhost_get_param_arg *)buf;
931                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
932                                 || !pdata->modulemutexes[arg->param])
933                         return -EINVAL;
934                 arg->value = pdata->modulemutexes[arg->param];
935                 break;
936         }
937         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
938         {
939                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
940                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
941
942                 if (IS_ERR(new_client)) {
943                         err = PTR_ERR(new_client);
944                         break;
945                 }
946                 if (priv->memmgr)
947                         nvhost_memmgr_put_mgr(priv->memmgr);
948
949                 priv->memmgr = new_client;
950
951                 if (priv->hwctx)
952                         priv->hwctx->memmgr = new_client;
953
954                 break;
955         }
956         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
957                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
958                 break;
959         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
960                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
961                 break;
962         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
963                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
964                 break;
965         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
966                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
967                 break;
968         case NVHOST_IOCTL_CHANNEL_WAIT:
969                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
970                 break;
971         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
972                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
973                 break;
974         case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
975                 err = nvhost_ioctl_channel_set_error_notifier(priv,
976                         (void *)buf);
977                 break;
978 #if defined(CONFIG_GK20A_CYCLE_STATS)
979         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
980                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
981                 break;
982 #endif
983         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
984                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
985                 break;
986         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
987         {
988                 struct nvhost_clk_rate_args *arg =
989                                 (struct nvhost_clk_rate_args *)buf;
990
991                 err = nvhost_ioctl_channel_get_rate(priv,
992                                 arg->moduleid, &arg->rate);
993                 break;
994         }
995         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
996         {
997                 struct nvhost_clk_rate_args *arg =
998                                 (struct nvhost_clk_rate_args *)buf;
999
1000                 err = nvhost_ioctl_channel_set_rate(priv, arg);
1001                 break;
1002         }
1003         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
1004         {
1005                 u32 timeout =
1006                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1007
1008                 priv->timeout = timeout;
1009                 dev_dbg(&priv->ch->dev->dev,
1010                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1011                         __func__, priv->timeout, priv);
1012                 if (priv->hwctx)
1013                         priv->hwctx->timeout_ms_max = timeout;
1014                 break;
1015         }
1016         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
1017                 ((struct nvhost_get_param_args *)buf)->value =
1018                                 priv->hwctx->has_timedout;
1019                 break;
1020         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
1021                 nvhost_ioctl_channel_set_priority(priv, (void *)buf);
1022                 priv->priority =
1023                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
1024                 break;
1025         case NVHOST32_IOCTL_CHANNEL_MODULE_REGRDWR:
1026         {
1027                 struct nvhost32_ctrl_module_regrdwr_args *args32 =
1028                         (struct nvhost32_ctrl_module_regrdwr_args *)buf;
1029                 struct nvhost_ctrl_module_regrdwr_args args;
1030                 args.id = args32->id;
1031                 args.num_offsets = args32->num_offsets;
1032                 args.block_size = args32->block_size;
1033                 args.offsets = args32->offsets;
1034                 args.values = args32->values;
1035                 args.write = args32->write;
1036                 err = nvhost_ioctl_channel_module_regrdwr(priv, &args);
1037                 break;
1038         }
1039         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
1040                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
1041                 break;
1042         case NVHOST32_IOCTL_CHANNEL_SUBMIT:
1043         {
1044                 struct nvhost32_submit_args *args32 = (void *)buf;
1045                 struct nvhost_submit_args args;
1046
1047                 memset(&args, 0, sizeof(args));
1048                 args.submit_version = args32->submit_version;
1049                 args.num_syncpt_incrs = args32->num_syncpt_incrs;
1050                 args.num_cmdbufs = args32->num_cmdbufs;
1051                 args.num_relocs = args32->num_relocs;
1052                 args.num_waitchks = args32->num_waitchks;
1053                 args.timeout = args32->timeout;
1054                 args.syncpt_incrs = args32->syncpt_incrs;
1055                 args.fence = args32->fence;
1056
1057                 args.cmdbufs = args32->cmdbufs;
1058                 args.relocs = args32->relocs;
1059                 args.reloc_shifts = args32->reloc_shifts;
1060                 args.waitchks = args32->waitchks;
1061                 args.waitbases = args32->waitbases;
1062                 args.class_ids = args32->class_ids;
1063                 args.fences = args32->fences;
1064
1065                 err = nvhost_ioctl_channel_submit(priv, &args);
1066                 args32->fence = args.fence;
1067                 break;
1068         }
1069         case NVHOST_IOCTL_CHANNEL_SUBMIT:
1070                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
1071                 break;
1072         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
1073         {
1074                 u32 timeout =
1075                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
1076                 bool timeout_debug_dump = !((u32)
1077                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
1078                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
1079                 priv->timeout = timeout;
1080                 priv->timeout_debug_dump = timeout_debug_dump;
1081                 dev_dbg(&priv->ch->dev->dev,
1082                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
1083                         __func__, priv->timeout, priv);
1084                 if (priv->hwctx) {
1085                         priv->hwctx->timeout_ms_max = timeout;
1086                         priv->hwctx->timeout_debug_dump = timeout_debug_dump;
1087                 }
1088                 break;
1089         }
1090         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
1091                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
1092                 break;
1093         default:
1094                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
1095                 err = -ENOTTY;
1096                 break;
1097         }
1098
1099         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
1100                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
1101
1102         return err;
1103 }
1104
/* File operations for the per-channel device node.
 * The same handler serves both native and compat ioctl paths; the
 * dispatcher carries explicit NVHOST32_* command variants for the
 * structures that differ in 32-bit layout. */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_channelctl,
#endif
	.unlocked_ioctl = nvhost_channelctl
};
1114
1115 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
1116 {
1117         struct nvhost_channel_userctx *userctx;
1118         struct file *f = fget(fd);
1119         if (!f)
1120                 return 0;
1121
1122         if (f->f_op != &nvhost_channelops) {
1123                 fput(f);
1124                 return 0;
1125         }
1126
1127         userctx = (struct nvhost_channel_userctx *)f->private_data;
1128         fput(f);
1129         return userctx->hwctx;
1130 }
1131
1132
/* File operations for the address-space device node ("as-<dev>");
 * all ioctl traffic is delegated to the nvhost_as layer. */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_as_dev_ctl,
#endif
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
1142
/* host1x class id -> device-node name suffix. Consulted first when
 * naming a client device node; first matching entry wins. */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1155
/* nvhost module id -> device-node name suffix. Fallback used when the
 * class id is not present in class_id_dev_name_map. */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1168
1169 static const char *get_device_name_for_dev(struct platform_device *dev)
1170 {
1171         int i;
1172         /* first choice is to use the class id if specified */
1173         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1174                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1175                 if (pdata->class == class_id_dev_name_map[i].class_id)
1176                         return class_id_dev_name_map[i].dev_name;
1177         }
1178
1179         /* second choice is module name if specified */
1180         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1181                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1182                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1183                         return module_id_dev_name_map[i].dev_name;
1184         }
1185
1186         /* last choice is to just use the given dev name */
1187         return dev->name;
1188 }
1189
1190 static struct device *nvhost_client_device_create(
1191         struct platform_device *pdev, struct cdev *cdev,
1192         const char *cdev_name, int devno,
1193         const struct file_operations *ops)
1194 {
1195         struct nvhost_master *host = nvhost_get_host(pdev);
1196         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1197         const char *use_dev_name;
1198         struct device *dev;
1199         int err;
1200
1201         nvhost_dbg_fn("");
1202
1203         BUG_ON(!host);
1204
1205         cdev_init(cdev, ops);
1206         cdev->owner = THIS_MODULE;
1207
1208         err = cdev_add(cdev, devno, 1);
1209         if (err < 0) {
1210                 dev_err(&pdev->dev,
1211                         "failed to add chan %i cdev\n", pdata->index);
1212                 return NULL;
1213         }
1214         use_dev_name = get_device_name_for_dev(pdev);
1215
1216         dev = device_create(host->nvhost_class,
1217                         NULL, devno, NULL,
1218                         (pdev->id <= 0) ?
1219                         IFACE_NAME "-%s%s" :
1220                         IFACE_NAME "-%s%s.%d",
1221                         cdev_name, use_dev_name, pdev->id);
1222
1223         if (IS_ERR(dev)) {
1224                 err = PTR_ERR(dev);
1225                 dev_err(&pdev->dev,
1226                         "failed to create %s %s device for %s\n",
1227                         use_dev_name, cdev_name, pdev->name);
1228                 return NULL;
1229         }
1230
1231         return dev;
1232 }
1233
1234 int nvhost_client_user_init(struct platform_device *dev)
1235 {
1236         int err, devno;
1237         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1238         struct nvhost_channel *ch = pdata->channel;
1239
1240         BUG_ON(!ch);
1241         /* reserve 5 minor #s for <dev> and as-<dev>, ctrl-<dev>,
1242          * dbg-<dev> and prof-<dev> */
1243
1244         err = alloc_chrdev_region(&devno, 0, 5, IFACE_NAME);
1245         if (err < 0) {
1246                 dev_err(&dev->dev, "failed to allocate devno\n");
1247                 goto fail;
1248         }
1249
1250         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1251                                 "", devno, &nvhost_channelops);
1252         if (ch->node == NULL)
1253                 goto fail;
1254         if (pdata->as_ops) {
1255                 ++devno;
1256                 ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1257                                         "as-", devno, &nvhost_asops);
1258                 if (ch->as_node == NULL)
1259                         goto fail;
1260         }
1261
1262         /* module control (npn-channel based, global) interface */
1263         if (pdata->ctrl_ops) {
1264                 ++devno;
1265                 pdata->ctrl_node = nvhost_client_device_create(dev,
1266                                         &pdata->ctrl_cdev, "ctrl-",
1267                                         devno, pdata->ctrl_ops);
1268                 if (pdata->ctrl_node == NULL)
1269                         goto fail;
1270         }
1271
1272         /* module debugger interface (per channel and global) */
1273         if (pdata->dbg_ops) {
1274                 ++devno;
1275                 pdata->dbg_node = nvhost_client_device_create(dev,
1276                                         &pdata->dbg_cdev, "dbg-",
1277                                         devno, pdata->dbg_ops);
1278                 if (pdata->dbg_node == NULL)
1279                         goto fail;
1280         }
1281
1282         /* module profiler interface (per channel and global) */
1283         if (pdata->prof_ops) {
1284                 ++devno;
1285                 pdata->prof_node = nvhost_client_device_create(dev,
1286                                         &pdata->prof_cdev, "prof-",
1287                                         devno, pdata->prof_ops);
1288                 if (pdata->prof_node == NULL)
1289                         goto fail;
1290         }
1291
1292
1293
1294         return 0;
1295 fail:
1296         return err;
1297 }
1298
1299 void nvhost_client_user_deinit(struct platform_device *dev)
1300 {
1301         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1302         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1303         struct nvhost_channel *ch = pdata->channel;
1304
1305         BUG_ON(!ch);
1306
1307         if (ch->node) {
1308                 device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
1309                 cdev_del(&ch->cdev);
1310         }
1311
1312         if (ch->as_node) {
1313                 device_destroy(nvhost_master->nvhost_class, ch->as_cdev.dev);
1314                 cdev_del(&ch->as_cdev);
1315         }
1316
1317         if (pdata->ctrl_node) {
1318                 device_destroy(nvhost_master->nvhost_class,
1319                                pdata->ctrl_cdev.dev);
1320                 cdev_del(&pdata->ctrl_cdev);
1321         }
1322
1323         if (pdata->dbg_node) {
1324                 device_destroy(nvhost_master->nvhost_class,
1325                                pdata->dbg_cdev.dev);
1326                 cdev_del(&pdata->dbg_cdev);
1327         }
1328
1329         if (pdata->prof_node) {
1330                 device_destroy(nvhost_master->nvhost_class,
1331                                pdata->prof_cdev.dev);
1332                 cdev_del(&pdata->prof_cdev);
1333         }
1334 }
1335
/* Full initialization of an nvhost client device: channel allocation,
 * debugfs, user-facing device nodes, device-list registration, optional
 * scaling, syncpoint reset and DMA parameters.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * initialized so far is unwound (fail: undoes user nodes, fail1: undoes
 * debugfs and the channel).
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail1;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	/* optional per-chip tick-counter hook */
	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	/* optional devfreq/scaling hook */
	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	/* register a slave device once, wiring it to the same parent so
	 * it shares this master's lifetime */
	if (pdata->slave && !pdata->slave_initialized) {
		struct nvhost_device_data *slave_pdata =
					pdata->slave->dev.platform_data;
		slave_pdata->master = dev;
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
		pdata->slave_initialized = 1;
	}

	return 0;

fail:
	/* Add clean-up */
	dev_err(&dev->dev, "failed to init client device\n");
	nvhost_client_user_deinit(dev);
fail1:
	nvhost_device_debug_deinit(dev);
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1403
1404 int nvhost_client_device_release(struct platform_device *dev)
1405 {
1406         struct nvhost_channel *ch;
1407         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1408
1409         ch = pdata->channel;
1410
1411         /* Release nvhost module resources */
1412         nvhost_module_deinit(dev);
1413
1414         /* Remove from nvhost device list */
1415         nvhost_device_list_remove(dev);
1416
1417         /* Release chardev and device node for user space */
1418         nvhost_client_user_deinit(dev);
1419
1420         /* Remove debugFS */
1421         nvhost_device_debug_deinit(dev);
1422
1423         /* Free nvhost channel */
1424         nvhost_free_channel(ch);
1425
1426         return 0;
1427 }
1428 EXPORT_SYMBOL(nvhost_client_device_release);
1429
1430 int nvhost_client_device_get_resources(struct platform_device *dev)
1431 {
1432         int i;
1433         void __iomem *regs = NULL;
1434         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1435
1436         for (i = 0; i < dev->num_resources; i++) {
1437                 struct resource *r = NULL;
1438
1439                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1440                 /* We've run out of mem resources */
1441                 if (!r)
1442                         break;
1443
1444                 regs = devm_request_and_ioremap(&dev->dev, r);
1445                 if (!regs)
1446                         goto fail;
1447
1448                 pdata->aperture[i] = regs;
1449         }
1450
1451         return 0;
1452
1453 fail:
1454         dev_err(&dev->dev, "failed to get register memory\n");
1455
1456         return -ENXIO;
1457 }
1458 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1459
1460 /* This is a simple wrapper around request_firmware that takes
1461  * 'fw_name' and if available applies a SOC relative path prefix to it.
1462  * The caller is responsible for calling release_firmware later.
1463  */
1464 const struct firmware *
1465 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1466 {
1467         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1468         const struct firmware *fw;
1469         char *fw_path = NULL;
1470         int path_len, err;
1471
1472         /* This field is NULL when calling from SYS_EXIT.
1473            Add a check here to prevent crash in request_firmware */
1474         if (!current->fs) {
1475                 BUG();
1476                 return NULL;
1477         }
1478
1479         if (!fw_name)
1480                 return NULL;
1481
1482         if (op->soc_name) {
1483                 path_len = strlen(fw_name) + strlen(op->soc_name);
1484                 path_len += 2; /* for the path separator and zero terminator*/
1485
1486                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1487                                      GFP_KERNEL);
1488                 if (!fw_path)
1489                         return NULL;
1490
1491                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1492                 fw_name = fw_path;
1493         }
1494
1495         err = request_firmware(&fw, fw_name, &dev->dev);
1496         kfree(fw_path);
1497         if (err) {
1498                 dev_err(&dev->dev, "failed to get firmware\n");
1499                 return NULL;
1500         }
1501
1502         /* note: caller must release_firmware */
1503         return fw;
1504 }