video: tegra: host: Prevent the race between channel open and close
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * Tegra Graphics Host Client Module
3  *
4  * Copyright (c) 2010-2016, NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spinlock.h>
22 #include <linux/fs.h>
23 #include <linux/cdev.h>
24 #include <linux/uaccess.h>
25 #include <linux/file.h>
26 #include <linux/clk.h>
27 #include <linux/hrtimer.h>
28 #include <linux/export.h>
29 #include <linux/firmware.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/tegra-soc.h>
32 #include <linux/anon_inodes.h>
33
34 #include <trace/events/nvhost.h>
35
36 #include <linux/io.h>
37 #include <linux/string.h>
38
39 #include <linux/nvhost.h>
40 #include <linux/nvhost_ioctl.h>
41
42 #include <mach/gpufuse.h>
43
44 #include "debug.h"
45 #include "bus_client.h"
46 #include "dev.h"
47 #include "class_ids.h"
48 #include "chip_support.h"
49 #include "nvhost_acm.h"
50
51 #include "nvhost_syncpt.h"
52 #include "nvhost_channel.h"
53 #include "nvhost_job.h"
54 #include "nvhost_sync.h"
55 #include "vhost/vhost.h"
56
/*
 * Check whether a hardware unit has been bonded out (fused away).
 * Only meaningful on pre-silicon platforms; on silicon the unit is
 * always reported as present.
 */
int nvhost_check_bondout(unsigned int id)
{
#ifdef CONFIG_NVHOST_BONDOUT_CHECK
	if (!tegra_platform_is_silicon())
		return tegra_bonded_out_dev(id);
#endif

	return 0;
}
EXPORT_SYMBOL(nvhost_check_bondout);
66
67 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
68 {
69         int err = 0;
70         struct resource *r;
71
72         /* check if offset is u32 aligned */
73         if (offset & 3)
74                 return -EINVAL;
75
76         r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
77         if (!r) {
78                 dev_err(&ndev->dev, "failed to get memory resource\n");
79                 return -ENODEV;
80         }
81
82         if (offset + 4 * count > resource_size(r)
83                         || (offset + 4 * count < offset))
84                 err = -EPERM;
85
86         return err;
87 }
88
89 int validate_max_size(struct platform_device *ndev, u32 size)
90 {
91         struct resource *r;
92
93         /* check if size is non-zero and u32 aligned */
94         if (!size || size & 3)
95                 return -EINVAL;
96
97         r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
98         if (!r) {
99                 dev_err(&ndev->dev, "failed to get memory resource\n");
100                 return -ENODEV;
101         }
102
103         if (size > resource_size(r))
104                 return -EPERM;
105
106         return 0;
107 }
108
109 void __iomem *get_aperture(struct platform_device *pdev, int index)
110 {
111         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
112
113         return pdata->aperture[index];
114 }
115
116 void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
117 {
118         void __iomem *addr = get_aperture(pdev, 0) + r;
119         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
120         writel(v, addr);
121 }
122 EXPORT_SYMBOL_GPL(host1x_writel);
123
124 u32 host1x_readl(struct platform_device *pdev, u32 r)
125 {
126         void __iomem *addr = get_aperture(pdev, 0) + r;
127         u32 v;
128
129         nvhost_dbg(dbg_reg, " d=%s r=0x%x", pdev->name, r);
130         v = readl(addr);
131         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
132
133         return v;
134 }
135 EXPORT_SYMBOL_GPL(host1x_readl);
136
137 void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v)
138 {
139         void __iomem *addr = ch->aperture + r;
140         nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
141         writel(v, addr);
142 }
143 EXPORT_SYMBOL_GPL(host1x_channel_writel);
144
145 u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r)
146 {
147         void __iomem *addr = ch->aperture + r;
148         u32 v;
149
150         nvhost_dbg(dbg_reg, " chid=%d r=0x%x", ch->chid, r);
151         v = readl(addr);
152         nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
153
154         return v;
155 }
156 EXPORT_SYMBOL_GPL(host1x_channel_readl);
157
158 void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v)
159 {
160         void __iomem *addr = dev->sync_aperture + r;
161         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
162         writel(v, addr);
163 }
164 EXPORT_SYMBOL_GPL(host1x_sync_writel);
165
166 u32 host1x_sync_readl(struct nvhost_master *dev, u32 r)
167 {
168         void __iomem *addr = dev->sync_aperture + r;
169         u32 v;
170
171         nvhost_dbg(dbg_reg, " d=%s r=0x%x", dev->dev->name, r);
172         v = readl(addr);
173         nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
174
175         return v;
176 }
177 EXPORT_SYMBOL_GPL(host1x_sync_readl);
178
179 int nvhost_read_module_regs(struct platform_device *ndev,
180                         u32 offset, int count, u32 *values)
181 {
182         int err;
183
184         /* verify offset */
185         err = validate_reg(ndev, offset, count);
186         if (err)
187                 return err;
188
189         err = nvhost_module_busy(ndev);
190         if (err)
191                 return err;
192
193         while (count--) {
194                 *(values++) = host1x_readl(ndev, offset);
195                 offset += 4;
196         }
197         rmb();
198         nvhost_module_idle(ndev);
199
200         return 0;
201 }
202
203 int nvhost_write_module_regs(struct platform_device *ndev,
204                         u32 offset, int count, const u32 *values)
205 {
206         int err;
207
208         /* verify offset */
209         err = validate_reg(ndev, offset, count);
210         if (err)
211                 return err;
212
213         err = nvhost_module_busy(ndev);
214         if (err)
215                 return err;
216
217         while (count--) {
218                 host1x_writel(ndev, offset, *(values++));
219                 offset += 4;
220         }
221         wmb();
222         nvhost_module_idle(ndev);
223
224         return 0;
225 }
226
/*
 * Per-open-file private state for a channel device node. Allocated in
 * __nvhost_channelopen() and torn down in nvhost_channelrelease().
 */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* mapped channel (per-device policy) */
	u32 timeout;			/* job timeout, 0 = no timeout */
	u32 priority;			/* submit priority for jobs */
	int clientid;			/* unique id; identifies job ownership */
	bool timeout_debug_dump;	/* dump debug state on job timeout */
	struct platform_device *pdev;	/* device this context belongs to */
	u32 syncpts[NVHOST_MODULE_MAX_SYNCPTS];	/* per-instance syncpoints */
	u32 client_managed_syncpt;	/* optional client managed syncpoint */

	/* error notifiers used for channel submit timeout */
	struct dma_buf *error_notifier_ref;
	u64 error_notifier_offset;

	/* lock to protect this structure from concurrent ioctl usage */
	struct mutex ioctl_lock;

	/* used for attaching to ctx list in device pdata */
	struct list_head node;
};
247
/*
 * Release a channel file descriptor.
 *
 * Undoes everything __nvhost_channelopen() set up, in reverse order:
 * detaches the context from the per-device list, drops the acm client,
 * the error-notifier dma-buf reference, the channel identifier mapping,
 * the exclusive-mode reservation and any channel/syncpoint references,
 * then frees the private data.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
	struct nvhost_master *host = nvhost_get_host(pdata->pdev);
	void *identifier;
	int i = 0;

	trace_nvhost_channel_release(dev_name(&priv->pdev->dev));

	/* detach from the per-device context list under its lock */
	mutex_lock(&pdata->userctx_list_lock);
	list_del(&priv->node);
	mutex_unlock(&pdata->userctx_list_lock);

	/* remove this client from acm */
	nvhost_module_remove_client(priv->pdev, priv);

	/* drop error notifier reference */
	if (priv->error_notifier_ref)
		dma_buf_put(priv->error_notifier_ref);

	/* Clear the identifier; must match the identifier that was used
	 * when the channel was mapped (pdata for shared per-device
	 * mapping, the context itself otherwise) */
	if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
	    !pdata->exclusive)
		identifier = (void *)pdata;
	else
		identifier = (void *)priv;
	nvhost_channel_remove_identifier(pdata, identifier);

	/* If the device is in exclusive mode, drop the reference */
	if (pdata->exclusive)
		pdata->num_mapped_chs--;

	/* drop channel reference if we took one at open time */
	if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
		nvhost_putchannel(priv->ch, 1);
	} else {
		/* drop instance syncpoints reference */
		for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
			if (priv->syncpts[i]) {
				nvhost_syncpt_put_ref(&host->syncpt,
						priv->syncpts[i]);
				priv->syncpts[i] = 0;
			}
		}

		if (priv->client_managed_syncpt) {
			nvhost_syncpt_put_ref(&host->syncpt,
					priv->client_managed_syncpt);
			priv->client_managed_syncpt = 0;
		}
	}

	/* open disabled poweroff for keepalive devices; re-enable it */
	if (pdata->keepalive)
		nvhost_module_enable_poweroff(priv->pdev);

	kfree(priv);
	return 0;
}
307
308 static int __nvhost_channelopen(struct inode *inode,
309                 struct platform_device *pdev,
310                 struct file *filp)
311 {
312         struct nvhost_channel_userctx *priv;
313         struct nvhost_device_data *pdata, *host1x_pdata;
314         struct nvhost_master *host;
315         int ret;
316
317         /* grab pdev and pdata based on inputs */
318         if (pdev) {
319                 pdata = platform_get_drvdata(pdev);
320         } else if (inode) {
321                 pdata = container_of(inode->i_cdev,
322                                 struct nvhost_device_data, cdev);
323                 pdev = pdata->pdev;
324         } else
325                 return -EINVAL;
326
327         /* ..and host1x specific data */
328         host1x_pdata = dev_get_drvdata(pdev->dev.parent);
329         host = nvhost_get_host(pdev);
330
331         trace_nvhost_channel_open(dev_name(&pdev->dev));
332
333         /* If the device is in exclusive mode, make channel reservation here */
334         if (pdata->exclusive) {
335                 if (pdata->num_mapped_chs == pdata->num_channels)
336                         goto fail_mark_used;
337                 pdata->num_mapped_chs++;
338         }
339
340         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
341         if (!priv)
342                 goto fail_allocate_priv;
343         filp->private_data = priv;
344
345         /* Register this client to acm */
346         if (nvhost_module_add_client(pdev, priv))
347                 goto fail_add_client;
348
349         /* Keep devices with keepalive flag powered */
350         if (pdata->keepalive)
351                 nvhost_module_disable_poweroff(pdev);
352
353         /* Check that the device can be powered */
354         ret = nvhost_module_busy(pdev);
355         if (ret)
356                 goto fail_power_on;
357         nvhost_module_idle(pdev);
358
359         if (nvhost_dev_is_virtual(pdev)) {
360                 /* If virtual, allocate a client id on the server side. This is
361                  * needed for channel recovery, to distinguish which clients
362                  * own which gathers.
363                  */
364
365                 int virt_moduleid = vhost_virt_moduleid(pdata->moduleid);
366                 struct nvhost_virt_ctx *virt_ctx =
367                                         nvhost_get_virt_data(pdev);
368
369                 if (virt_moduleid < 0) {
370                         ret = -EINVAL;
371                         goto fail_virt_clientid;
372                 }
373
374                 priv->clientid =
375                         vhost_channel_alloc_clientid(virt_ctx->handle,
376                                                         virt_moduleid);
377                 if (priv->clientid == 0) {
378                         dev_err(&pdev->dev,
379                                 "vhost_channel_alloc_clientid failed\n");
380                         ret = -ENOMEM;
381                         goto fail_virt_clientid;
382                 }
383         } else {
384                 /* Get client id */
385                 priv->clientid = atomic_add_return(1, &host->clientid);
386                 if (!priv->clientid)
387                         priv->clientid = atomic_add_return(1, &host->clientid);
388         }
389
390         /* Initialize private structure */
391         priv->timeout = host1x_pdata->nvhost_timeout_default;
392         priv->priority = NVHOST_PRIORITY_MEDIUM;
393         priv->timeout_debug_dump = true;
394         mutex_init(&priv->ioctl_lock);
395         priv->pdev = pdev;
396
397         if (!tegra_platform_is_silicon())
398                 priv->timeout = 0;
399
400         /* if we run in map-at-submit mode but device has override
401          * flag set, respect the override flag */
402         if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
403                 if (pdata->exclusive)
404                         ret = nvhost_channel_map(pdata, &priv->ch, priv);
405                 else
406                         ret = nvhost_channel_map(pdata, &priv->ch, pdata);
407                 if (ret) {
408                         pr_err("%s: failed to map channel, error: %d\n",
409                                __func__, ret);
410                         goto fail_get_channel;
411                 }
412         }
413
414         INIT_LIST_HEAD(&priv->node);
415         mutex_lock(&pdata->userctx_list_lock);
416         list_add_tail(&priv->node, &pdata->userctx_list);
417         mutex_unlock(&pdata->userctx_list_lock);
418
419         return 0;
420
421 fail_get_channel:
422 fail_virt_clientid:
423 fail_power_on:
424         if (pdata->keepalive)
425                 nvhost_module_enable_poweroff(pdev);
426         nvhost_module_remove_client(pdev, priv);
427 fail_add_client:
428         kfree(priv);
429 fail_allocate_priv:
430         if  (pdata->exclusive)
431                 pdata->num_mapped_chs--;
432 fail_mark_used:
433         return -ENOMEM;
434 }
435
/* file_operations .open hook: resolve the device from the cdev node */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	return __nvhost_channelopen(inode, NULL, filp);
}
440
/*
 * Install (or clear) the user-supplied error notifier buffer for this
 * context.
 *
 * args->mem is a dma-buf fd; args->offset the byte offset of a
 * struct nvhost_notification inside it. Passing mem == 0 drops any
 * previously installed notifier. On success the context holds one
 * dma-buf reference until it is replaced, cleared or the context is
 * released.
 */
static int nvhost_init_error_notifier(struct nvhost_channel_userctx *ctx,
				      struct nvhost_set_error_notifier *args)
{
	struct dma_buf *dmabuf;
	void *va;
	u64 end = args->offset + sizeof(struct nvhost_notification);

	/* are we releasing old reference? */
	if (!args->mem) {
		if (ctx->error_notifier_ref)
			dma_buf_put(ctx->error_notifier_ref);
		ctx->error_notifier_ref = NULL;
		return 0;
	}

	/* take reference for the userctx */
	dmabuf = dma_buf_get(args->mem);
	if (IS_ERR(dmabuf)) {
		pr_err("%s: Invalid handle: %d\n", __func__, args->mem);
		return -EINVAL;
	}

	/* the second test catches u64 wrap-around of offset + size */
	if (end > dmabuf->size || end < sizeof(struct nvhost_notification)) {
		dma_buf_put(dmabuf);
		pr_err("%s: invalid offset\n", __func__);
		return -EINVAL;
	}

	/* map handle and clear error notifier struct */
	va = dma_buf_vmap(dmabuf);
	if (!va) {
		dma_buf_put(dmabuf);
		pr_err("%s: Cannot map notifier handle\n", __func__);
		return -ENOMEM;
	}

	memset(va + args->offset, 0, sizeof(struct nvhost_notification));
	dma_buf_vunmap(dmabuf, va);

	/* release old reference */
	if (ctx->error_notifier_ref)
		dma_buf_put(ctx->error_notifier_ref);

	/* finally, store error notifier data */
	ctx->error_notifier_ref = dmabuf;
	ctx->error_notifier_offset = args->offset;

	return 0;
}
490
491 static inline u32 get_job_fence(struct nvhost_job *job, u32 id)
492 {
493         struct nvhost_channel *ch = job->ch;
494         struct nvhost_device_data *pdata = platform_get_drvdata(ch->dev);
495         u32 fence = job->sp[id].fence;
496
497         /* take into account work done increment */
498         if (pdata->push_work_done && id == 0)
499                 return fence - 1;
500
501         /* otherwise the fence is valid "as is" */
502         return fence;
503 }
504
/*
 * NVHOST_IOCTL_CHANNEL_SUBMIT handler: build a job from the userspace
 * arguments, pin it and hand it to the channel.
 *
 * Sequence: validate counts, allocate the job, copy command buffers /
 * relocations / wait checks / syncpoint increments from userspace,
 * validate that every requested syncpoint was reserved by this client,
 * pin buffers with the module powered, submit, and finally deliver the
 * resulting fence(s) back to userspace (array, sync-fence fd, or the
 * legacy single-fence field).
 */
static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
		struct nvhost_submit_args *args)
{
	struct nvhost_job *job;
	int num_cmdbufs = args->num_cmdbufs;
	int num_relocs = args->num_relocs;
	int num_waitchks = args->num_waitchks;
	int num_syncpt_incrs = args->num_syncpt_incrs;
	/* userspace passes pointers as u64; convert via uintptr_t */
	struct nvhost_cmdbuf __user *cmdbufs =
		(struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbufs;
	struct nvhost_cmdbuf __user *cmdbuf_exts =
		(struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbuf_exts;
	struct nvhost_reloc __user *relocs =
		(struct nvhost_reloc __user *)(uintptr_t)args->relocs;
	struct nvhost_reloc_shift __user *reloc_shifts =
		(struct nvhost_reloc_shift __user *)
				(uintptr_t)args->reloc_shifts;
	struct nvhost_waitchk __user *waitchks =
		(struct nvhost_waitchk __user *)(uintptr_t)args->waitchks;
	struct nvhost_syncpt_incr __user *syncpt_incrs =
		(struct nvhost_syncpt_incr __user *)
				(uintptr_t)args->syncpt_incrs;
	u32 __user *fences = (u32 __user *)(uintptr_t)args->fences;
	u32 __user *class_ids = (u32 __user *)(uintptr_t)args->class_ids;
	struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);

	/* syncpoints are held per-context or per-channel depending on
	 * the device's resource policy */
	const u32 *syncpt_array =
		(pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
		ctx->syncpts :
		ctx->ch->syncpts;
	u32 *local_class_ids = NULL;
	int err, i;

	if (num_cmdbufs < 0)
		return -EINVAL;

	/* at least one increment, and never more than there are
	 * syncpoints on this host1x instance */
	if ((num_syncpt_incrs < 1) || (num_syncpt_incrs >
		     nvhost_syncpt_nb_pts(&nvhost_get_host(ctx->pdev)->syncpt)))
		return -EINVAL;

	job = nvhost_job_alloc(ctx->ch,
			num_cmdbufs,
			num_relocs,
			num_waitchks,
			num_syncpt_incrs);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->num_waitchk = args->num_waitchks;
	job->num_syncpts = args->num_syncpt_incrs;
	job->priority = ctx->priority;
	job->clientid = ctx->clientid;
	job->client_managed_syncpt =
		(pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
		ctx->client_managed_syncpt : ctx->ch->client_managed_syncpt;

	/* copy error notifier settings for this job; the job takes its
	 * own dma-buf reference */
	if (ctx->error_notifier_ref) {
		get_dma_buf(ctx->error_notifier_ref);
		job->error_notifier_ref = ctx->error_notifier_ref;
		job->error_notifier_offset = ctx->error_notifier_offset;
	}

	/* mass copy class_ids */
	if (args->class_ids) {
		local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
			GFP_KERNEL);
		if (!local_class_ids) {
			err = -ENOMEM;
			goto fail;
		}
		err = copy_from_user(local_class_ids, class_ids,
			sizeof(u32) * num_cmdbufs);
		if (err) {
			err = -EINVAL;
			goto fail;
		}
	}

	for (i = 0; i < num_cmdbufs; ++i) {
		struct nvhost_cmdbuf cmdbuf;
		struct nvhost_cmdbuf_ext cmdbuf_ext;
		u32 class_id = class_ids ? local_class_ids[i] : 0;

		err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
		if (err)
			goto fail;

		/* cmdbuf extensions are optional; a failed copy just
		 * falls back to "no pre-fence" rather than aborting */
		cmdbuf_ext.pre_fence = -1;
		if (cmdbuf_exts)
			err = copy_from_user(&cmdbuf_ext,
					cmdbuf_exts + i, sizeof(cmdbuf_ext));
		if (err)
			cmdbuf_ext.pre_fence = -1;

		/* verify that the given class id is valid for this engine */
		if (class_id &&
		    class_id != pdata->class &&
		    class_id != NV_HOST1X_CLASS_ID) {
			err = -EINVAL;
			goto fail;
		}

		nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
				      cmdbuf.offset, class_id,
				      cmdbuf_ext.pre_fence);
	}

	kfree(local_class_ids);
	local_class_ids = NULL;

	err = copy_from_user(job->relocarray,
			relocs, sizeof(*relocs) * num_relocs);
	if (err)
		goto fail;

	err = copy_from_user(job->relocshiftarray,
			reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
	if (err)
		goto fail;

	err = copy_from_user(job->waitchk,
			waitchks, sizeof(*waitchks) * num_waitchks);
	if (err)
		goto fail;

	/*
	 * Go through each syncpoint from userspace. Here we:
	 * - Copy syncpoint information
	 * - Validate each syncpoint
	 * - Determine the index of hwctx syncpoint in the table
	 */

	for (i = 0; i < num_syncpt_incrs; ++i) {
		struct nvhost_syncpt_incr sp;
		bool found = false;
		int j;

		/* Copy */
		err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
		if (err)
			goto fail;

		/* Validate the trivial case */
		if (sp.syncpt_id == 0) {
			err = -EINVAL;
			goto fail;
		}

		/* ..and then ensure that the syncpoints have been reserved
		 * for this client */
		for (j = 0; j < NVHOST_MODULE_MAX_SYNCPTS; j++) {
			if (syncpt_array[j] == sp.syncpt_id) {
				found = true;
				break;
			}
		}

		if (!found) {
			err = -EINVAL;
			goto fail;
		}

		/* Store and get a reference */
		job->sp[i].id = sp.syncpt_id;
		job->sp[i].incrs = sp.syncpt_incrs;
	}

	trace_nvhost_channel_submit(ctx->pdev->name,
		job->num_gathers, job->num_relocs, job->num_waitchk,
		job->sp[0].id,
		job->sp[0].incrs);

	/* keep the module powered while pinning buffers */
	err = nvhost_module_busy(ctx->pdev);
	if (err)
		goto fail;

	err = nvhost_job_pin(job, &nvhost_get_host(ctx->pdev)->syncpt);
	nvhost_module_idle(ctx->pdev);
	if (err)
		goto fail;

	/* a per-submit timeout can only tighten the context timeout */
	if (args->timeout)
		job->timeout = min(ctx->timeout, args->timeout);
	else
		job->timeout = ctx->timeout;
	job->timeout_debug_dump = ctx->timeout_debug_dump;

	err = nvhost_channel_submit(job);
	if (err)
		goto fail_submit;

	/* Deliver multiple fences back to the userspace */
	if (fences)
		for (i = 0; i < num_syncpt_incrs; ++i) {
			u32 fence = get_job_fence(job, i);
			err = copy_to_user(fences, &fence, sizeof(u32));
			if (err)
				break;
			fences++;
		}

	/* Deliver the fence using the old mechanism _only_ if a single
	 * syncpoint is used. */

	if (args->flags & BIT(NVHOST_SUBMIT_FLAG_SYNC_FENCE_FD)) {
		struct nvhost_ctrl_sync_fence_info *pts;

		pts = kzalloc(num_syncpt_incrs *
			      sizeof(struct nvhost_ctrl_sync_fence_info),
			      GFP_KERNEL);
		if (!pts) {
			err = -ENOMEM;
			goto fail;
		}

		for (i = 0; i < num_syncpt_incrs; i++) {
			pts[i].id = job->sp[i].id;
			pts[i].thresh = get_job_fence(job, i);
		}

		err = nvhost_sync_create_fence_fd(ctx->pdev,
				pts, num_syncpt_incrs, "fence", &args->fence);
		kfree(pts);
		if (err)
			goto fail;
	} else if (num_syncpt_incrs == 1)
		args->fence =  get_job_fence(job, 0);
	else
		args->fence = 0;

	nvhost_job_put(job);

	return 0;

fail_submit:
	nvhost_job_unpin(job);
fail:
	nvhost_job_put(job);
	kfree(local_class_ids);

	nvhost_err(&pdata->pdev->dev, "failed with err %d\n", err);

	return err;
}
751
752 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
753 {
754         int i;
755         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
756
757         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
758                 if (pdata->clocks[i].moduleid == moduleid)
759                         return i;
760         }
761
762         /* Old user space is sending a random number in args. Return clock
763          * zero in these cases. */
764         return 0;
765 }
766
767 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
768         struct nvhost_clk_rate_args *arg)
769 {
770         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
771                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
772         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
773                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
774         int index = moduleid ?
775                         moduleid_to_index(ctx->pdev, moduleid) : 0;
776         int err;
777
778         err = nvhost_module_set_rate(ctx->pdev, ctx, arg->rate, index, attr);
779         if (!tegra_platform_is_silicon() && err) {
780                 nvhost_dbg(dbg_clk, "ignoring error: module=%u, attr=%u, index=%d, err=%d",
781                            moduleid, attr, index, err);
782                 err = 0;
783         }
784
785         return err;
786 }
787
788 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
789         u32 moduleid, u32 *rate)
790 {
791         int index = moduleid ? moduleid_to_index(ctx->pdev, moduleid) : 0;
792         int err;
793
794         err = nvhost_module_get_rate(ctx->pdev, (unsigned long *)rate, index);
795         if (!tegra_platform_is_silicon() && err) {
796                 nvhost_dbg(dbg_clk, "ignoring error: module=%u, rate=%u, error=%d",
797                            moduleid, *rate, err);
798                 err = 0;
799                 /* fake the return value */
800                 *rate = 32 * 1024;
801         }
802
803         return err;
804 }
805
/*
 * NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR handler: read or write
 * 'num_offsets' register blocks of 'block_size' bytes each, at offsets
 * supplied in a userspace array. Values are transferred through a
 * fixed 64-word bounce buffer, so each block is processed in batches
 * of at most 64 registers. Virtualized devices forward the whole
 * request to the server.
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 __user *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 __user *)(uintptr_t)args->values;
	u32 vals[64];	/* bounce buffer between userspace and MMIO */
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->pdev;

	if (nvhost_dev_is_virtual(ndev))
		return vhost_rdwr_module_regs(ndev, num_offsets,
				args->block_size, offsets, values, args->write);

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;	/* words left */

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			/* cap each transfer at the bounce buffer size */
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
869
870 static u32 create_mask(u32 *words, int num)
871 {
872         int i;
873         u32 word = 0;
874         for (i = 0; i < num; i++) {
875                 if (!words[i] || words[i] > 31)
876                         continue;
877                 word |= BIT(words[i]);
878         }
879
880         return word;
881 }
882
883 static u32 nvhost_ioctl_channel_get_syncpt_mask(
884                 struct nvhost_channel_userctx *priv)
885 {
886         struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
887         u32 mask;
888
889         if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
890                 mask = create_mask(priv->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
891         else
892                 mask = create_mask(priv->ch->syncpts,
893                                                 NVHOST_MODULE_MAX_SYNCPTS);
894
895         return mask;
896 }
897
898 static u32 nvhost_ioctl_channel_get_syncpt_channel(struct nvhost_channel *ch,
899                 struct nvhost_device_data *pdata, u32 index)
900 {
901         u32 id;
902
903         mutex_lock(&ch->syncpts_lock);
904
905         /* if we already have required syncpt then return it ... */
906         id = ch->syncpts[index];
907         if (id)
908                 goto exit_unlock;
909
910         /* ... otherwise get a new syncpt dynamically */
911         id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
912         if (!id)
913                 goto exit_unlock;
914
915         /* ... and store it for further references */
916         ch->syncpts[index] = id;
917
918 exit_unlock:
919         mutex_unlock(&ch->syncpts_lock);
920         return id;
921 }
922
923 static u32 nvhost_ioctl_channel_get_syncpt_instance(
924                 struct nvhost_channel_userctx *ctx,
925                 struct nvhost_device_data *pdata, u32 index)
926 {
927         u32 id;
928
929         /* if we already have required syncpt then return it ... */
930         if (ctx->syncpts[index]) {
931                 id = ctx->syncpts[index];
932                 return id;
933         }
934
935         /* ... otherwise get a new syncpt dynamically */
936         id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
937         if (!id)
938                 return 0;
939
940         /* ... and store it for further references */
941         ctx->syncpts[index] = id;
942
943         return id;
944 }
945
946 static int nvhost_ioctl_channel_get_client_syncpt(
947                 struct nvhost_channel_userctx *ctx,
948                 struct nvhost_get_client_managed_syncpt_arg *args)
949 {
950         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
951         const char __user *args_name =
952                 (const char __user *)(uintptr_t)args->name;
953         char name[32];
954         char set_name[32];
955
956         /* prepare syncpoint name (in case it is needed) */
957         if (args_name) {
958                 if (strncpy_from_user(name, args_name, sizeof(name)) < 0)
959                         return -EFAULT;
960                 name[sizeof(name) - 1] = '\0';
961         } else {
962                 name[0] = '\0';
963         }
964
965         snprintf(set_name, sizeof(set_name),
966                 "%s_%s", dev_name(&ctx->pdev->dev), name);
967
968         if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
969                 if (!ctx->client_managed_syncpt)
970                         ctx->client_managed_syncpt =
971                                 nvhost_get_syncpt_client_managed(pdata->pdev,
972                                                                 set_name);
973                 args->value = ctx->client_managed_syncpt;
974         } else {
975                 struct nvhost_channel *ch = ctx->ch;
976                 mutex_lock(&ch->syncpts_lock);
977                 if (!ch->client_managed_syncpt)
978                         ch->client_managed_syncpt =
979                                 nvhost_get_syncpt_client_managed(pdata->pdev,
980                                                                 set_name);
981                 mutex_unlock(&ch->syncpts_lock);
982                 args->value = ch->client_managed_syncpt;
983         }
984
985         if (!args->value)
986                 return -EAGAIN;
987
988         return 0;
989 }
990
/*
 * Main ioctl dispatcher for channel device nodes.
 *
 * The argument block is copied into a stack buffer for _IOC_WRITE
 * ioctls, the command is dispatched, and for _IOC_READ ioctls the
 * (possibly updated) buffer is copied back to user space on success.
 * All calls on one fd are serialized by priv->ioctl_lock, which also
 * protects against the channel being unmapped (priv->pdev == NULL)
 * while an ioctl is in flight.
 */
static long nvhost_channelctl(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	struct nvhost_master *host;
	struct device *dev;
	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
	int err = 0;

	/* reject unknown ioctl numbers and oversized argument blocks */
	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
		(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
		return -EFAULT;

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	/* serialize calls from this fd */
	mutex_lock(&priv->ioctl_lock);
	if (!priv->pdev) {
		pr_warn("Channel already unmapped\n");
		mutex_unlock(&priv->ioctl_lock);
		return -EFAULT;
	}

	host = nvhost_get_host(priv->pdev);
	dev = &priv->pdev->dev;
	switch (cmd) {
	case NVHOST_IOCTL_CHANNEL_OPEN:
	{
		/* open a fresh channel instance on the same device and
		 * hand its fd back to the caller */
		int fd;
		struct file *file;
		char *name;

		err = get_unused_fd_flags(O_RDWR);
		if (err < 0)
			break;
		fd = err;

		name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
				dev_name(dev), fd);
		if (!name) {
			err = -ENOMEM;
			put_unused_fd(fd);
			break;
		}

		file = anon_inode_getfile(name, filp->f_op, NULL, O_RDWR);
		kfree(name);
		if (IS_ERR(file)) {
			err = PTR_ERR(file);
			put_unused_fd(fd);
			break;
		}

		err = __nvhost_channelopen(NULL, priv->pdev, file);
		if (err) {
			put_unused_fd(fd);
			fput(file);
			break;
		}

		/* fd_install() publishes the file last, so the fd is
		 * never visible half-initialized */
		((struct nvhost_channel_open_args *)buf)->channel_fd = fd;
		fd_install(fd, file);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
	{
		((struct nvhost_get_param_args *)buf)->value =
			nvhost_ioctl_channel_get_syncpt_mask(priv);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->pdev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;

		if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS) {
			err = -EINVAL;
			break;
		}

		/* syncpoints are cached per-instance or per-channel
		 * depending on the device's resource policy */
		if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
			arg->value = nvhost_ioctl_channel_get_syncpt_instance(
						priv, pdata, arg->param);
		else
			arg->value = nvhost_ioctl_channel_get_syncpt_channel(
						priv->ch, pdata, arg->param);
		if (!arg->value) {
			err = -EAGAIN;
			break;
		}
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_CLIENT_MANAGED_SYNCPOINT:
	{
		err = nvhost_ioctl_channel_get_client_syncpt(priv,
			(struct nvhost_get_client_managed_syncpt_arg *)buf);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_FREE_CLIENT_MANAGED_SYNCPOINT:
		/* intentional no-op: client-managed syncpoints are
		 * released at channel/context teardown instead */
		break;
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
	{
		/* wait bases are not supported on this path */
		((struct nvhost_get_param_args *)buf)->value = 0;
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
	{
		err = -EINVAL;
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
	{
		struct nvhost_device_data *pdata = \
			platform_get_drvdata(priv->pdev);
		((struct nvhost_get_param_args *)buf)->value =
			create_mask(pdata->modulemutexes,
					NVHOST_MODULE_MAX_MODMUTEXES);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
	{
		struct nvhost_device_data *pdata = \
			platform_get_drvdata(priv->pdev);
		struct nvhost_get_param_arg *arg =
			(struct nvhost_get_param_arg *)buf;

		if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES ||
		    !pdata->modulemutexes[arg->param]) {
			err = -EINVAL;
			break;
		}

		arg->value = pdata->modulemutexes[arg->param];
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
		/* legacy ioctl, accepted but ignored */
		break;
	case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_ioctl_channel_get_rate(priv,
				arg->moduleid, &arg->rate);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		/* if virtualized, client requests to change clock rate
		 * are ignored
		 */
		if (nvhost_dev_is_virtual(priv->pdev))
			break;

		err = nvhost_ioctl_channel_set_rate(priv, arg);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
	{
		u32 timeout =
			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;

		priv->timeout = timeout;
		dev_dbg(&priv->pdev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
		((struct nvhost_get_param_args *)buf)->value = false;
		break;
	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
		priv->priority =
			(u32)((struct nvhost_set_priority_args *)buf)->priority;
		break;
	case NVHOST32_IOCTL_CHANNEL_MODULE_REGRDWR:
	{
		/* 32-bit compat path: widen the args and reuse the
		 * native handler */
		struct nvhost32_ctrl_module_regrdwr_args *args32 =
			(struct nvhost32_ctrl_module_regrdwr_args *)buf;
		struct nvhost_ctrl_module_regrdwr_args args;
		args.id = args32->id;
		args.num_offsets = args32->num_offsets;
		args.block_size = args32->block_size;
		args.offsets = args32->offsets;
		args.values = args32->values;
		args.write = args32->write;
		err = nvhost_ioctl_channel_module_regrdwr(priv, &args);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
		err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
		break;
	case NVHOST32_IOCTL_CHANNEL_SUBMIT:
	{
		/* 32-bit compat submit: widen args, then follow the
		 * same map / sync / submit / put sequence as the
		 * native case below */
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->pdev);
		struct nvhost32_submit_args *args32 = (void *)buf;
		struct nvhost_submit_args args;
		void *identifier;

		/* shared (non-exclusive) devices map channels by
		 * device; otherwise each fd gets its own channel */
		if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
		    !pdata->exclusive)
			identifier = (void *)pdata;
		else
			identifier = (void *)priv;

		memset(&args, 0, sizeof(args));
		args.submit_version = args32->submit_version;
		args.num_syncpt_incrs = args32->num_syncpt_incrs;
		args.num_cmdbufs = args32->num_cmdbufs;
		args.num_relocs = args32->num_relocs;
		args.num_waitchks = args32->num_waitchks;
		args.timeout = args32->timeout;
		args.syncpt_incrs = args32->syncpt_incrs;
		args.fence = args32->fence;

		args.cmdbufs = args32->cmdbufs;
		args.relocs = args32->relocs;
		args.reloc_shifts = args32->reloc_shifts;
		args.waitchks = args32->waitchks;
		args.class_ids = args32->class_ids;
		args.fences = args32->fences;

		/* first, get a channel */
		err = nvhost_channel_map(pdata, &priv->ch, identifier);
		if (err)
			break;

		/* ..then, synchronize syncpoint information.
		 *
		 * This information is updated only in this ioctl and
		 * channel destruction. We already hold channel
		 * reference and this ioctl is serialized => no-one is
		 * modifying the syncpoint field concurrently.
		 *
		 * Synchronization is not destructing anything
		 * in the structure; We can only allocate new
		 * syncpoints, and hence old ones cannot be released
		 * by following operation. If some syncpoint is stored
		 * into the channel structure, it remains there. */

		if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
			memcpy(priv->ch->syncpts, priv->syncpts,
			       sizeof(priv->syncpts));
			priv->ch->client_managed_syncpt =
				priv->client_managed_syncpt;
		}

		/* submit work */
		err = nvhost_ioctl_channel_submit(priv, &args);

		/* ..and drop the local reference */
		nvhost_putchannel(priv->ch, 1);

		/* propagate the post-submit fence back to 32-bit args */
		args32->fence = args.fence;

		break;
	}
	case NVHOST_IOCTL_CHANNEL_SUBMIT:
	{
		struct nvhost_device_data *pdata =
			platform_get_drvdata(priv->pdev);
		void *identifier;

		/* shared (non-exclusive) devices map channels by
		 * device; otherwise each fd gets its own channel */
		if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
		    !pdata->exclusive)
			identifier = (void *)pdata;
		else
			identifier = (void *)priv;

		/* first, get a channel */
		err = nvhost_channel_map(pdata, &priv->ch, identifier);
		if (err)
			break;

		/* ..then, synchronize syncpoint information.
		 *
		 * This information is updated only in this ioctl and
		 * channel destruction. We already hold channel
		 * reference and this ioctl is serialized => no-one is
		 * modifying the syncpoint field concurrently.
		 *
		 * Synchronization is not destructing anything
		 * in the structure; We can only allocate new
		 * syncpoints, and hence old ones cannot be released
		 * by following operation. If some syncpoint is stored
		 * into the channel structure, it remains there. */

		if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
			memcpy(priv->ch->syncpts, priv->syncpts,
			       sizeof(priv->syncpts));
			priv->ch->client_managed_syncpt =
				priv->client_managed_syncpt;
		}

		/* submit work */
		err = nvhost_ioctl_channel_submit(priv, (void *)buf);

		/* ..and drop the local reference */
		nvhost_putchannel(priv->ch, 1);

		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
		err = nvhost_init_error_notifier(priv,
			(struct nvhost_set_error_notifier *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
	{
		u32 timeout =
			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
		bool timeout_debug_dump = !((u32)
			((struct nvhost_set_timeout_ex_args *)buf)->flags &
			(1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
		priv->timeout = timeout;
		priv->timeout_debug_dump = timeout_debug_dump;
		dev_dbg(&priv->pdev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	}
	default:
		nvhost_dbg_info("unrecognized ioctl cmd: 0x%x", cmd);
		err = -ENOTTY;
		break;
	}

	mutex_unlock(&priv->ioctl_lock);

	/* copy results back for read-direction ioctls */
	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));

	return err;
}
1335
/* File operations for the per-channel device nodes; all ioctls are
 * funneled through nvhost_channelctl().
 * NOTE(review): the same handler serves compat_ioctl — this assumes the
 * argument structs are either layout-compatible between 32/64-bit or
 * handled via the NVHOST32_* command variants; verify for new ioctls. */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_channelctl,
#endif
	.unlocked_ioctl = nvhost_channelctl
};
1345
1346 static const char *get_device_name_for_dev(struct platform_device *dev)
1347 {
1348         struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1349
1350         if (pdata->devfs_name)
1351                 return pdata->devfs_name;
1352
1353         return dev->name;
1354 }
1355
1356 static struct device *nvhost_client_device_create(
1357         struct platform_device *pdev, struct cdev *cdev,
1358         const char *cdev_name, dev_t devno,
1359         const struct file_operations *ops)
1360 {
1361         struct nvhost_master *host = nvhost_get_host(pdev);
1362         const char *use_dev_name;
1363         struct device *dev;
1364         int err;
1365
1366         nvhost_dbg_fn("");
1367
1368         BUG_ON(!host);
1369
1370         cdev_init(cdev, ops);
1371         cdev->owner = THIS_MODULE;
1372
1373         err = cdev_add(cdev, devno, 1);
1374         if (err < 0) {
1375                 dev_err(&pdev->dev,
1376                         "failed to add cdev\n");
1377                 return NULL;
1378         }
1379         use_dev_name = get_device_name_for_dev(pdev);
1380
1381         dev = device_create(host->nvhost_class,
1382                         NULL, devno, NULL,
1383                         (pdev->id <= 0) ?
1384                         IFACE_NAME "-%s%s" :
1385                         IFACE_NAME "-%s%s.%d",
1386                         cdev_name, use_dev_name, pdev->id);
1387
1388         if (IS_ERR(dev)) {
1389                 err = PTR_ERR(dev);
1390                 dev_err(&pdev->dev,
1391                         "failed to create %s %s device for %s\n",
1392                         use_dev_name, cdev_name, pdev->name);
1393                 return NULL;
1394         }
1395
1396         return dev;
1397 }
1398
1399 #define NVHOST_NUM_CDEV 4
1400 int nvhost_client_user_init(struct platform_device *dev)
1401 {
1402         dev_t devno;
1403         int err;
1404         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1405
1406         /* reserve 3 minor #s for <dev>, and ctrl-<dev> */
1407
1408         err = alloc_chrdev_region(&devno, 0, NVHOST_NUM_CDEV, IFACE_NAME);
1409         if (err < 0) {
1410                 dev_err(&dev->dev, "failed to allocate devno\n");
1411                 goto fail;
1412         }
1413         pdata->cdev_region = devno;
1414
1415         pdata->node = nvhost_client_device_create(dev, &pdata->cdev,
1416                                 "", devno, &nvhost_channelops);
1417         if (pdata->node == NULL)
1418                 goto fail;
1419
1420         /* module control (npn-channel based, global) interface */
1421         if (pdata->ctrl_ops) {
1422                 ++devno;
1423                 pdata->ctrl_node = nvhost_client_device_create(dev,
1424                                         &pdata->ctrl_cdev, "ctrl-",
1425                                         devno, pdata->ctrl_ops);
1426                 if (pdata->ctrl_node == NULL)
1427                         goto fail;
1428         }
1429
1430         return 0;
1431 fail:
1432         return err;
1433 }
1434
1435 static void nvhost_client_user_deinit(struct platform_device *dev)
1436 {
1437         struct nvhost_master *nvhost_master = nvhost_get_host(dev);
1438         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1439
1440         if (pdata->node) {
1441                 device_destroy(nvhost_master->nvhost_class, pdata->cdev.dev);
1442                 cdev_del(&pdata->cdev);
1443         }
1444
1445         if (pdata->as_node) {
1446                 device_destroy(nvhost_master->nvhost_class, pdata->as_cdev.dev);
1447                 cdev_del(&pdata->as_cdev);
1448         }
1449
1450         if (pdata->ctrl_node) {
1451                 device_destroy(nvhost_master->nvhost_class,
1452                                pdata->ctrl_cdev.dev);
1453                 cdev_del(&pdata->ctrl_cdev);
1454         }
1455
1456         unregister_chrdev_region(pdata->cdev_region, NVHOST_NUM_CDEV);
1457 }
1458
/*
 * Full initialization of a client device: debugfs, user-space nodes,
 * device-list registration, optional scaling, a power-cycle to reset
 * syncpoint state, DMA parameters and optional hardware init.
 *
 * Returns 0 on success or a negative errno; on failure the user-space
 * interface and debugfs entries are torn down again.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	mutex_init(&pdata->userctx_list_lock);
	INIT_LIST_HEAD(&pdata->userctx_list);

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	err = nvhost_module_busy(nvhost_master->dev);
	if (err)
		goto fail_busy;

	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	/* per-instance resources imply channels are mapped at submit */
	if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
		nvhost_master->info.channel_policy = MAP_CHANNEL_ON_SUBMIT;
		nvhost_update_characteristics(dev);
	}

	if (pdata->hw_init)
		return pdata->hw_init(dev);

	return 0;

fail_busy:
	/* Remove from nvhost device list */
	nvhost_device_list_remove(dev);
fail:
	/* Add clean-up */
	dev_err(&dev->dev, "failed to init client device\n");
	nvhost_client_user_deinit(dev);
	nvhost_device_debug_deinit(dev);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1515 EXPORT_SYMBOL(nvhost_client_device_init);
1516
/*
 * Undo nvhost_client_device_init(): release module resources, drop the
 * device from the nvhost list, remove the user-space chardev nodes and
 * tear down debugfs. Always succeeds.
 */
int nvhost_client_device_release(struct platform_device *dev)
{
	nvhost_module_deinit(dev);

	nvhost_device_list_remove(dev);

	nvhost_client_user_deinit(dev);

	nvhost_device_debug_deinit(dev);

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
1533 EXPORT_SYMBOL(nvhost_client_device_release);
1534
1535 int nvhost_device_get_resources(struct platform_device *dev)
1536 {
1537         int i;
1538         void __iomem *regs = NULL;
1539         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1540
1541         for (i = 0; i < dev->num_resources; i++) {
1542                 struct resource *r = NULL;
1543
1544                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1545                 /* We've run out of mem resources */
1546                 if (!r)
1547                         break;
1548
1549                 regs = devm_request_and_ioremap(&dev->dev, r);
1550                 if (!regs)
1551                         goto fail;
1552
1553                 pdata->aperture[i] = regs;
1554         }
1555
1556         return 0;
1557
1558 fail:
1559         dev_err(&dev->dev, "failed to get register memory\n");
1560
1561         return -ENXIO;
1562 }
1563
/* Exported thin wrapper around nvhost_device_get_resources() for
 * client driver modules. */
int nvhost_client_device_get_resources(struct platform_device *dev)
{
	return nvhost_device_get_resources(dev);
}
EXPORT_SYMBOL(nvhost_client_device_get_resources);
1569
1570 /* This is a simple wrapper around request_firmware that takes
1571  * 'fw_name' and if available applies a SOC relative path prefix to it.
1572  * The caller is responsible for calling release_firmware later.
1573  */
1574 const struct firmware *
1575 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1576 {
1577         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1578         const struct firmware *fw;
1579         char *fw_path = NULL;
1580         int path_len, err;
1581
1582         /* This field is NULL when calling from SYS_EXIT.
1583            Add a check here to prevent crash in request_firmware */
1584         if (!current->fs) {
1585                 BUG();
1586                 return NULL;
1587         }
1588
1589         if (!fw_name)
1590                 return NULL;
1591
1592         if (op->soc_name) {
1593                 path_len = strlen(fw_name) + strlen(op->soc_name);
1594                 path_len += 2; /* for the path separator and zero terminator*/
1595
1596                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1597                                      GFP_KERNEL);
1598                 if (!fw_path)
1599                         return NULL;
1600
1601                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1602                 fw_name = fw_path;
1603         }
1604
1605         err = request_firmware(&fw, fw_name, &dev->dev);
1606         kfree(fw_path);
1607         if (err) {
1608                 dev_err(&dev->dev, "failed to get firmware\n");
1609                 return NULL;
1610         }
1611
1612         /* note: caller must release_firmware */
1613         return fw;
1614 }
1615 EXPORT_SYMBOL(nvhost_client_request_firmware);
1616
1617 struct nvhost_channel *nvhost_find_chan_by_clientid(
1618                                 struct platform_device *pdev,
1619                                 int clientid)
1620 {
1621         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
1622         struct nvhost_channel_userctx *ctx;
1623         struct nvhost_channel *ch = NULL;
1624
1625         mutex_lock(&pdata->userctx_list_lock);
1626         list_for_each_entry(ctx, &pdata->userctx_list, node) {
1627                 if (ctx->clientid == clientid) {
1628                         ch = ctx->ch;
1629                         break;
1630                 }
1631         }
1632         mutex_unlock(&pdata->userctx_list_lock);
1633
1634         return ch;
1635 }