/*
 * Tegra Graphics Host Client Module
 *
 * Copyright (c) 2010-2016, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/firmware.h>
#include <linux/dma-mapping.h>
#include <linux/tegra-soc.h>
#include <linux/anon_inodes.h>

#include <trace/events/nvhost.h>

#include <linux/io.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include <mach/gpufuse.h>

#include "debug.h"
#include "bus_client.h"
#include "dev.h"
#include "class_ids.h"
#include "chip_support.h"
#include "nvhost_acm.h"

#include "nvhost_syncpt.h"
#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "nvhost_sync.h"
#include "vhost/vhost.h"

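/*
 * Check whether the hardware unit identified by @id is bonded out (not
 * present) on pre-silicon platforms. Always reports the unit as present
 * when CONFIG_NVHOST_BONDOUT_CHECK is disabled or on real silicon.
 */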
int nvhost_check_bondout(unsigned int id)
{
#ifdef CONFIG_NVHOST_BONDOUT_CHECK
        if (!tegra_platform_is_silicon())
                return tegra_bonded_out_dev(id);
#endif
        return 0;
}
EXPORT_SYMBOL(nvhost_check_bondout);

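/*
 * Check that a register range (@offset plus @count 32-bit words) is
 * u32-aligned and lies within the device's first memory aperture. The
 * second comparison below guards against arithmetic overflow.
 */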
static int validate_reg(struct platform_device *ndev, u32 offset, int count)
{
        int err = 0;
        struct resource *r;

        /* check if offset is u32 aligned */
        if (offset & 3)
                return -EINVAL;

        r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&ndev->dev, "failed to get memory resource\n");
                return -ENODEV;
        }

        if (offset + 4 * count > resource_size(r)
                        || (offset + 4 * count < offset))
                err = -EPERM;

        return err;
}

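/*
 * Check that @size is a non-zero, u32-aligned length that fits inside
 * the device's first memory aperture.
 */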
int validate_max_size(struct platform_device *ndev, u32 size)
{
        struct resource *r;

        /* check if size is non-zero and u32 aligned */
        if (!size || size & 3)
                return -EINVAL;

        r = platform_get_resource(ndev, IORESOURCE_MEM, 0);
        if (!r) {
                dev_err(&ndev->dev, "failed to get memory resource\n");
                return -ENODEV;
        }

        if (size > resource_size(r))
                return -EPERM;

        return 0;
}

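/*
 * Register access helpers. get_aperture() returns the ioremapped base
 * of the given aperture; the writel()/readl() variants below add a
 * register offset to the relevant aperture and issue the access, e.g.
 *
 *	host1x_writel(pdev, 0x30, 0x1);	(hypothetical offset/value)
 *
 * No range checking is done here; callers are expected to validate
 * offsets with validate_reg()/validate_max_size().
 */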
void __iomem *get_aperture(struct platform_device *pdev, int index)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(pdev);

        return pdata->aperture[index];
}

void host1x_writel(struct platform_device *pdev, u32 r, u32 v)
{
        void __iomem *addr = get_aperture(pdev, 0) + r;

        nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);
        writel(v, addr);
}
EXPORT_SYMBOL_GPL(host1x_writel);

u32 host1x_readl(struct platform_device *pdev, u32 r)
{
        void __iomem *addr = get_aperture(pdev, 0) + r;
        u32 v;

        nvhost_dbg(dbg_reg, " d=%s r=0x%x", pdev->name, r);
        v = readl(addr);
        nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", pdev->name, r, v);

        return v;
}
EXPORT_SYMBOL_GPL(host1x_readl);

void host1x_channel_writel(struct nvhost_channel *ch, u32 r, u32 v)
{
        void __iomem *addr = ch->aperture + r;

        nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);
        writel(v, addr);
}
EXPORT_SYMBOL_GPL(host1x_channel_writel);

u32 host1x_channel_readl(struct nvhost_channel *ch, u32 r)
{
        void __iomem *addr = ch->aperture + r;
        u32 v;

        nvhost_dbg(dbg_reg, " chid=%d r=0x%x", ch->chid, r);
        v = readl(addr);
        nvhost_dbg(dbg_reg, " chid=%d r=0x%x v=0x%x", ch->chid, r, v);

        return v;
}
EXPORT_SYMBOL_GPL(host1x_channel_readl);

void host1x_sync_writel(struct nvhost_master *dev, u32 r, u32 v)
{
        void __iomem *addr = dev->sync_aperture + r;

        nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);
        writel(v, addr);
}
EXPORT_SYMBOL_GPL(host1x_sync_writel);

u32 host1x_sync_readl(struct nvhost_master *dev, u32 r)
{
        void __iomem *addr = dev->sync_aperture + r;
        u32 v;

        nvhost_dbg(dbg_reg, " d=%s r=0x%x", dev->dev->name, r);
        v = readl(addr);
        nvhost_dbg(dbg_reg, " d=%s r=0x%x v=0x%x", dev->dev->name, r, v);

        return v;
}
EXPORT_SYMBOL_GPL(host1x_sync_readl);

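/*
 * Read or write a block of @count registers starting at @offset.
 * Offsets are validated first and the unit is kept powered (via
 * nvhost_module_busy()/idle()) for the duration of the access.
 */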
int nvhost_read_module_regs(struct platform_device *ndev,
                        u32 offset, int count, u32 *values)
{
        int err;

        /* verify offset */
        err = validate_reg(ndev, offset, count);
        if (err)
                return err;

        err = nvhost_module_busy(ndev);
        if (err)
                return err;

        while (count--) {
                *(values++) = host1x_readl(ndev, offset);
                offset += 4;
        }
        rmb();
        nvhost_module_idle(ndev);

        return 0;
}

int nvhost_write_module_regs(struct platform_device *ndev,
                        u32 offset, int count, const u32 *values)
{
        int err;

        /* verify offset */
        err = validate_reg(ndev, offset, count);
        if (err)
                return err;

        err = nvhost_module_busy(ndev);
        if (err)
                return err;

        while (count--) {
                host1x_writel(ndev, offset, *(values++));
                offset += 4;
        }
        wmb();
        nvhost_module_idle(ndev);

        return 0;
}

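/* Per-open-file state for a channel device node. */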
struct nvhost_channel_userctx {
        struct nvhost_channel *ch;
        u32 timeout;
        u32 priority;
        int clientid;
        bool timeout_debug_dump;
        struct platform_device *pdev;
        u32 syncpts[NVHOST_MODULE_MAX_SYNCPTS];
        u32 client_managed_syncpt;

        /* error notifiers used on channel submit timeout */
        struct dma_buf *error_notifier_ref;
        u64 error_notifier_offset;

        /* lock to protect this structure from concurrent ioctl usage */
        struct mutex ioctl_lock;

        /* used for attaching to ctx list in device pdata */
        struct list_head node;
};

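/*
 * Tear down all per-fd state on close: detach from the userctx list and
 * from ACM, drop the error notifier, channel and syncpoint references,
 * and re-enable power-off for keepalive devices.
 */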
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_channel_userctx *priv = filp->private_data;
        struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
        struct nvhost_master *host = nvhost_get_host(pdata->pdev);
        void *identifier;
        int i = 0;

        trace_nvhost_channel_release(dev_name(&priv->pdev->dev));

        mutex_lock(&pdata->userctx_list_lock);
        list_del(&priv->node);
        mutex_unlock(&pdata->userctx_list_lock);

        /* remove this client from acm */
        nvhost_module_remove_client(priv->pdev, priv);

        /* drop error notifier reference */
        if (priv->error_notifier_ref)
                dma_buf_put(priv->error_notifier_ref);

        /* Clear the identifier */
        if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
            !pdata->exclusive)
                identifier = (void *)pdata;
        else
                identifier = (void *)priv;
        nvhost_channel_remove_identifier(pdata, identifier);

        /* If the device is in exclusive mode, drop the reference */
        if (pdata->exclusive)
                pdata->num_mapped_chs--;

        /* drop channel reference if we took one at open time */
        if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
                nvhost_putchannel(priv->ch, 1);
        } else {
                /* drop instance syncpoints reference */
                for (i = 0; i < NVHOST_MODULE_MAX_SYNCPTS; ++i) {
                        if (priv->syncpts[i]) {
                                nvhost_syncpt_put_ref(&host->syncpt,
                                                priv->syncpts[i]);
                                priv->syncpts[i] = 0;
                        }
                }

                if (priv->client_managed_syncpt) {
                        nvhost_syncpt_put_ref(&host->syncpt,
                                        priv->client_managed_syncpt);
                        priv->client_managed_syncpt = 0;
                }
        }

        if (pdata->keepalive)
                nvhost_module_enable_poweroff(priv->pdev);

        kfree(priv);
        return 0;
}

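/*
 * Common open path, entered either through the character device (inode
 * supplied) or through the CHANNEL_OPEN ioctl (pdev supplied). Sets up a
 * new userctx; for RESOURCE_PER_DEVICE devices a channel is mapped here,
 * otherwise channels are mapped lazily at submit time.
 */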
static int __nvhost_channelopen(struct inode *inode,
                struct platform_device *pdev,
                struct file *filp)
{
        struct nvhost_channel_userctx *priv;
        struct nvhost_device_data *pdata, *host1x_pdata;
        struct nvhost_master *host;
        int ret;

        /* grab pdev and pdata based on inputs */
        if (pdev) {
                pdata = platform_get_drvdata(pdev);
        } else if (inode) {
                pdata = container_of(inode->i_cdev,
                                struct nvhost_device_data, cdev);
                pdev = pdata->pdev;
        } else
                return -EINVAL;

        /* ..and host1x specific data */
        host1x_pdata = dev_get_drvdata(pdev->dev.parent);
        host = nvhost_get_host(pdev);

        trace_nvhost_channel_open(dev_name(&pdev->dev));

        /* If the device is in exclusive mode, make channel reservation here */
        if (pdata->exclusive) {
                if (pdata->num_mapped_chs == pdata->num_channels) {
                        ret = -ENOMEM;
                        goto fail_mark_used;
                }
                pdata->num_mapped_chs++;
        }

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
                goto fail_allocate_priv;
        }
        filp->private_data = priv;

        /* Register this client to acm */
        if (nvhost_module_add_client(pdev, priv)) {
                ret = -ENOMEM;
                goto fail_add_client;
        }

        /* Keep devices with keepalive flag powered */
        if (pdata->keepalive)
                nvhost_module_disable_poweroff(pdev);

        /* Check that the device can be powered */
        ret = nvhost_module_busy(pdev);
        if (ret)
                goto fail_power_on;
        nvhost_module_idle(pdev);

        if (nvhost_dev_is_virtual(pdev)) {
                /* If virtual, allocate a client id on the server side. This is
                 * needed for channel recovery, to distinguish which clients
                 * own which gathers.
                 */

                int virt_moduleid = vhost_virt_moduleid(pdata->moduleid);
                struct nvhost_virt_ctx *virt_ctx =
                                        nvhost_get_virt_data(pdev);

                if (virt_moduleid < 0) {
                        ret = -EINVAL;
                        goto fail_virt_clientid;
                }

                priv->clientid =
                        vhost_channel_alloc_clientid(virt_ctx->handle,
                                                        virt_moduleid);
                if (priv->clientid == 0) {
                        dev_err(&pdev->dev,
                                "vhost_channel_alloc_clientid failed\n");
                        ret = -ENOMEM;
                        goto fail_virt_clientid;
                }
        } else {
                /* Get client id */
                priv->clientid = atomic_add_return(1, &host->clientid);
                if (!priv->clientid)
                        priv->clientid = atomic_add_return(1, &host->clientid);
        }

        /* Initialize private structure */
        priv->timeout = host1x_pdata->nvhost_timeout_default;
        priv->priority = NVHOST_PRIORITY_MEDIUM;
        priv->timeout_debug_dump = true;
        mutex_init(&priv->ioctl_lock);
        priv->pdev = pdev;

        if (!tegra_platform_is_silicon())
                priv->timeout = 0;

        /* if we run in map-at-submit mode but device has override
         * flag set, respect the override flag */
        if (pdata->resource_policy == RESOURCE_PER_DEVICE) {
                if (pdata->exclusive)
                        ret = nvhost_channel_map(pdata, &priv->ch, priv);
                else
                        ret = nvhost_channel_map(pdata, &priv->ch, pdata);
                if (ret) {
                        pr_err("%s: failed to map channel, error: %d\n",
                               __func__, ret);
                        goto fail_get_channel;
                }
        }

        INIT_LIST_HEAD(&priv->node);
        mutex_lock(&pdata->userctx_list_lock);
        list_add_tail(&priv->node, &pdata->userctx_list);
        mutex_unlock(&pdata->userctx_list_lock);

        return 0;

fail_get_channel:
fail_virt_clientid:
fail_power_on:
        if (pdata->keepalive)
                nvhost_module_enable_poweroff(pdev);
        nvhost_module_remove_client(pdev, priv);
fail_add_client:
        kfree(priv);
fail_allocate_priv:
        if (pdata->exclusive)
                pdata->num_mapped_chs--;
fail_mark_used:
        return ret;
}

static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
        return __nvhost_channelopen(inode, NULL, filp);
}

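/*
 * Attach a dma-buf backed error notifier to this context, or detach the
 * current one when @args->mem is zero. The notifier area is cleared on
 * setup so that stale errors are not reported.
 */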
static int nvhost_init_error_notifier(struct nvhost_channel_userctx *ctx,
                                      struct nvhost_set_error_notifier *args)
{
        struct dma_buf *dmabuf;
        void *va;
        u64 end = args->offset + sizeof(struct nvhost_notification);

        /* are we releasing old reference? */
        if (!args->mem) {
                if (ctx->error_notifier_ref)
                        dma_buf_put(ctx->error_notifier_ref);
                ctx->error_notifier_ref = NULL;
                return 0;
        }

        /* take reference for the userctx */
        dmabuf = dma_buf_get(args->mem);
        if (IS_ERR(dmabuf)) {
                pr_err("%s: Invalid handle: %d\n", __func__, args->mem);
                return -EINVAL;
        }

        if (end > dmabuf->size || end < sizeof(struct nvhost_notification)) {
                dma_buf_put(dmabuf);
                pr_err("%s: invalid offset\n", __func__);
                return -EINVAL;
        }

        /* map handle and clear error notifier struct */
        va = dma_buf_vmap(dmabuf);
        if (!va) {
                dma_buf_put(dmabuf);
                pr_err("%s: Cannot map notifier handle\n", __func__);
                return -ENOMEM;
        }

        memset(va + args->offset, 0, sizeof(struct nvhost_notification));
        dma_buf_vunmap(dmabuf, va);

        /* release old reference */
        if (ctx->error_notifier_ref)
                dma_buf_put(ctx->error_notifier_ref);

        /* finally, store error notifier data */
        ctx->error_notifier_ref = dmabuf;
        ctx->error_notifier_offset = args->offset;

        return 0;
}

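/*
 * Return the syncpoint threshold userspace should wait for on syncpoint
 * @id of @job, compensating for the extra "work done" increment when
 * the device uses push_work_done.
 */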
static inline u32 get_job_fence(struct nvhost_job *job, u32 id)
{
        struct nvhost_channel *ch = job->ch;
        struct nvhost_device_data *pdata = platform_get_drvdata(ch->dev);
        u32 fence = job->sp[id].fence;

        /* take into account work done increment */
        if (pdata->push_work_done && id == 0)
                return fence - 1;

        /* otherwise the fence is valid "as is" */
        return fence;
}

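/*
 * SUBMIT ioctl backend: copy cmdbufs, relocations, wait checks and
 * syncpoint increments from userspace, validate class ids and syncpoint
 * ownership, then pin buffers, queue the job and report the resulting
 * fence(s) back to userspace (optionally as a sync fence fd).
 */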
static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
                struct nvhost_submit_args *args)
{
        struct nvhost_job *job;
        int num_cmdbufs = args->num_cmdbufs;
        int num_relocs = args->num_relocs;
        int num_waitchks = args->num_waitchks;
        int num_syncpt_incrs = args->num_syncpt_incrs;
        struct nvhost_cmdbuf __user *cmdbufs =
                (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbufs;
        struct nvhost_cmdbuf __user *cmdbuf_exts =
                (struct nvhost_cmdbuf __user *)(uintptr_t)args->cmdbuf_exts;
        struct nvhost_reloc __user *relocs =
                (struct nvhost_reloc __user *)(uintptr_t)args->relocs;
        struct nvhost_reloc_shift __user *reloc_shifts =
                (struct nvhost_reloc_shift __user *)
                                (uintptr_t)args->reloc_shifts;
        struct nvhost_waitchk __user *waitchks =
                (struct nvhost_waitchk __user *)(uintptr_t)args->waitchks;
        struct nvhost_syncpt_incr __user *syncpt_incrs =
                (struct nvhost_syncpt_incr __user *)
                                (uintptr_t)args->syncpt_incrs;
        u32 __user *fences = (u32 __user *)(uintptr_t)args->fences;
        u32 __user *class_ids = (u32 __user *)(uintptr_t)args->class_ids;
        struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);

        const u32 *syncpt_array =
                (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
                ctx->syncpts :
                ctx->ch->syncpts;
        u32 *local_class_ids = NULL;
        int err, i;

        job = nvhost_job_alloc(ctx->ch,
                        num_cmdbufs,
                        num_relocs,
                        num_waitchks,
                        num_syncpt_incrs);
        if (!job)
                return -ENOMEM;

        job->num_relocs = args->num_relocs;
        job->num_waitchk = args->num_waitchks;
        job->num_syncpts = args->num_syncpt_incrs;
        job->priority = ctx->priority;
        job->clientid = ctx->clientid;
        job->client_managed_syncpt =
                (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) ?
                ctx->client_managed_syncpt : ctx->ch->client_managed_syncpt;

        /* copy error notifier settings for this job */
        if (ctx->error_notifier_ref) {
                get_dma_buf(ctx->error_notifier_ref);
                job->error_notifier_ref = ctx->error_notifier_ref;
                job->error_notifier_offset = ctx->error_notifier_offset;
        }

        /* mass copy class_ids */
        if (args->class_ids) {
                local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
                        GFP_KERNEL);
                if (!local_class_ids) {
                        err = -ENOMEM;
                        goto fail;
                }
                if (copy_from_user(local_class_ids, class_ids,
                                sizeof(u32) * num_cmdbufs)) {
                        err = -EFAULT;
                        goto fail;
                }
        }

        for (i = 0; i < num_cmdbufs; ++i) {
                struct nvhost_cmdbuf cmdbuf;
                struct nvhost_cmdbuf_ext cmdbuf_ext;
                u32 class_id = class_ids ? local_class_ids[i] : 0;

                if (copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf))) {
                        err = -EFAULT;
                        goto fail;
                }

                /* fall back to no pre-fence if the extension is absent
                 * or cannot be copied */
                cmdbuf_ext.pre_fence = -1;
                if (cmdbuf_exts &&
                    copy_from_user(&cmdbuf_ext, cmdbuf_exts + i,
                                   sizeof(cmdbuf_ext)))
                        cmdbuf_ext.pre_fence = -1;

                /* verify that the given class id is valid for this engine */
                if (class_id &&
                    class_id != pdata->class &&
                    class_id != NV_HOST1X_CLASS_ID) {
                        err = -EINVAL;
                        goto fail;
                }

                nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
                                      cmdbuf.offset, class_id,
                                      cmdbuf_ext.pre_fence);
        }

        kfree(local_class_ids);
        local_class_ids = NULL;

        if (copy_from_user(job->relocarray,
                        relocs, sizeof(*relocs) * num_relocs)) {
                err = -EFAULT;
                goto fail;
        }

        if (copy_from_user(job->relocshiftarray,
                        reloc_shifts, sizeof(*reloc_shifts) * num_relocs)) {
                err = -EFAULT;
                goto fail;
        }

        if (copy_from_user(job->waitchk,
                        waitchks, sizeof(*waitchks) * num_waitchks)) {
                err = -EFAULT;
                goto fail;
        }

        /*
         * Go through each syncpoint from userspace. Here we:
         * - Copy syncpoint information
         * - Validate each syncpoint
         * - Determine the index of hwctx syncpoint in the table
         */

        for (i = 0; i < num_syncpt_incrs; ++i) {
                struct nvhost_syncpt_incr sp;
                bool found = false;
                int j;

                /* Copy */
                if (copy_from_user(&sp, syncpt_incrs + i, sizeof(sp))) {
                        err = -EFAULT;
                        goto fail;
                }

                /* Validate the trivial case */
                if (sp.syncpt_id == 0) {
                        err = -EINVAL;
                        goto fail;
                }

                /* ..and then ensure that the syncpoints have been reserved
                 * for this client */
                for (j = 0; j < NVHOST_MODULE_MAX_SYNCPTS; j++) {
                        if (syncpt_array[j] == sp.syncpt_id) {
                                found = true;
                                break;
                        }
                }

                if (!found) {
                        err = -EINVAL;
                        goto fail;
                }

                /* Store and get a reference */
                job->sp[i].id = sp.syncpt_id;
                job->sp[i].incrs = sp.syncpt_incrs;
        }

        trace_nvhost_channel_submit(ctx->pdev->name,
                job->num_gathers, job->num_relocs, job->num_waitchk,
                job->sp[0].id,
                job->sp[0].incrs);

        err = nvhost_module_busy(ctx->pdev);
        if (err)
                goto fail;

        err = nvhost_job_pin(job, &nvhost_get_host(ctx->pdev)->syncpt);
        nvhost_module_idle(ctx->pdev);
        if (err)
                goto fail;

        if (args->timeout)
                job->timeout = min(ctx->timeout, args->timeout);
        else
                job->timeout = ctx->timeout;
        job->timeout_debug_dump = ctx->timeout_debug_dump;

        err = nvhost_channel_submit(job);
        if (err)
                goto fail_submit;

        /* Deliver multiple fences back to the userspace; best effort, a
         * failed copy leaves the fence array partially filled */
        if (fences)
                for (i = 0; i < num_syncpt_incrs; ++i) {
                        u32 fence = get_job_fence(job, i);

                        if (copy_to_user(fences, &fence, sizeof(u32)))
                                break;
                        fences++;
                }

        /* Deliver the fence using the old mechanism _only_ if a single
         * syncpoint is used. */

        if (args->flags & BIT(NVHOST_SUBMIT_FLAG_SYNC_FENCE_FD)) {
                struct nvhost_ctrl_sync_fence_info pts[num_syncpt_incrs];

                for (i = 0; i < num_syncpt_incrs; i++) {
                        pts[i].id = job->sp[i].id;
                        pts[i].thresh = get_job_fence(job, i);
                }

                err = nvhost_sync_create_fence_fd(ctx->pdev,
                                pts, num_syncpt_incrs, "fence", &args->fence);
                if (err)
                        goto fail;
        } else if (num_syncpt_incrs == 1)
                args->fence = get_job_fence(job, 0);
        else
                args->fence = 0;

        nvhost_job_put(job);

        return 0;

fail_submit:
        nvhost_job_unpin(job);
fail:
        nvhost_job_put(job);
        kfree(local_class_ids);

        nvhost_err(&pdata->pdev->dev, "failed with err %d\n", err);

        return err;
}

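/* Map a module id onto its index in the per-device clock table. */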
static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
{
        int i;
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);

        for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
                if (pdata->clocks[i].moduleid == moduleid)
                        return i;
        }

        /* Old user space is sending a random number in args. Return clock
         * zero in these cases. */
        return 0;
}

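/*
 * Clock rate ioctl helpers. The moduleid argument packs a module id and
 * an attribute field. On pre-silicon platforms errors are ignored and
 * get_rate reports a dummy rate instead.
 */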
static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
        struct nvhost_clk_rate_args *arg)
{
        u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
                        & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
        u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
                        & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
        int index = moduleid ?
                        moduleid_to_index(ctx->pdev, moduleid) : 0;
        int err;

        err = nvhost_module_set_rate(ctx->pdev, ctx, arg->rate, index, attr);
        if (!tegra_platform_is_silicon() && err) {
                nvhost_dbg(dbg_clk, "ignoring error: module=%u, attr=%u, index=%d, err=%d",
                           moduleid, attr, index, err);
                err = 0;
        }

        return err;
}

static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
        u32 moduleid, u32 *rate)
{
        int index = moduleid ? moduleid_to_index(ctx->pdev, moduleid) : 0;
        unsigned long rate_ul = 0;
        int err;

        /* nvhost_module_get_rate() fills in an unsigned long; go through
         * a local so we never write past the caller's u32 on 64-bit */
        err = nvhost_module_get_rate(ctx->pdev, &rate_ul, index);
        *rate = (u32)rate_ul;
        if (!tegra_platform_is_silicon() && err) {
                nvhost_dbg(dbg_clk, "ignoring error: module=%u, rate=%u, error=%d",
                           moduleid, *rate, err);
                err = 0;
                /* fake the return value */
                *rate = 32 * 1024;
        }

        return err;
}

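/*
 * Read or write a set of register blocks on behalf of userspace,
 * bouncing the data through a 64-word on-stack buffer. For virtual
 * devices the request is forwarded to the vhost backend.
 */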
static int nvhost_ioctl_channel_module_regrdwr(
        struct nvhost_channel_userctx *ctx,
        struct nvhost_ctrl_module_regrdwr_args *args)
{
        u32 num_offsets = args->num_offsets;
        u32 __user *offsets = (u32 __user *)(uintptr_t)args->offsets;
        u32 __user *values = (u32 __user *)(uintptr_t)args->values;
        u32 vals[64];
        struct platform_device *ndev;

        trace_nvhost_ioctl_channel_module_regrdwr(args->id,
                args->num_offsets, args->write);

        /* Check that there is something to read and that block size is
         * u32 aligned */
        if (num_offsets == 0 || args->block_size & 3)
                return -EINVAL;

        ndev = ctx->pdev;

        if (nvhost_dev_is_virtual(ndev))
                return vhost_rdwr_module_regs(ndev, num_offsets,
                                args->block_size, offsets, values, args->write);

        while (num_offsets--) {
                int err;
                u32 offs;
                int remaining = args->block_size >> 2;

                if (get_user(offs, offsets))
                        return -EFAULT;

                offsets++;
                while (remaining) {
                        int batch = min(remaining, 64);

                        if (args->write) {
                                if (copy_from_user(vals, values,
                                                batch * sizeof(u32)))
                                        return -EFAULT;

                                err = nvhost_write_module_regs(ndev,
                                        offs, batch, vals);
                                if (err)
                                        return err;
                        } else {
                                err = nvhost_read_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;

                                if (copy_to_user(values, vals,
                                                batch * sizeof(u32)))
                                        return -EFAULT;
                        }

                        remaining -= batch;
                        offs += batch * sizeof(u32);
                        values += batch;
                }
        }

        return 0;
}

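/* Build a bitmask out of the ids (range 1..31) listed in @words. */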
static u32 create_mask(u32 *words, int num)
{
        int i;
        u32 word = 0;

        for (i = 0; i < num; i++) {
                if (!words[i] || words[i] > 31)
                        continue;
                word |= BIT(words[i]);
        }

        return word;
}

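/*
 * Return the syncpoint mask for this context: built from the
 * per-instance syncpoints under RESOURCE_PER_CHANNEL_INSTANCE policy,
 * from the channel syncpoints otherwise.
 */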
static u32 nvhost_ioctl_channel_get_syncpt_mask(
                struct nvhost_channel_userctx *priv)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(priv->pdev);
        u32 mask;

        if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
                mask = create_mask(priv->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
        else
                mask = create_mask(priv->ch->syncpts,
                                                NVHOST_MODULE_MAX_SYNCPTS);

        return mask;
}

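/*
 * Look up, or lazily allocate, the channel syncpoint at @index,
 * serialized by ch->syncpts_lock.
 */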
static u32 nvhost_ioctl_channel_get_syncpt_channel(struct nvhost_channel *ch,
                struct nvhost_device_data *pdata, u32 index)
{
        u32 id;

        mutex_lock(&ch->syncpts_lock);

        /* if we already have required syncpt then return it ... */
        id = ch->syncpts[index];
        if (id)
                goto exit_unlock;

        /* ... otherwise get a new syncpt dynamically */
        id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
        if (!id)
                goto exit_unlock;

        /* ... and store it for further references */
        ch->syncpts[index] = id;

exit_unlock:
        mutex_unlock(&ch->syncpts_lock);
        return id;
}

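/*
 * Per-instance variant of the above. No lock is taken here since ioctls
 * on a given fd are already serialized by priv->ioctl_lock.
 */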
static u32 nvhost_ioctl_channel_get_syncpt_instance(
                struct nvhost_channel_userctx *ctx,
                struct nvhost_device_data *pdata, u32 index)
{
        u32 id;

        /* if we already have required syncpt then return it ... */
        if (ctx->syncpts[index]) {
                id = ctx->syncpts[index];
                return id;
        }

        /* ... otherwise get a new syncpt dynamically */
        id = nvhost_get_syncpt_host_managed(pdata->pdev, index, NULL);
        if (!id)
                return 0;

        /* ... and store it for further references */
        ctx->syncpts[index] = id;

        return id;
}

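/*
 * Return, allocating on first use, the client-managed syncpoint for
 * this context (or channel), named "<device>_<caller supplied name>".
 */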
static int nvhost_ioctl_channel_get_client_syncpt(
                struct nvhost_channel_userctx *ctx,
                struct nvhost_get_client_managed_syncpt_arg *args)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(ctx->pdev);
        const char __user *args_name =
                (const char __user *)(uintptr_t)args->name;
        char name[32];
        char set_name[32];

        /* prepare syncpoint name (in case it is needed) */
        if (args_name) {
                if (strncpy_from_user(name, args_name, sizeof(name)) < 0)
                        return -EFAULT;
                name[sizeof(name) - 1] = '\0';
        } else {
                name[0] = '\0';
        }

        snprintf(set_name, sizeof(set_name),
                "%s_%s", dev_name(&ctx->pdev->dev), name);

        if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
                if (!ctx->client_managed_syncpt)
                        ctx->client_managed_syncpt =
                                nvhost_get_syncpt_client_managed(pdata->pdev,
                                                                set_name);
                args->value = ctx->client_managed_syncpt;
        } else {
                struct nvhost_channel *ch = ctx->ch;

                mutex_lock(&ch->syncpts_lock);
                if (!ch->client_managed_syncpt)
                        ch->client_managed_syncpt =
                                nvhost_get_syncpt_client_managed(pdata->pdev,
                                                                set_name);
                mutex_unlock(&ch->syncpts_lock);
                args->value = ch->client_managed_syncpt;
        }

        if (!args->value)
                return -EAGAIN;

        return 0;
}

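/*
 * Main ioctl dispatcher for the channel node. Arguments are staged
 * through an on-stack buffer, and calls on the same fd are serialized
 * with priv->ioctl_lock.
 */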
static long nvhost_channelctl(struct file *filp,
        unsigned int cmd, unsigned long arg)
{
        struct nvhost_channel_userctx *priv = filp->private_data;
        struct nvhost_master *host;
        struct device *dev;
        u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
        int err = 0;

        if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
                (_IOC_NR(cmd) == 0) ||
                (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
                (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
                return -EFAULT;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        /* serialize calls from this fd */
        mutex_lock(&priv->ioctl_lock);
        if (!priv->pdev) {
                pr_warn("Channel already unmapped\n");
                mutex_unlock(&priv->ioctl_lock);
                return -EFAULT;
        }

        host = nvhost_get_host(priv->pdev);
        dev = &priv->pdev->dev;
        switch (cmd) {
        case NVHOST_IOCTL_CHANNEL_OPEN:
        {
                int fd;
                struct file *file;
                char *name;

                err = get_unused_fd_flags(O_RDWR);
                if (err < 0)
                        break;
                fd = err;

                name = kasprintf(GFP_KERNEL, "nvhost-%s-fd%d",
                                dev_name(dev), fd);
                if (!name) {
                        err = -ENOMEM;
                        put_unused_fd(fd);
                        break;
                }

                file = anon_inode_getfile(name, filp->f_op, NULL, O_RDWR);
                kfree(name);
                if (IS_ERR(file)) {
                        err = PTR_ERR(file);
                        put_unused_fd(fd);
                        break;
                }

                err = __nvhost_channelopen(NULL, priv->pdev, file);
                if (err) {
                        put_unused_fd(fd);
                        fput(file);
                        break;
                }

                /* install the fd only once the open can no longer fail */
                fd_install(fd, file);

                ((struct nvhost_channel_open_args *)buf)->channel_fd = fd;
                break;
        }
        case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
        {
                ((struct nvhost_get_param_args *)buf)->value =
                        nvhost_ioctl_channel_get_syncpt_mask(priv);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
        {
                struct nvhost_device_data *pdata =
                        platform_get_drvdata(priv->pdev);
                struct nvhost_get_param_arg *arg =
                        (struct nvhost_get_param_arg *)buf;

                if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS) {
                        err = -EINVAL;
                        break;
                }

                if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE)
                        arg->value = nvhost_ioctl_channel_get_syncpt_instance(
                                                priv, pdata, arg->param);
                else
                        arg->value = nvhost_ioctl_channel_get_syncpt_channel(
                                                priv->ch, pdata, arg->param);
                if (!arg->value) {
                        err = -EAGAIN;
                        break;
                }
                break;
        }
        case NVHOST_IOCTL_CHANNEL_GET_CLIENT_MANAGED_SYNCPOINT:
        {
                err = nvhost_ioctl_channel_get_client_syncpt(priv,
                        (struct nvhost_get_client_managed_syncpt_arg *)buf);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_FREE_CLIENT_MANAGED_SYNCPOINT:
                break;
        case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
        {
                ((struct nvhost_get_param_args *)buf)->value = 0;
                break;
        }
        case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
        {
                err = -EINVAL;
                break;
        }
        case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
        {
                struct nvhost_device_data *pdata =
                        platform_get_drvdata(priv->pdev);

                ((struct nvhost_get_param_args *)buf)->value =
                        create_mask(pdata->modulemutexes,
                                        NVHOST_MODULE_MAX_MODMUTEXES);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
        {
                struct nvhost_device_data *pdata =
                        platform_get_drvdata(priv->pdev);
                struct nvhost_get_param_arg *arg =
                        (struct nvhost_get_param_arg *)buf;

                if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES ||
                    !pdata->modulemutexes[arg->param]) {
                        err = -EINVAL;
                        break;
                }

                arg->value = pdata->modulemutexes[arg->param];
                break;
        }
        case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
                break;
        case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
        {
                struct nvhost_clk_rate_args *arg =
                                (struct nvhost_clk_rate_args *)buf;

                err = nvhost_ioctl_channel_get_rate(priv,
                                arg->moduleid, &arg->rate);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
        {
                struct nvhost_clk_rate_args *arg =
                                (struct nvhost_clk_rate_args *)buf;

                /* if virtualized, client requests to change clock rate
                 * are ignored
                 */
                if (nvhost_dev_is_virtual(priv->pdev))
                        break;

                err = nvhost_ioctl_channel_set_rate(priv, arg);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
        {
                u32 timeout =
                        (u32)((struct nvhost_set_timeout_args *)buf)->timeout;

                priv->timeout = timeout;
                dev_dbg(&priv->pdev->dev,
                        "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
                        __func__, priv->timeout, priv);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
                ((struct nvhost_get_param_args *)buf)->value = false;
                break;
        case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
                priv->priority =
                        (u32)((struct nvhost_set_priority_args *)buf)->priority;
                break;
        case NVHOST32_IOCTL_CHANNEL_MODULE_REGRDWR:
        {
                struct nvhost32_ctrl_module_regrdwr_args *args32 =
                        (struct nvhost32_ctrl_module_regrdwr_args *)buf;
                struct nvhost_ctrl_module_regrdwr_args args;

                args.id = args32->id;
                args.num_offsets = args32->num_offsets;
                args.block_size = args32->block_size;
                args.offsets = args32->offsets;
                args.values = args32->values;
                args.write = args32->write;
                err = nvhost_ioctl_channel_module_regrdwr(priv, &args);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
                err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
                break;
        case NVHOST32_IOCTL_CHANNEL_SUBMIT:
        {
                struct nvhost_device_data *pdata =
                        platform_get_drvdata(priv->pdev);
                struct nvhost32_submit_args *args32 = (void *)buf;
                struct nvhost_submit_args args;
                void *identifier;

                if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
                    !pdata->exclusive)
                        identifier = (void *)pdata;
                else
                        identifier = (void *)priv;

                memset(&args, 0, sizeof(args));
                args.submit_version = args32->submit_version;
                args.num_syncpt_incrs = args32->num_syncpt_incrs;
                args.num_cmdbufs = args32->num_cmdbufs;
                args.num_relocs = args32->num_relocs;
                args.num_waitchks = args32->num_waitchks;
                args.timeout = args32->timeout;
                args.syncpt_incrs = args32->syncpt_incrs;
                args.fence = args32->fence;

                args.cmdbufs = args32->cmdbufs;
                args.relocs = args32->relocs;
                args.reloc_shifts = args32->reloc_shifts;
                args.waitchks = args32->waitchks;
                args.class_ids = args32->class_ids;
                args.fences = args32->fences;

                /* first, get a channel */
                err = nvhost_channel_map(pdata, &priv->ch, identifier);
                if (err)
                        break;

                /* ..then, synchronize syncpoint information.
                 *
                 * This information is updated only in this ioctl and
                 * in channel destruction. We already hold the channel
                 * reference and this ioctl is serialized => no-one is
                 * modifying the syncpoint field concurrently.
                 *
                 * Synchronization does not destroy anything in the
                 * structure; we can only allocate new syncpoints, so
                 * old ones cannot be released by the following
                 * operation. If some syncpoint is stored into the
                 * channel structure, it remains there. */

                if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
                        memcpy(priv->ch->syncpts, priv->syncpts,
                               sizeof(priv->syncpts));
                        priv->ch->client_managed_syncpt =
                                priv->client_managed_syncpt;
                }

                /* submit work */
                err = nvhost_ioctl_channel_submit(priv, &args);

                /* ..and drop the local reference */
                nvhost_putchannel(priv->ch, 1);

                args32->fence = args.fence;

                break;
        }
        case NVHOST_IOCTL_CHANNEL_SUBMIT:
        {
                struct nvhost_device_data *pdata =
                        platform_get_drvdata(priv->pdev);
                void *identifier;

                if (pdata->resource_policy == RESOURCE_PER_DEVICE &&
                    !pdata->exclusive)
                        identifier = (void *)pdata;
                else
                        identifier = (void *)priv;

                /* first, get a channel */
                err = nvhost_channel_map(pdata, &priv->ch, identifier);
                if (err)
                        break;

                /* ..then, synchronize syncpoint information (see the
                 * NVHOST32 variant above for the full rationale) */

                if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
                        memcpy(priv->ch->syncpts, priv->syncpts,
                               sizeof(priv->syncpts));
                        priv->ch->client_managed_syncpt =
                                priv->client_managed_syncpt;
                }

                /* submit work */
                err = nvhost_ioctl_channel_submit(priv, (void *)buf);

                /* ..and drop the local reference */
                nvhost_putchannel(priv->ch, 1);

                break;
        }
        case NVHOST_IOCTL_CHANNEL_SET_ERROR_NOTIFIER:
                err = nvhost_init_error_notifier(priv,
                        (struct nvhost_set_error_notifier *)buf);
                break;
        case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
        {
                u32 timeout =
                        (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
                bool timeout_debug_dump = !((u32)
                        ((struct nvhost_set_timeout_ex_args *)buf)->flags &
                        (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));

                priv->timeout = timeout;
                priv->timeout_debug_dump = timeout_debug_dump;
                dev_dbg(&priv->pdev->dev,
                        "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
                        __func__, priv->timeout, priv);
                break;
        }
        default:
                nvhost_dbg_info("unrecognized ioctl cmd: 0x%x", cmd);
                err = -ENOTTY;
                break;
        }

        mutex_unlock(&priv->ioctl_lock);

        if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
                err = copy_to_user((void __user *)arg, buf,
                                _IOC_SIZE(cmd)) ? -EFAULT : 0;

        return err;
}

static const struct file_operations nvhost_channelops = {
        .owner = THIS_MODULE,
        .release = nvhost_channelrelease,
        .open = nvhost_channelopen,
#ifdef CONFIG_COMPAT
        .compat_ioctl = nvhost_channelctl,
#endif
        .unlocked_ioctl = nvhost_channelctl
};

static const char *get_device_name_for_dev(struct platform_device *dev)
{
        struct nvhost_device_data *pdata = nvhost_get_devdata(dev);

        if (pdata->devfs_name)
                return pdata->devfs_name;

        return dev->name;
}

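/*
 * Register a character device and create the matching
 * IFACE_NAME-<cdev_name><dev_name>[.<id>] node. Returns NULL on
 * failure.
 */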
static struct device *nvhost_client_device_create(
        struct platform_device *pdev, struct cdev *cdev,
        const char *cdev_name, dev_t devno,
        const struct file_operations *ops)
{
        struct nvhost_master *host = nvhost_get_host(pdev);
        const char *use_dev_name;
        struct device *dev;
        int err;

        nvhost_dbg_fn("");

        BUG_ON(!host);

        cdev_init(cdev, ops);
        cdev->owner = THIS_MODULE;

        err = cdev_add(cdev, devno, 1);
        if (err < 0) {
                dev_err(&pdev->dev, "failed to add cdev\n");
                return NULL;
        }

        use_dev_name = get_device_name_for_dev(pdev);

        dev = device_create(host->nvhost_class,
                        NULL, devno, NULL,
                        (pdev->id <= 0) ?
                        IFACE_NAME "-%s%s" :
                        IFACE_NAME "-%s%s.%d",
                        cdev_name, use_dev_name, pdev->id);

        if (IS_ERR(dev)) {
                err = PTR_ERR(dev);
                dev_err(&pdev->dev,
                        "failed to create %s %s device for %s\n",
                        use_dev_name, cdev_name, pdev->name);
                return NULL;
        }

        return dev;
}

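/*
 * Create the userspace interface: a chrdev region of NVHOST_NUM_CDEV
 * minors, the channel node, and (when ctrl_ops is set) the ctrl- node.
 */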
#define NVHOST_NUM_CDEV 4
int nvhost_client_user_init(struct platform_device *dev)
{
        dev_t devno;
        int err;
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);

        /* reserve NVHOST_NUM_CDEV minor numbers for <dev> and ctrl-<dev> */

        err = alloc_chrdev_region(&devno, 0, NVHOST_NUM_CDEV, IFACE_NAME);
        if (err < 0) {
                dev_err(&dev->dev, "failed to allocate devno\n");
                goto fail;
        }
        pdata->cdev_region = devno;

        pdata->node = nvhost_client_device_create(dev, &pdata->cdev,
                                "", devno, &nvhost_channelops);
        if (pdata->node == NULL) {
                err = -ENOMEM;
                goto fail;
        }

        /* module control (non-channel based, global) interface */
        if (pdata->ctrl_ops) {
                ++devno;
                pdata->ctrl_node = nvhost_client_device_create(dev,
                                        &pdata->ctrl_cdev, "ctrl-",
                                        devno, pdata->ctrl_ops);
                if (pdata->ctrl_node == NULL) {
                        err = -ENOMEM;
                        goto fail;
                }
        }

        return 0;
fail:
        return err;
}

static void nvhost_client_user_deinit(struct platform_device *dev)
{
        struct nvhost_master *nvhost_master = nvhost_get_host(dev);
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);

        if (pdata->node) {
                device_destroy(nvhost_master->nvhost_class, pdata->cdev.dev);
                cdev_del(&pdata->cdev);
        }

        if (pdata->as_node) {
                device_destroy(nvhost_master->nvhost_class, pdata->as_cdev.dev);
                cdev_del(&pdata->as_cdev);
        }

        if (pdata->ctrl_node) {
                device_destroy(nvhost_master->nvhost_class,
                               pdata->ctrl_cdev.dev);
                cdev_del(&pdata->ctrl_cdev);
        }

        unregister_chrdev_region(pdata->cdev_region, NVHOST_NUM_CDEV);
}

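/*
 * Bring up the userspace-facing parts of a client device: debugfs,
 * device nodes, device-list registration, scaling and DMA parameters,
 * plus the optional hw_init() hook.
 */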
int nvhost_client_device_init(struct platform_device *dev)
{
        int err;
        struct nvhost_master *nvhost_master = nvhost_get_host(dev);
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);

        mutex_init(&pdata->userctx_list_lock);
        INIT_LIST_HEAD(&pdata->userctx_list);

        /* Create debugfs directory for the device */
        nvhost_device_debug_init(dev);

        err = nvhost_client_user_init(dev);
        if (err)
                goto fail;

        err = nvhost_device_list_add(dev);
        if (err)
                goto fail;

        if (pdata->scaling_init)
                pdata->scaling_init(dev);

        /* reset syncpoint values for this unit */
        err = nvhost_module_busy(nvhost_master->dev);
        if (err)
                goto fail_busy;

        nvhost_module_idle(nvhost_master->dev);

        /* Initialize dma parameters */
        dev->dev.dma_parms = &pdata->dma_parms;
        dma_set_max_seg_size(&dev->dev, UINT_MAX);

        dev_info(&dev->dev, "initialized\n");

        if (pdata->resource_policy == RESOURCE_PER_CHANNEL_INSTANCE) {
                nvhost_master->info.channel_policy = MAP_CHANNEL_ON_SUBMIT;
                nvhost_update_characteristics(dev);
        }

        if (pdata->hw_init)
                return pdata->hw_init(dev);

        return 0;

fail_busy:
        /* Remove from nvhost device list */
        nvhost_device_list_remove(dev);
fail:
        dev_err(&dev->dev, "failed to init client device\n");
        nvhost_client_user_deinit(dev);
        nvhost_device_debug_deinit(dev);
        return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);

int nvhost_client_device_release(struct platform_device *dev)
{
        /* Release nvhost module resources */
        nvhost_module_deinit(dev);

        /* Remove from nvhost device list */
        nvhost_device_list_remove(dev);

        /* Release chardev and device node for user space */
        nvhost_client_user_deinit(dev);

        /* Remove debugFS */
        nvhost_device_debug_deinit(dev);

        return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);

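/*
 * Ioremap every IORESOURCE_MEM resource of the device and record the
 * mappings in pdata->aperture[].
 */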
int nvhost_device_get_resources(struct platform_device *dev)
{
        int i;
        void __iomem *regs = NULL;
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);

        for (i = 0; i < dev->num_resources; i++) {
                struct resource *r = NULL;

                r = platform_get_resource(dev, IORESOURCE_MEM, i);
                /* We've run out of mem resources */
                if (!r)
                        break;

                regs = devm_request_and_ioremap(&dev->dev, r);
                if (!regs)
                        goto fail;

                pdata->aperture[i] = regs;
        }

        return 0;

fail:
        dev_err(&dev->dev, "failed to get register memory\n");

        return -ENXIO;
}

int nvhost_client_device_get_resources(struct platform_device *dev)
{
        return nvhost_device_get_resources(dev);
}
EXPORT_SYMBOL(nvhost_client_device_get_resources);

/* This is a simple wrapper around request_firmware that takes
 * 'fw_name' and, if available, applies a SoC-relative path prefix to it.
 * The caller is responsible for calling release_firmware later.
 */
const struct firmware *
nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
{
        struct nvhost_chip_support *op = nvhost_get_chip_ops();
        const struct firmware *fw;
        char *fw_path = NULL;
        int path_len, err;

        /* This field is NULL when calling from SYS_EXIT.
         * Check it here to prevent a crash in request_firmware. */
        if (!current->fs) {
                BUG();
                return NULL;
        }

        if (!fw_name)
                return NULL;

        if (op->soc_name) {
                path_len = strlen(fw_name) + strlen(op->soc_name);
                path_len += 2; /* for the path separator and zero terminator */

                fw_path = kzalloc(sizeof(*fw_path) * path_len,
                                     GFP_KERNEL);
                if (!fw_path)
                        return NULL;

                sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
                fw_name = fw_path;
        }

        err = request_firmware(&fw, fw_name, &dev->dev);
        kfree(fw_path);
        if (err) {
                dev_err(&dev->dev, "failed to get firmware\n");
                return NULL;
        }

        /* note: caller must release_firmware */
        return fw;
}
EXPORT_SYMBOL(nvhost_client_request_firmware);

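/*
 * Find the channel currently associated with @clientid, if any, by
 * walking this device's list of open contexts.
 */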
struct nvhost_channel *nvhost_find_chan_by_clientid(
                                struct platform_device *pdev,
                                int clientid)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
        struct nvhost_channel_userctx *ctx;
        struct nvhost_channel *ch = NULL;

        mutex_lock(&pdata->userctx_list_lock);
        list_for_each_entry(ctx, &pdata->userctx_list, node) {
                if (ctx->clientid == clientid) {
                        ch = ctx->ch;
                        break;
                }
        }
        mutex_unlock(&pdata->userctx_list_lock);

        return ch;
}