video: tegra: host: Userspace deliver class ids
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * Tegra Graphics Host Client Module
3  *
4  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spinlock.h>
22 #include <linux/fs.h>
23 #include <linux/cdev.h>
24 #include <linux/uaccess.h>
25 #include <linux/file.h>
26 #include <linux/clk.h>
27 #include <linux/hrtimer.h>
28 #include <linux/export.h>
29 #include <linux/firmware.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/tegra-soc.h>
32
33 #include <trace/events/nvhost.h>
34
35 #include <linux/io.h>
36 #include <linux/string.h>
37
38 #include <linux/nvhost.h>
39 #include <linux/nvhost_ioctl.h>
40
41 #include <mach/gpufuse.h>
42
43 #include "debug.h"
44 #include "bus_client.h"
45 #include "dev.h"
46 #include "class_ids.h"
47 #include "nvhost_as.h"
48 #include "nvhost_memmgr.h"
49 #include "chip_support.h"
50 #include "nvhost_acm.h"
51
52 #include "nvhost_syncpt.h"
53 #include "nvhost_channel.h"
54 #include "nvhost_job.h"
55 #include "nvhost_hwctx.h"
56 #include "user_hwctx.h"
57
58 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
59 {
60         int err = 0;
61         struct resource *r;
62         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
63
64         r = platform_get_resource(pdata->master ? pdata->master : ndev,
65                         IORESOURCE_MEM, 0);
66         if (!r) {
67                 dev_err(&ndev->dev, "failed to get memory resource\n");
68                 return -ENODEV;
69         }
70
71         if (offset + 4 * count > resource_size(r)
72                         || (offset + 4 * count < offset))
73                 err = -EPERM;
74
75         return err;
76 }
77
78 static __iomem void *get_aperture(struct platform_device *pdev)
79 {
80         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
81
82         if (pdata->master)
83                 pdata = platform_get_drvdata(pdata->master);
84
85         return pdata->aperture[0];
86 }
87
88 int nvhost_read_module_regs(struct platform_device *ndev,
89                         u32 offset, int count, u32 *values)
90 {
91         void __iomem *p = get_aperture(ndev);
92         int err;
93
94         if (!p)
95                 return -ENODEV;
96
97         /* verify offset */
98         err = validate_reg(ndev, offset, count);
99         if (err)
100                 return err;
101
102         nvhost_module_busy(ndev);
103         p += offset;
104         while (count--) {
105                 *(values++) = readl(p);
106                 p += 4;
107         }
108         rmb();
109         nvhost_module_idle(ndev);
110
111         return 0;
112 }
113
114 int nvhost_write_module_regs(struct platform_device *ndev,
115                         u32 offset, int count, const u32 *values)
116 {
117         int err;
118         void __iomem *p = get_aperture(ndev);
119
120         if (!p)
121                 return -ENODEV;
122
123         /* verify offset */
124         err = validate_reg(ndev, offset, count);
125         if (err)
126                 return err;
127
128         nvhost_module_busy(ndev);
129         p += offset;
130         while (count--) {
131                 writel(*(values++), p);
132                 p += 4;
133         }
134         wmb();
135         nvhost_module_idle(ndev);
136
137         return 0;
138 }
139
140 bool nvhost_client_can_writel(struct platform_device *pdev)
141 {
142         return !!get_aperture(pdev);
143 }
144 EXPORT_SYMBOL(nvhost_client_can_writel);
145
146 void nvhost_client_writel(struct platform_device *pdev, u32 val, u32 reg)
147 {
148         writel(val, get_aperture(pdev) + reg * 4);
149 }
150
151 u32 nvhost_client_readl(struct platform_device *pdev, u32 reg)
152 {
153         return readl(get_aperture(pdev) + reg * 4);
154 }
155
/* Per-open-file state for a channel device node. */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel ref taken in open() */
	struct nvhost_hwctx *hwctx;	/* hw context; NULL if channel has no
					 * ctxhandler */
	struct nvhost_job *job;		/* held job reference, if any */
	struct mem_mgr *memmgr;		/* set via SET_NVMAP_FD ioctl */
	u32 timeout;			/* job timeout; 0 disables (forced to
					 * 0 on non-silicon platforms) */
	u32 priority;			/* NVHOST_PRIORITY_* for submits */
	int clientid;			/* unique id from host's counter */
	bool timeout_debug_dump;	/* dump state when a job times out */
};
166
/*
 * Release handler for a channel device node: undoes nvhost_channelopen().
 * Drops the module client registration, the hw context (after detaching it
 * from the channel), any held job reference, the channel reference and the
 * memory manager, then frees the private data.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		/* Detach our context from the channel under submitlock
		 * before dropping our reference to it. */
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch);

	/* NOTE(review): memmgr may be NULL if SET_NVMAP_FD was never
	 * called — presumably put_mgr tolerates NULL; confirm. */
	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
198
/*
 * Open handler for a channel device node.  Takes a channel reference,
 * allocates per-file state, registers as a module client, optionally
 * allocates a hw context, and seeds defaults (medium priority, platform
 * default timeout — forced to 0 off silicon, debug dump enabled).
 *
 * On failure, teardown is delegated to nvhost_channelrelease() on the
 * partially initialized state.
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch, false);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(dev_name(&ch->dev->dev));

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	if (nvhost_module_add_client(ch->dev, priv))
		goto fail;

	if (ch->ctxhandler && ch->ctxhandler->alloc) {
		/* Context allocation needs the module powered */
		nvhost_module_busy(ch->dev);
		priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
		nvhost_module_idle(ch->dev);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);
	pdata = platform_get_drvdata(ch->dev);
	priv->timeout = pdata->nvhost_timeout_default;
	priv->timeout_debug_dump = true;
	if (!tegra_platform_is_silicon())
		priv->timeout = 0;

	return 0;
fail:
	/* NOTE(review): this calls nvhost_module_remove_client() even when
	 * add_client failed above, and always reports -ENOMEM regardless of
	 * the underlying error — presumably benign; confirm. */
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
242
243 static int nvhost_ioctl_channel_alloc_obj_ctx(
244         struct nvhost_channel_userctx *ctx,
245         struct nvhost_alloc_obj_ctx_args *args)
246 {
247         int ret;
248
249         BUG_ON(!channel_op(ctx->ch).alloc_obj);
250         nvhost_module_busy(ctx->ch->dev);
251         ret = channel_op(ctx->ch).alloc_obj(ctx->hwctx, args);
252         nvhost_module_idle(ctx->ch->dev);
253         return ret;
254 }
255
256 static int nvhost_ioctl_channel_free_obj_ctx(
257         struct nvhost_channel_userctx *ctx,
258         struct nvhost_free_obj_ctx_args *args)
259 {
260         int ret;
261
262         BUG_ON(!channel_op(ctx->ch).free_obj);
263         nvhost_module_busy(ctx->ch->dev);
264         ret = channel_op(ctx->ch).free_obj(ctx->hwctx, args);
265         nvhost_module_idle(ctx->ch->dev);
266         return ret;
267 }
268
269 static int nvhost_ioctl_channel_alloc_gpfifo(
270         struct nvhost_channel_userctx *ctx,
271         struct nvhost_alloc_gpfifo_args *args)
272 {
273         int ret;
274
275         BUG_ON(!channel_op(ctx->ch).alloc_gpfifo);
276         nvhost_module_busy(ctx->ch->dev);
277         ret = channel_op(ctx->ch).alloc_gpfifo(ctx->hwctx, args);
278         nvhost_module_idle(ctx->ch->dev);
279         return ret;
280 }
281
282 static int nvhost_ioctl_channel_submit_gpfifo(
283         struct nvhost_channel_userctx *ctx,
284         struct nvhost_submit_gpfifo_args *args)
285 {
286         void *gpfifo;
287         u32 size;
288         int ret = 0;
289
290         if (!ctx->hwctx || ctx->hwctx->has_timedout)
291                 return -ETIMEDOUT;
292
293         size = args->num_entries * sizeof(struct nvhost_gpfifo);
294
295         gpfifo = kzalloc(size, GFP_KERNEL);
296         if (!gpfifo)
297                 return -ENOMEM;
298
299         if (copy_from_user(gpfifo,
300                            (void __user *)(uintptr_t)args->gpfifo, size)) {
301                 ret = -EINVAL;
302                 goto clean_up;
303         }
304
305         BUG_ON(!channel_op(ctx->ch).submit_gpfifo);
306
307         nvhost_module_busy(ctx->ch->dev);
308         ret = channel_op(ctx->ch).submit_gpfifo(ctx->hwctx, gpfifo,
309                         args->num_entries, &args->fence, args->flags);
310         nvhost_module_idle(ctx->ch->dev);
311 clean_up:
312         kfree(gpfifo);
313         return ret;
314 }
315
316 static int nvhost_ioctl_channel_wait(
317         struct nvhost_channel_userctx *ctx,
318         struct nvhost_wait_args *args)
319 {
320         int ret;
321
322         BUG_ON(!channel_op(ctx->ch).wait);
323         nvhost_module_busy(ctx->ch->dev);
324         ret = channel_op(ctx->ch).wait(ctx->hwctx, args);
325         nvhost_module_idle(ctx->ch->dev);
326         return ret;
327 }
328
329 static int nvhost_ioctl_channel_zcull_bind(
330         struct nvhost_channel_userctx *ctx,
331         struct nvhost_zcull_bind_args *args)
332 {
333         int ret;
334
335         BUG_ON(!channel_zcull_op(ctx->ch).bind);
336         nvhost_module_busy(ctx->ch->dev);
337         ret = channel_zcull_op(ctx->ch).bind(ctx->hwctx, args);
338         nvhost_module_idle(ctx->ch->dev);
339         return ret;
340 }
341
342 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
343                 struct nvhost_submit_args *args)
344 {
345         struct nvhost_job *job;
346         int num_cmdbufs = args->num_cmdbufs;
347         int num_relocs = args->num_relocs;
348         int num_waitchks = args->num_waitchks;
349         int num_syncpt_incrs = args->num_syncpt_incrs;
350         struct nvhost_cmdbuf __user *cmdbufs =
351                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
352         struct nvhost_reloc __user *relocs =
353                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
354         struct nvhost_reloc_shift __user *reloc_shifts =
355                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
356         struct nvhost_waitchk __user *waitchks =
357                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
358         struct nvhost_syncpt_incr __user *syncpt_incrs =
359                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
360         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
361         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
362         u32 __user *class_ids = (u32 *)(uintptr_t)args->class_ids;
363
364         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
365         u32 *local_waitbases = NULL, *local_class_ids = NULL;
366         int err, i, hwctx_syncpt_idx = -1;
367
368         if (num_syncpt_incrs > host->info.nb_pts)
369                 return -EINVAL;
370
371         job = nvhost_job_alloc(ctx->ch,
372                         ctx->hwctx,
373                         num_cmdbufs,
374                         num_relocs,
375                         num_waitchks,
376                         num_syncpt_incrs,
377                         ctx->memmgr);
378         if (!job)
379                 return -ENOMEM;
380
381         job->num_relocs = args->num_relocs;
382         job->num_waitchk = args->num_waitchks;
383         job->num_syncpts = args->num_syncpt_incrs;
384         job->priority = ctx->priority;
385         job->clientid = ctx->clientid;
386
387         /* mass copy class_ids */
388         if (args->class_ids) {
389                 local_class_ids = kzalloc(sizeof(u32) * num_cmdbufs,
390                         GFP_KERNEL);
391                 if (!local_class_ids) {
392                         err = -ENOMEM;
393                         goto fail;
394                 }
395                 err = copy_from_user(local_class_ids, class_ids,
396                         sizeof(u32) * num_cmdbufs);
397                 if (err) {
398                         err = -EINVAL;
399                         goto fail;
400                 }
401         }
402
403         for (i = 0; i < num_cmdbufs; ++i) {
404                 struct nvhost_cmdbuf cmdbuf;
405                 u32 class_id = class_ids ? local_class_ids[i] : 0;
406
407                 err = copy_from_user(&cmdbuf, cmdbufs + i, sizeof(cmdbuf));
408                 if (err)
409                         goto fail;
410
411                 nvhost_job_add_gather(job, cmdbuf.mem, cmdbuf.words,
412                                 cmdbuf.offset, class_id);
413         }
414
415         kfree(local_class_ids);
416         local_class_ids = NULL;
417
418         err = copy_from_user(job->relocarray,
419                         relocs, sizeof(*relocs) * num_relocs);
420         if (err)
421                 goto fail;
422
423         err = copy_from_user(job->relocshiftarray,
424                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
425         if (err)
426                 goto fail;
427
428         err = copy_from_user(job->waitchk,
429                         waitchks, sizeof(*waitchks) * num_waitchks);
430         if (err)
431                 goto fail;
432
433         /* mass copy waitbases */
434         if (args->waitbases) {
435                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
436                         GFP_KERNEL);
437                 if (!local_waitbases) {
438                         err = -ENOMEM;
439                         goto fail;
440                 }
441
442                 err = copy_from_user(local_waitbases, waitbases,
443                         sizeof(u32) * num_syncpt_incrs);
444                 if (err) {
445                         err = -EINVAL;
446                         goto fail;
447                 }
448         }
449
450         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
451         if (!ctx->hwctx)
452                 hwctx_syncpt_idx = 0;
453
454         /*
455          * Go through each syncpoint from userspace. Here we:
456          * - Copy syncpoint information
457          * - Validate each syncpoint
458          * - Determine waitbase for each syncpoint
459          * - Determine the index of hwctx syncpoint in the table
460          */
461
462         for (i = 0; i < num_syncpt_incrs; ++i) {
463                 u32 waitbase;
464                 struct nvhost_syncpt_incr sp;
465
466                 /* Copy */
467                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
468                 if (err)
469                         goto fail;
470
471                 /* Validate */
472                 if (sp.syncpt_id > host->info.nb_pts) {
473                         err = -EINVAL;
474                         goto fail;
475                 }
476
477                 /* Determine waitbase */
478                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
479                         waitbase = local_waitbases[i];
480                 else
481                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
482                                 sp.syncpt_id);
483
484                 /* Store */
485                 job->sp[i].id = sp.syncpt_id;
486                 job->sp[i].incrs = sp.syncpt_incrs;
487                 job->sp[i].waitbase = waitbase;
488
489                 /* Find hwctx syncpoint */
490                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
491                         hwctx_syncpt_idx = i;
492         }
493
494         /* not needed anymore */
495         kfree(local_waitbases);
496         local_waitbases = NULL;
497
498         /* Is hwctx_syncpt_idx valid? */
499         if (hwctx_syncpt_idx == -1) {
500                 err = -EINVAL;
501                 goto fail;
502         }
503
504         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
505
506         trace_nvhost_channel_submit(ctx->ch->dev->name,
507                 job->num_gathers, job->num_relocs, job->num_waitchk,
508                 job->sp[job->hwctx_syncpt_idx].id,
509                 job->sp[job->hwctx_syncpt_idx].incrs);
510
511         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
512         if (err)
513                 goto fail;
514
515         if (args->timeout)
516                 job->timeout = min(ctx->timeout, args->timeout);
517         else
518                 job->timeout = ctx->timeout;
519         job->timeout_debug_dump = ctx->timeout_debug_dump;
520
521         err = nvhost_channel_submit(job);
522         if (err)
523                 goto fail_submit;
524
525         /* Deliver multiple fences back to the userspace */
526         if (fences)
527                 for (i = 0; i < num_syncpt_incrs; ++i) {
528                         u32 fence = job->sp[i].fence;
529                         err = copy_to_user(fences, &fence, sizeof(u32));
530                         if (err)
531                                 break;
532                         fences++;
533                 }
534
535         /* Deliver the fence using the old mechanism _only_ if a single
536          * syncpoint is used. */
537
538         if (num_syncpt_incrs == 1)
539                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
540         else
541                 args->fence = 0;
542
543         nvhost_job_put(job);
544
545         return 0;
546
547 fail_submit:
548         nvhost_job_unpin(job);
549 fail:
550         nvhost_job_put(job);
551         kfree(local_class_ids);
552         kfree(local_waitbases);
553         return err;
554 }
555
556 static int nvhost_ioctl_channel_set_ctxswitch(
557                 struct nvhost_channel_userctx *ctx,
558                 struct nvhost_set_ctxswitch_args *args)
559 {
560         struct nvhost_cmdbuf cmdbuf_save;
561         struct nvhost_cmdbuf cmdbuf_restore;
562         struct nvhost_syncpt_incr save_incr, restore_incr;
563         u32 save_waitbase, restore_waitbase;
564         struct nvhost_reloc reloc;
565         struct nvhost_hwctx_handler *ctxhandler = NULL;
566         struct nvhost_hwctx *nhwctx = NULL;
567         struct user_hwctx *hwctx;
568         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
569         int err;
570
571         /* Only channels with context support */
572         if (!ctx->hwctx)
573                 return -EFAULT;
574
575         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
576         if (args->num_cmdbufs_save != 1
577                         || args->num_cmdbufs_restore != 1
578                         || args->num_save_incrs != 1
579                         || args->num_restore_incrs != 1
580                         || args->num_relocs != 1)
581                 return -EINVAL;
582
583         err = copy_from_user(&cmdbuf_save,
584                         (void *)(uintptr_t)args->cmdbuf_save,
585                         sizeof(cmdbuf_save));
586         if (err)
587                 goto fail;
588
589         err = copy_from_user(&cmdbuf_restore,
590                         (void *)(uintptr_t)args->cmdbuf_restore,
591                         sizeof(cmdbuf_restore));
592         if (err)
593                 goto fail;
594
595         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
596                         sizeof(reloc));
597         if (err)
598                 goto fail;
599
600         err = copy_from_user(&save_incr,
601                         (void *)(uintptr_t)args->save_incrs,
602                         sizeof(save_incr));
603         if (err)
604                 goto fail;
605         err = copy_from_user(&save_waitbase,
606                         (void *)(uintptr_t)args->save_waitbases,
607                         sizeof(save_waitbase));
608
609         err = copy_from_user(&restore_incr,
610                         (void *)(uintptr_t)args->restore_incrs,
611                         sizeof(restore_incr));
612         if (err)
613                 goto fail;
614         err = copy_from_user(&restore_waitbase,
615                         (void *)(uintptr_t)args->restore_waitbases,
616                         sizeof(restore_waitbase));
617
618         if (save_incr.syncpt_id != pdata->syncpts[0]
619                         || restore_incr.syncpt_id != pdata->syncpts[0]
620                         || save_waitbase != pdata->waitbases[0]
621                         || restore_waitbase != pdata->waitbases[0]) {
622                 err = -EINVAL;
623                 goto fail;
624         }
625         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
626                         save_waitbase, ctx->ch);
627         if (!ctxhandler) {
628                 err = -ENOMEM;
629                 goto fail;
630         }
631
632         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
633         if (!nhwctx) {
634                 err = -ENOMEM;
635                 goto fail_hwctx;
636         }
637         hwctx = to_user_hwctx(nhwctx);
638
639         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
640                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
641                         cmdbuf_restore.mem, cmdbuf_restore.offset,
642                         cmdbuf_restore.words,
643                         pdata->syncpts[0], pdata->waitbases[0],
644                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
645
646         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
647         if (!nhwctx->memmgr)
648                 goto fail_set_restore;
649
650         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
651                         cmdbuf_restore.offset, cmdbuf_restore.words);
652         if (err)
653                 goto fail_set_restore;
654
655         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
656                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
657         if (err)
658                 goto fail_set_save;
659
660         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
661         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
662
663         /* Free old context */
664         ctx->hwctx->h->put(ctx->hwctx);
665         ctx->hwctx = nhwctx;
666
667         return 0;
668
669 fail_set_save:
670 fail_set_restore:
671         ctxhandler->put(&hwctx->hwctx);
672 fail_hwctx:
673         user_ctxhandler_free(ctxhandler);
674 fail:
675         return err;
676 }
677
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
/* CYCLE_STATS ioctl: forward straight to the channel's cycle_stats op. */
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	BUG_ON(!channel_op(ctx->ch).cycle_stats);
	return channel_op(ctx->ch).cycle_stats(ctx->hwctx, args);
}
#endif
689
690 static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
691         struct nvhost_read_3d_reg_args *args)
692 {
693         return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
694                         args->offset, &args->value);
695 }
696
697 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
698 {
699         int i;
700         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
701
702         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
703                 if (pdata->clocks[i].moduleid == moduleid)
704                         return i;
705         }
706
707         /* Old user space is sending a random number in args. Return clock
708          * zero in these cases. */
709         return 0;
710 }
711
712 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
713         struct nvhost_clk_rate_args *arg)
714 {
715         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
716                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
717         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
718                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
719         int index = moduleid ?
720                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
721
722         return nvhost_module_set_rate(ctx->ch->dev,
723                         ctx, arg->rate, index, attr);
724 }
725
726 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
727         u32 moduleid, u32 *rate)
728 {
729         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
730
731         return nvhost_module_get_rate(ctx->ch->dev,
732                         (unsigned long *)rate, index);
733 }
734
/*
 * MODULE_REGRDWR ioctl: read or write a series of register blocks on the
 * channel's device.  For each of num_offsets user-supplied offsets,
 * block_size bytes are transferred in batches of at most 64 words through
 * the on-stack vals[] bounce buffer.
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];	/* bounce buffer: one batch of registers */
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		/* words remaining in the current block */
		int remaining = args->block_size >> 2;

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			/* limit each transfer to the vals[] capacity */
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
794
795 static u32 create_mask(u32 *words, int num)
796 {
797         int i;
798         u32 word = 0;
799         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
800                 word |= BIT(words[i]);
801
802         return word;
803 }
804
805 static long nvhost_channelctl(struct file *filp,
806         unsigned int cmd, unsigned long arg)
807 {
808         struct nvhost_channel_userctx *priv = filp->private_data;
809         struct device *dev = &priv->ch->dev->dev;
810         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
811         int err = 0;
812
813         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
814                 (_IOC_NR(cmd) == 0) ||
815                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
816                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
817                 return -EFAULT;
818
819         if (_IOC_DIR(cmd) & _IOC_WRITE) {
820                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
821                         return -EFAULT;
822         }
823
824         switch (cmd) {
825         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
826         {
827                 struct nvhost_device_data *pdata = \
828                         platform_get_drvdata(priv->ch->dev);
829                 ((struct nvhost_get_param_args *)buf)->value =
830                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
831                 break;
832         }
833         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
834         {
835                 struct nvhost_device_data *pdata = \
836                         platform_get_drvdata(priv->ch->dev);
837                 struct nvhost_get_param_arg *arg =
838                         (struct nvhost_get_param_arg *)buf;
839                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
840                                 || !pdata->syncpts[arg->param])
841                         return -EINVAL;
842                 arg->value = pdata->syncpts[arg->param];
843                 break;
844         }
845         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
846         {
847                 struct nvhost_device_data *pdata = \
848                         platform_get_drvdata(priv->ch->dev);
849                 ((struct nvhost_get_param_args *)buf)->value =
850                         create_mask(pdata->waitbases,
851                                         NVHOST_MODULE_MAX_WAITBASES);
852                 break;
853         }
854         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
855         {
856                 struct nvhost_device_data *pdata = \
857                         platform_get_drvdata(priv->ch->dev);
858                 struct nvhost_get_param_arg *arg =
859                         (struct nvhost_get_param_arg *)buf;
860                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
861                                 || !pdata->waitbases[arg->param])
862                         return -EINVAL;
863                 arg->value = pdata->waitbases[arg->param];
864                 break;
865         }
866         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
867         {
868                 struct nvhost_device_data *pdata = \
869                         platform_get_drvdata(priv->ch->dev);
870                 ((struct nvhost_get_param_args *)buf)->value =
871                         create_mask(pdata->modulemutexes,
872                                         NVHOST_MODULE_MAX_MODMUTEXES);
873                 break;
874         }
875         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
876         {
877                 struct nvhost_device_data *pdata = \
878                         platform_get_drvdata(priv->ch->dev);
879                 struct nvhost_get_param_arg *arg =
880                         (struct nvhost_get_param_arg *)buf;
881                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
882                                 || !pdata->modulemutexes[arg->param])
883                         return -EINVAL;
884                 arg->value = pdata->modulemutexes[arg->param];
885                 break;
886         }
887         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
888         {
889                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
890                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
891
892                 if (IS_ERR(new_client)) {
893                         err = PTR_ERR(new_client);
894                         break;
895                 }
896                 if (priv->memmgr)
897                         nvhost_memmgr_put_mgr(priv->memmgr);
898
899                 priv->memmgr = new_client;
900
901                 if (priv->hwctx)
902                         priv->hwctx->memmgr = new_client;
903
904                 break;
905         }
906         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
907                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
908                 break;
909         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
910                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
911                 break;
912         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
913                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
914                 break;
915         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
916                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
917                 break;
918         case NVHOST_IOCTL_CHANNEL_WAIT:
919                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
920                 break;
921         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
922                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
923                 break;
924 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
925         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
926                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
927                 break;
928 #endif
929         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
930                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
931                 break;
932         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
933         {
934                 struct nvhost_clk_rate_args *arg =
935                                 (struct nvhost_clk_rate_args *)buf;
936
937                 err = nvhost_ioctl_channel_get_rate(priv,
938                                 arg->moduleid, &arg->rate);
939                 break;
940         }
941         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
942         {
943                 struct nvhost_clk_rate_args *arg =
944                                 (struct nvhost_clk_rate_args *)buf;
945
946                 err = nvhost_ioctl_channel_set_rate(priv, arg);
947                 break;
948         }
949         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
950                 priv->timeout =
951                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
952                 dev_dbg(&priv->ch->dev->dev,
953                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
954                         __func__, priv->timeout, priv);
955                 break;
956         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
957                 ((struct nvhost_get_param_args *)buf)->value =
958                                 priv->hwctx->has_timedout;
959                 break;
960         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
961                 priv->priority =
962                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
963                 break;
964         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
965                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
966                 break;
967         case NVHOST_IOCTL_CHANNEL_SUBMIT:
968                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
969                 break;
970         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
971                 priv->timeout = (u32)
972                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
973                 priv->timeout_debug_dump = !((u32)
974                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
975                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
976                 dev_dbg(&priv->ch->dev->dev,
977                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
978                         __func__, priv->timeout, priv);
979                 break;
980         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
981                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
982                 break;
983         default:
984                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
985                 err = -ENOTTY;
986                 break;
987         }
988
989         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
990                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
991
992         return err;
993 }
994
/* File operations for the per-channel device node (IFACE_NAME "-<dev>").
 * NOTE(review): compat_ioctl reuses the native handler directly -- this
 * assumes every channel ioctl struct has an identical layout for 32-bit
 * and 64-bit userspace; confirm before relying on CONFIG_COMPAT here. */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_channelctl,
#endif
	.unlocked_ioctl = nvhost_channelctl
};
1004
1005 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
1006 {
1007         struct nvhost_channel_userctx *userctx;
1008         struct file *f = fget(fd);
1009         if (!f)
1010                 return 0;
1011
1012         if (f->f_op != &nvhost_channelops) {
1013                 fput(f);
1014                 return 0;
1015         }
1016
1017         userctx = (struct nvhost_channel_userctx *)f->private_data;
1018         fput(f);
1019         return userctx->hwctx;
1020 }
1021
1022
/* File operations for the address-space device node ("as-<dev>").
 * NOTE(review): as with nvhost_channelops, compat_ioctl reuses the
 * native handler -- assumes 32/64-bit-identical ioctl struct layout. */
static const struct file_operations nvhost_asops = {
	.owner = THIS_MODULE,
	.release = nvhost_as_dev_release,
	.open = nvhost_as_dev_open,
#ifdef CONFIG_COMPAT
	.compat_ioctl = nvhost_as_dev_ctl,
#endif
	.unlocked_ioctl = nvhost_as_dev_ctl,
};
1032
/* Map from unit class id to the base name used for the device node.
 * Consulted first by get_device_name_for_dev(); entries not listed here
 * fall through to the module-id map below. */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1045
/* Fallback map from nvhost module id to device-node base name, used by
 * get_device_name_for_dev() when the class id has no entry above. */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1058
1059 static const char *get_device_name_for_dev(struct platform_device *dev)
1060 {
1061         int i;
1062         /* first choice is to use the class id if specified */
1063         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1064                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1065                 if (pdata->class == class_id_dev_name_map[i].class_id)
1066                         return class_id_dev_name_map[i].dev_name;
1067         }
1068
1069         /* second choice is module name if specified */
1070         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1071                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1072                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1073                         return module_id_dev_name_map[i].dev_name;
1074         }
1075
1076         /* last choice is to just use the given dev name */
1077         return dev->name;
1078 }
1079
1080 static struct device *nvhost_client_device_create(
1081         struct platform_device *pdev, struct cdev *cdev,
1082         const char *cdev_name, int devno,
1083         const struct file_operations *ops)
1084 {
1085         struct nvhost_master *host = nvhost_get_host(pdev);
1086         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1087         const char *use_dev_name;
1088         struct device *dev;
1089         int err;
1090
1091         nvhost_dbg_fn("");
1092
1093         BUG_ON(!host);
1094
1095         cdev_init(cdev, ops);
1096         cdev->owner = THIS_MODULE;
1097
1098         err = cdev_add(cdev, devno, 1);
1099         if (err < 0) {
1100                 dev_err(&pdev->dev,
1101                         "failed to add chan %i cdev\n", pdata->index);
1102                 return NULL;
1103         }
1104         use_dev_name = get_device_name_for_dev(pdev);
1105
1106         dev = device_create(host->nvhost_class,
1107                         NULL, devno, NULL,
1108                         (pdev->id <= 0) ?
1109                         IFACE_NAME "-%s%s" :
1110                         IFACE_NAME "-%s%s.%d",
1111                         cdev_name, use_dev_name, pdev->id);
1112
1113         if (IS_ERR(dev)) {
1114                 err = PTR_ERR(dev);
1115                 dev_err(&pdev->dev,
1116                         "failed to create %s %s device for %s\n",
1117                         use_dev_name, cdev_name, pdev->name);
1118                 return NULL;
1119         }
1120
1121         return dev;
1122 }
1123
1124 int nvhost_client_user_init(struct platform_device *dev)
1125 {
1126         int err, devno;
1127         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1128         struct nvhost_channel *ch = pdata->channel;
1129
1130         BUG_ON(!ch);
1131         /* reserve 4 minor #s for <dev> and as-<dev>, ctrl-<dev>
1132          * and dbg-<dev> */
1133
1134         err = alloc_chrdev_region(&devno, 0, 4, IFACE_NAME);
1135         if (err < 0) {
1136                 dev_err(&dev->dev, "failed to allocate devno\n");
1137                 goto fail;
1138         }
1139
1140         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1141                                 "", devno, &nvhost_channelops);
1142         if (ch->node == NULL)
1143                 goto fail;
1144         ++devno;
1145         ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1146                                 "as-", devno, &nvhost_asops);
1147         if (ch->as_node == NULL)
1148                 goto fail;
1149
1150         if (pdata->ctrl_ops) {
1151                 ++devno;
1152                 pdata->ctrl_node = nvhost_client_device_create(dev,
1153                                         &pdata->ctrl_cdev, "ctrl-",
1154                                         devno, pdata->ctrl_ops);
1155                 if (pdata->ctrl_node == NULL)
1156                         goto fail;
1157         }
1158
1159         if (pdata->dbg_ops) {
1160                 ++devno;
1161                 pdata->dbg_node = nvhost_client_device_create(dev,
1162                                         &pdata->dbg_cdev, "dbg-",
1163                                         devno, pdata->dbg_ops);
1164                 if (pdata->dbg_node == NULL)
1165                         goto fail;
1166         }
1167
1168
1169         return 0;
1170 fail:
1171         return err;
1172 }
1173
/*
 * Full client bring-up for @dev: allocate and initialize a channel,
 * create debugfs and userspace device nodes, register the device in the
 * nvhost device list, hook up scaling, reset the unit's syncpoints and
 * set DMA parameters.  Order matters: the channel must be initialized
 * before its user nodes are created, and the syncpoint reset is done
 * inside a module busy/idle bracket.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): as the "Add clean-up" comment below says, the fail path
 * only frees the channel -- chrdev/device-node/debugfs state created by
 * the earlier steps is not unwound on a late failure; confirm and
 * extend if partial-init errors are reachable in practice.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	/* channel, as-, ctrl- and dbg- device nodes */
	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	/* register the optional slave device under the same parent */
	if (pdata->slave) {
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
	}

	return 0;

fail:
	/* Add clean-up */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1233
/*
 * Tear down client state for @dev: module resources, the nvhost device
 * list entry, the channel's chardev and device node, and the channel.
 *
 * Always returns 0.
 *
 * NOTE(review): only ch->cdev and its node are destroyed here.  The
 * as-/ctrl-/dbg- cdevs and nodes created in nvhost_client_user_init(),
 * and the 4-minor chrdev region, are not released -- confirm whether
 * they are cleaned up elsewhere or leaked on module unload.
 */
int nvhost_client_device_release(struct platform_device *dev)
{
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = pdata->channel;

	/* Release nvhost module resources */
	nvhost_module_deinit(dev);

	/* Remove from nvhost device list */
	nvhost_device_list_remove(dev);

	/* Release chardev and device node for user space */
	device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
	cdev_del(&ch->cdev);

	/* Free nvhost channel */
	nvhost_free_channel(ch);

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
1258
1259 int nvhost_client_device_suspend(struct device *dev)
1260 {
1261         int ret = 0;
1262         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1263
1264         ret = nvhost_module_suspend(dev);
1265         if (ret)
1266                 return ret;
1267
1268         ret = nvhost_channel_suspend(pdata->channel);
1269         if (ret)
1270                 return ret;
1271
1272         dev_info(dev, "suspend status: %d\n", ret);
1273
1274         return ret;
1275 }
1276 EXPORT_SYMBOL(nvhost_client_device_suspend);
1277
/*
 * PM resume hook for nvhost client devices; re-enables the module and
 * logs the transition.  Always reports success.
 */
int nvhost_client_device_resume(struct device *dev)
{
	nvhost_module_resume(dev);
	dev_info(dev, "resuming\n");
	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_resume);
1285
1286 int nvhost_client_device_get_resources(struct platform_device *dev)
1287 {
1288         int i;
1289         void __iomem *regs = NULL;
1290         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1291
1292         for (i = 0; i < dev->num_resources; i++) {
1293                 struct resource *r = NULL;
1294
1295                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1296                 /* We've run out of mem resources */
1297                 if (!r)
1298                         break;
1299
1300                 regs = devm_request_and_ioremap(&dev->dev, r);
1301                 if (!regs)
1302                         goto fail;
1303
1304                 pdata->aperture[i] = regs;
1305         }
1306
1307         return 0;
1308
1309 fail:
1310         dev_err(&dev->dev, "failed to get register memory\n");
1311
1312         return -ENXIO;
1313 }
1314 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1315
1316 /* This is a simple wrapper around request_firmware that takes
1317  * 'fw_name' and if available applies a SOC relative path prefix to it.
1318  * The caller is responsible for calling release_firmware later.
1319  */
1320 const struct firmware *
1321 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1322 {
1323         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1324         const struct firmware *fw;
1325         char *fw_path = NULL;
1326         int path_len, err;
1327
1328         /* This field is NULL when calling from SYS_EXIT.
1329            Add a check here to prevent crash in request_firmware */
1330         if (!current->fs) {
1331                 BUG();
1332                 return NULL;
1333         }
1334
1335         if (!fw_name)
1336                 return NULL;
1337
1338         if (op->soc_name) {
1339                 path_len = strlen(fw_name) + strlen(op->soc_name);
1340                 path_len += 2; /* for the path separator and zero terminator*/
1341
1342                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1343                                      GFP_KERNEL);
1344                 if (!fw_path)
1345                         return NULL;
1346
1347                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1348                 fw_name = fw_path;
1349         }
1350
1351         err = request_firmware(&fw, fw_name, &dev->dev);
1352         kfree(fw_path);
1353         if (err) {
1354                 dev_err(&dev->dev, "failed to get firmware\n");
1355                 return NULL;
1356         }
1357
1358         /* note: caller must release_firmware */
1359         return fw;
1360 }