video: tegra: host: module debugger framework
[linux-3.10.git] / drivers / video / tegra / host / bus_client.c
1 /*
2  * Tegra Graphics Host Client Module
3  *
4  * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/slab.h>
20 #include <linux/string.h>
21 #include <linux/spinlock.h>
22 #include <linux/fs.h>
23 #include <linux/cdev.h>
24 #include <linux/uaccess.h>
25 #include <linux/file.h>
26 #include <linux/clk.h>
27 #include <linux/hrtimer.h>
28 #include <linux/export.h>
29 #include <linux/firmware.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/tegra-soc.h>
32
33 #include <trace/events/nvhost.h>
34
35 #include <linux/io.h>
36 #include <linux/string.h>
37
38 #include <linux/nvhost.h>
39 #include <linux/nvhost_ioctl.h>
40
41 #include <mach/gpufuse.h>
42
43 #include "debug.h"
44 #include "bus_client.h"
45 #include "dev.h"
46 #include "class_ids.h"
47 #include "nvhost_as.h"
48 #include "nvhost_memmgr.h"
49 #include "chip_support.h"
50 #include "nvhost_acm.h"
51
52 #include "nvhost_syncpt.h"
53 #include "nvhost_channel.h"
54 #include "nvhost_job.h"
55 #include "nvhost_hwctx.h"
56 #include "user_hwctx.h"
57
58 static int validate_reg(struct platform_device *ndev, u32 offset, int count)
59 {
60         int err = 0;
61         struct resource *r;
62         struct nvhost_device_data *pdata = platform_get_drvdata(ndev);
63
64         r = platform_get_resource(pdata->master ? pdata->master : ndev,
65                         IORESOURCE_MEM, 0);
66         if (!r) {
67                 dev_err(&ndev->dev, "failed to get memory resource\n");
68                 return -ENODEV;
69         }
70
71         if (offset + 4 * count > resource_size(r)
72                         || (offset + 4 * count < offset))
73                 err = -EPERM;
74
75         return err;
76 }
77
78 static __iomem void *get_aperture(struct platform_device *pdev)
79 {
80         struct nvhost_device_data *pdata = platform_get_drvdata(pdev);
81
82         if (pdata->master)
83                 pdata = platform_get_drvdata(pdata->master);
84
85         return pdata->aperture[0];
86 }
87
88 int nvhost_read_module_regs(struct platform_device *ndev,
89                         u32 offset, int count, u32 *values)
90 {
91         void __iomem *p = get_aperture(ndev);
92         int err;
93
94         if (!p)
95                 return -ENODEV;
96
97         /* verify offset */
98         err = validate_reg(ndev, offset, count);
99         if (err)
100                 return err;
101
102         nvhost_module_busy(ndev);
103         p += offset;
104         while (count--) {
105                 *(values++) = readl(p);
106                 p += 4;
107         }
108         rmb();
109         nvhost_module_idle(ndev);
110
111         return 0;
112 }
113
114 int nvhost_write_module_regs(struct platform_device *ndev,
115                         u32 offset, int count, const u32 *values)
116 {
117         int err;
118         void __iomem *p = get_aperture(ndev);
119
120         if (!p)
121                 return -ENODEV;
122
123         /* verify offset */
124         err = validate_reg(ndev, offset, count);
125         if (err)
126                 return err;
127
128         nvhost_module_busy(ndev);
129         p += offset;
130         while (count--) {
131                 writel(*(values++), p);
132                 p += 4;
133         }
134         wmb();
135         nvhost_module_idle(ndev);
136
137         return 0;
138 }
139
140 bool nvhost_client_can_writel(struct platform_device *pdev)
141 {
142         return !!get_aperture(pdev);
143 }
144 EXPORT_SYMBOL(nvhost_client_can_writel);
145
/* Write @val to word register @reg (word index, not byte offset) of @pdev.
 * NOTE(review): the aperture is not checked here; callers are expected to
 * have verified it via nvhost_client_can_writel() — confirm at call sites. */
void nvhost_client_writel(struct platform_device *pdev, u32 val, u32 reg)
{
	writel(val, get_aperture(pdev) + reg * 4);
}
150
/* Read word register @reg (word index, not byte offset) of @pdev.
 * NOTE(review): like nvhost_client_writel(), the aperture is not checked
 * here — callers must ensure it exists. */
u32 nvhost_client_readl(struct platform_device *pdev, u32 reg)
{
	return readl(get_aperture(pdev) + reg * 4);
}
155
/* Per-open-file state of a channel device node, created by
 * nvhost_channelopen() and torn down by nvhost_channelrelease(). */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;	/* hw context, if the class uses one */
	struct nvhost_job *job;		/* job owned by this fd, if any */
	struct mem_mgr *memmgr;		/* memory manager set via SET_NVMAP_FD */
	u32 timeout;			/* job timeout; 0 on non-silicon platforms,
					 * presumably disabling the watchdog — verify */
	u32 priority;			/* submit priority (NVHOST_PRIORITY_*) */
	int clientid;			/* id unique per open, from host clientid counter */
	bool timeout_debug_dump;	/* dump channel state when a job times out */
};
166
/*
 * Release a channel file descriptor: undo everything nvhost_channelopen()
 * set up. This is also used as the error-cleanup path of open, so every
 * teardown step below must tolerate a partially initialized context
 * (NULL hwctx, NULL job, NULL memmgr).
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(dev_name(&priv->ch->dev->dev));

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);

	if (priv->hwctx) {
		struct nvhost_channel *ch = priv->ch;
		struct nvhost_hwctx *ctx = priv->hwctx;

		/* detach the context from the channel under submitlock so
		 * no submit can pick it up while it is being dropped */
		mutex_lock(&ch->submitlock);
		if (ch->cur_ctx == ctx)
			ch->cur_ctx = NULL;
		mutex_unlock(&ch->submitlock);

		priv->hwctx->h->put(priv->hwctx);
	}

	if (priv->job)
		nvhost_job_put(priv->job);

	nvhost_putchannel(priv->ch);

	/* put the memmgr reference taken in the SET_NVMAP_FD ioctl */
	nvhost_memmgr_put_mgr(priv->memmgr);
	kfree(priv);
	return 0;
}
198
199 static int nvhost_channelopen(struct inode *inode, struct file *filp)
200 {
201         struct nvhost_channel_userctx *priv;
202         struct nvhost_channel *ch;
203         struct nvhost_device_data *pdata;
204
205         ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
206         ch = nvhost_getchannel(ch, false);
207         if (!ch)
208                 return -ENOMEM;
209         trace_nvhost_channel_open(dev_name(&ch->dev->dev));
210
211         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
212         if (!priv) {
213                 nvhost_putchannel(ch);
214                 return -ENOMEM;
215         }
216         filp->private_data = priv;
217         priv->ch = ch;
218         if (nvhost_module_add_client(ch->dev, priv))
219                 goto fail;
220
221         if (ch->ctxhandler && ch->ctxhandler->alloc) {
222                 nvhost_module_busy(ch->dev);
223                 priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
224                 nvhost_module_idle(ch->dev);
225                 if (!priv->hwctx)
226                         goto fail;
227         }
228         priv->priority = NVHOST_PRIORITY_MEDIUM;
229         priv->clientid = atomic_add_return(1,
230                         &nvhost_get_host(ch->dev)->clientid);
231         pdata = platform_get_drvdata(ch->dev);
232         priv->timeout = pdata->nvhost_timeout_default;
233         priv->timeout_debug_dump = true;
234         if (!tegra_platform_is_silicon())
235                 priv->timeout = 0;
236
237         return 0;
238 fail:
239         nvhost_channelrelease(inode, filp);
240         return -ENOMEM;
241 }
242
243 static int nvhost_ioctl_channel_alloc_obj_ctx(
244         struct nvhost_channel_userctx *ctx,
245         struct nvhost_alloc_obj_ctx_args *args)
246 {
247         int ret;
248
249         BUG_ON(!channel_op(ctx->ch).alloc_obj);
250         nvhost_module_busy(ctx->ch->dev);
251         ret = channel_op(ctx->ch).alloc_obj(ctx->hwctx, args);
252         nvhost_module_idle(ctx->ch->dev);
253         return ret;
254 }
255
256 static int nvhost_ioctl_channel_free_obj_ctx(
257         struct nvhost_channel_userctx *ctx,
258         struct nvhost_free_obj_ctx_args *args)
259 {
260         int ret;
261
262         BUG_ON(!channel_op(ctx->ch).free_obj);
263         nvhost_module_busy(ctx->ch->dev);
264         ret = channel_op(ctx->ch).free_obj(ctx->hwctx, args);
265         nvhost_module_idle(ctx->ch->dev);
266         return ret;
267 }
268
269 static int nvhost_ioctl_channel_alloc_gpfifo(
270         struct nvhost_channel_userctx *ctx,
271         struct nvhost_alloc_gpfifo_args *args)
272 {
273         int ret;
274
275         BUG_ON(!channel_op(ctx->ch).alloc_gpfifo);
276         nvhost_module_busy(ctx->ch->dev);
277         ret = channel_op(ctx->ch).alloc_gpfifo(ctx->hwctx, args);
278         nvhost_module_idle(ctx->ch->dev);
279         return ret;
280 }
281
282 static int nvhost_ioctl_channel_submit_gpfifo(
283         struct nvhost_channel_userctx *ctx,
284         struct nvhost_submit_gpfifo_args *args)
285 {
286         void *gpfifo;
287         u32 size;
288         int ret = 0;
289
290         if (!ctx->hwctx || ctx->hwctx->has_timedout)
291                 return -ETIMEDOUT;
292
293         size = args->num_entries * sizeof(struct nvhost_gpfifo);
294
295         gpfifo = kzalloc(size, GFP_KERNEL);
296         if (!gpfifo)
297                 return -ENOMEM;
298
299         if (copy_from_user(gpfifo,
300                            (void __user *)(uintptr_t)args->gpfifo, size)) {
301                 ret = -EINVAL;
302                 goto clean_up;
303         }
304
305         BUG_ON(!channel_op(ctx->ch).submit_gpfifo);
306
307         nvhost_module_busy(ctx->ch->dev);
308         ret = channel_op(ctx->ch).submit_gpfifo(ctx->hwctx, gpfifo,
309                         args->num_entries, &args->fence, args->flags);
310         nvhost_module_idle(ctx->ch->dev);
311 clean_up:
312         kfree(gpfifo);
313         return ret;
314 }
315
316 static int nvhost_ioctl_channel_wait(
317         struct nvhost_channel_userctx *ctx,
318         struct nvhost_wait_args *args)
319 {
320         int ret;
321
322         BUG_ON(!channel_op(ctx->ch).wait);
323         nvhost_module_busy(ctx->ch->dev);
324         ret = channel_op(ctx->ch).wait(ctx->hwctx, args);
325         nvhost_module_idle(ctx->ch->dev);
326         return ret;
327 }
328
329 static int nvhost_ioctl_channel_zcull_bind(
330         struct nvhost_channel_userctx *ctx,
331         struct nvhost_zcull_bind_args *args)
332 {
333         int ret;
334
335         BUG_ON(!channel_zcull_op(ctx->ch).bind);
336         nvhost_module_busy(ctx->ch->dev);
337         ret = channel_zcull_op(ctx->ch).bind(ctx->hwctx, args);
338         nvhost_module_idle(ctx->ch->dev);
339         return ret;
340 }
341
342 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
343                 struct nvhost_submit_args *args)
344 {
345         struct nvhost_job *job;
346         int num_cmdbufs = args->num_cmdbufs;
347         int num_relocs = args->num_relocs;
348         int num_waitchks = args->num_waitchks;
349         int num_syncpt_incrs = args->num_syncpt_incrs;
350         struct nvhost_cmdbuf __user *cmdbufs =
351                 (struct nvhost_cmdbuf *)(uintptr_t)args->cmdbufs;
352         struct nvhost_reloc __user *relocs =
353                 (struct nvhost_reloc *)(uintptr_t)args->relocs;
354         struct nvhost_reloc_shift __user *reloc_shifts =
355                 (struct nvhost_reloc_shift *)(uintptr_t)args->reloc_shifts;
356         struct nvhost_waitchk __user *waitchks =
357                 (struct nvhost_waitchk *)(uintptr_t)args->waitchks;
358         struct nvhost_syncpt_incr __user *syncpt_incrs =
359                 (struct nvhost_syncpt_incr *)(uintptr_t)args->syncpt_incrs;
360         u32 __user *waitbases = (u32 *)(uintptr_t)args->waitbases;
361         u32 __user *fences = (u32 *)(uintptr_t)args->fences;
362
363         struct nvhost_master *host = nvhost_get_host(ctx->ch->dev);
364         u32 *local_waitbases = NULL;
365         int err, i, hwctx_syncpt_idx = -1;
366
367         if (num_syncpt_incrs > host->info.nb_pts)
368                 return -EINVAL;
369
370         job = nvhost_job_alloc(ctx->ch,
371                         ctx->hwctx,
372                         num_cmdbufs,
373                         num_relocs,
374                         num_waitchks,
375                         num_syncpt_incrs,
376                         ctx->memmgr);
377         if (!job)
378                 return -ENOMEM;
379
380         job->num_relocs = args->num_relocs;
381         job->num_waitchk = args->num_waitchks;
382         job->num_syncpts = args->num_syncpt_incrs;
383         job->priority = ctx->priority;
384         job->clientid = ctx->clientid;
385
386         while (num_cmdbufs) {
387                 struct nvhost_cmdbuf cmdbuf;
388                 err = copy_from_user(&cmdbuf, cmdbufs, sizeof(cmdbuf));
389                 if (err)
390                         goto fail;
391                 nvhost_job_add_gather(job,
392                                 cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
393                 num_cmdbufs--;
394                 cmdbufs++;
395         }
396
397         err = copy_from_user(job->relocarray,
398                         relocs, sizeof(*relocs) * num_relocs);
399         if (err)
400                 goto fail;
401
402         err = copy_from_user(job->relocshiftarray,
403                         reloc_shifts, sizeof(*reloc_shifts) * num_relocs);
404         if (err)
405                 goto fail;
406
407         err = copy_from_user(job->waitchk,
408                         waitchks, sizeof(*waitchks) * num_waitchks);
409         if (err)
410                 goto fail;
411
412         /* mass copy waitbases */
413         if (args->waitbases) {
414                 local_waitbases = kzalloc(sizeof(u32) * num_syncpt_incrs,
415                         GFP_KERNEL);
416                 if (!local_waitbases) {
417                         err = -ENOMEM;
418                         goto fail;
419                 }
420
421                 err = copy_from_user(local_waitbases, waitbases,
422                         sizeof(u32) * num_syncpt_incrs);
423                 if (err) {
424                         err = -EINVAL;
425                         goto fail;
426                 }
427         }
428
429         /* set valid id for hwctx_syncpt_idx if no hwctx is present */
430         if (!ctx->hwctx)
431                 hwctx_syncpt_idx = 0;
432
433         /*
434          * Go through each syncpoint from userspace. Here we:
435          * - Copy syncpoint information
436          * - Validate each syncpoint
437          * - Determine waitbase for each syncpoint
438          * - Determine the index of hwctx syncpoint in the table
439          */
440
441         for (i = 0; i < num_syncpt_incrs; ++i) {
442                 u32 waitbase;
443                 struct nvhost_syncpt_incr sp;
444
445                 /* Copy */
446                 err = copy_from_user(&sp, syncpt_incrs + i, sizeof(sp));
447                 if (err)
448                         goto fail;
449
450                 /* Validate */
451                 if (sp.syncpt_id > host->info.nb_pts) {
452                         err = -EINVAL;
453                         goto fail;
454                 }
455
456                 /* Determine waitbase */
457                 if (waitbases && local_waitbases[i] != NVSYNCPT_INVALID)
458                         waitbase = local_waitbases[i];
459                 else
460                         waitbase = nvhost_syncpt_get_waitbase(job->ch,
461                                 sp.syncpt_id);
462
463                 /* Store */
464                 job->sp[i].id = sp.syncpt_id;
465                 job->sp[i].incrs = sp.syncpt_incrs;
466                 job->sp[i].waitbase = waitbase;
467
468                 /* Find hwctx syncpoint */
469                 if (ctx->hwctx && (job->sp[i].id == ctx->hwctx->h->syncpt))
470                         hwctx_syncpt_idx = i;
471         }
472
473         /* not needed anymore */
474         kfree(local_waitbases);
475         local_waitbases = NULL;
476
477         /* Is hwctx_syncpt_idx valid? */
478         if (hwctx_syncpt_idx == -1) {
479                 err = -EINVAL;
480                 goto fail;
481         }
482
483         job->hwctx_syncpt_idx = hwctx_syncpt_idx;
484
485         trace_nvhost_channel_submit(ctx->ch->dev->name,
486                 job->num_gathers, job->num_relocs, job->num_waitchk,
487                 job->sp[job->hwctx_syncpt_idx].id,
488                 job->sp[job->hwctx_syncpt_idx].incrs);
489
490         err = nvhost_job_pin(job, &nvhost_get_host(ctx->ch->dev)->syncpt);
491         if (err)
492                 goto fail;
493
494         if (args->timeout)
495                 job->timeout = min(ctx->timeout, args->timeout);
496         else
497                 job->timeout = ctx->timeout;
498         job->timeout_debug_dump = ctx->timeout_debug_dump;
499
500         err = nvhost_channel_submit(job);
501         if (err)
502                 goto fail_submit;
503
504         /* Deliver multiple fences back to the userspace */
505         if (fences)
506                 for (i = 0; i < num_syncpt_incrs; ++i) {
507                         u32 fence = job->sp[i].fence;
508                         err = copy_to_user(fences, &fence, sizeof(u32));
509                         if (err)
510                                 break;
511                         fences++;
512                 }
513
514         /* Deliver the fence using the old mechanism _only_ if a single
515          * syncpoint is used. */
516
517         if (num_syncpt_incrs == 1)
518                 args->fence = job->sp[job->hwctx_syncpt_idx].fence;
519         else
520                 args->fence = 0;
521
522         nvhost_job_put(job);
523
524         return 0;
525
526 fail_submit:
527         nvhost_job_unpin(job);
528 fail:
529         nvhost_job_put(job);
530         kfree(local_waitbases);
531         return err;
532 }
533
534 static int nvhost_ioctl_channel_set_ctxswitch(
535                 struct nvhost_channel_userctx *ctx,
536                 struct nvhost_set_ctxswitch_args *args)
537 {
538         struct nvhost_cmdbuf cmdbuf_save;
539         struct nvhost_cmdbuf cmdbuf_restore;
540         struct nvhost_syncpt_incr save_incr, restore_incr;
541         u32 save_waitbase, restore_waitbase;
542         struct nvhost_reloc reloc;
543         struct nvhost_hwctx_handler *ctxhandler = NULL;
544         struct nvhost_hwctx *nhwctx = NULL;
545         struct user_hwctx *hwctx;
546         struct nvhost_device_data *pdata = platform_get_drvdata(ctx->ch->dev);
547         int err;
548
549         /* Only channels with context support */
550         if (!ctx->hwctx)
551                 return -EFAULT;
552
553         /* We don't yet support other than one nvhost_syncpt_incrs per submit */
554         if (args->num_cmdbufs_save != 1
555                         || args->num_cmdbufs_restore != 1
556                         || args->num_save_incrs != 1
557                         || args->num_restore_incrs != 1
558                         || args->num_relocs != 1)
559                 return -EINVAL;
560
561         err = copy_from_user(&cmdbuf_save,
562                         (void *)(uintptr_t)args->cmdbuf_save,
563                         sizeof(cmdbuf_save));
564         if (err)
565                 goto fail;
566
567         err = copy_from_user(&cmdbuf_restore,
568                         (void *)(uintptr_t)args->cmdbuf_restore,
569                         sizeof(cmdbuf_restore));
570         if (err)
571                 goto fail;
572
573         err = copy_from_user(&reloc, (void *)(uintptr_t)args->relocs,
574                         sizeof(reloc));
575         if (err)
576                 goto fail;
577
578         err = copy_from_user(&save_incr,
579                         (void *)(uintptr_t)args->save_incrs,
580                         sizeof(save_incr));
581         if (err)
582                 goto fail;
583         err = copy_from_user(&save_waitbase,
584                         (void *)(uintptr_t)args->save_waitbases,
585                         sizeof(save_waitbase));
586
587         err = copy_from_user(&restore_incr,
588                         (void *)(uintptr_t)args->restore_incrs,
589                         sizeof(restore_incr));
590         if (err)
591                 goto fail;
592         err = copy_from_user(&restore_waitbase,
593                         (void *)(uintptr_t)args->restore_waitbases,
594                         sizeof(restore_waitbase));
595
596         if (save_incr.syncpt_id != pdata->syncpts[0]
597                         || restore_incr.syncpt_id != pdata->syncpts[0]
598                         || save_waitbase != pdata->waitbases[0]
599                         || restore_waitbase != pdata->waitbases[0]) {
600                 err = -EINVAL;
601                 goto fail;
602         }
603         ctxhandler = user_ctxhandler_init(save_incr.syncpt_id,
604                         save_waitbase, ctx->ch);
605         if (!ctxhandler) {
606                 err = -ENOMEM;
607                 goto fail;
608         }
609
610         nhwctx = ctxhandler->alloc(ctxhandler, ctx->ch);
611         if (!nhwctx) {
612                 err = -ENOMEM;
613                 goto fail_hwctx;
614         }
615         hwctx = to_user_hwctx(nhwctx);
616
617         trace_nvhost_ioctl_channel_set_ctxswitch(ctx->ch->dev->name, nhwctx,
618                         cmdbuf_save.mem, cmdbuf_save.offset, cmdbuf_save.words,
619                         cmdbuf_restore.mem, cmdbuf_restore.offset,
620                         cmdbuf_restore.words,
621                         pdata->syncpts[0], pdata->waitbases[0],
622                         save_incr.syncpt_incrs, restore_incr.syncpt_incrs);
623
624         nhwctx->memmgr = nvhost_memmgr_get_mgr(ctx->memmgr);
625         if (!nhwctx->memmgr)
626                 goto fail_set_restore;
627
628         err = user_hwctx_set_restore(hwctx, cmdbuf_restore.mem,
629                         cmdbuf_restore.offset, cmdbuf_restore.words);
630         if (err)
631                 goto fail_set_restore;
632
633         err = user_hwctx_set_save(hwctx, cmdbuf_save.mem,
634                         cmdbuf_save.offset, cmdbuf_save.words, &reloc);
635         if (err)
636                 goto fail_set_save;
637
638         hwctx->hwctx.save_incrs = save_incr.syncpt_incrs;
639         hwctx->hwctx.restore_incrs = restore_incr.syncpt_incrs;
640
641         /* Free old context */
642         ctx->hwctx->h->put(ctx->hwctx);
643         ctx->hwctx = nhwctx;
644
645         return 0;
646
647 fail_set_save:
648 fail_set_restore:
649         ctxhandler->put(&hwctx->hwctx);
650 fail_hwctx:
651         user_ctxhandler_free(ctxhandler);
652 fail:
653         return err;
654 }
655
#if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
/* Forward a cycle-stats request straight to the channel driver. */
static int nvhost_ioctl_channel_cycle_stats(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_cycle_stats_args *args)
{
	BUG_ON(!channel_op(ctx->ch).cycle_stats);
	return channel_op(ctx->ch).cycle_stats(ctx->hwctx, args);
}
#endif
667
/* Read a 3D-class register through the channel, storing the result in
 * args->value. The hwctx is passed through so the channel layer can
 * account for the caller's context. */
static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}
674
675 static int moduleid_to_index(struct platform_device *dev, u32 moduleid)
676 {
677         int i;
678         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
679
680         for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
681                 if (pdata->clocks[i].moduleid == moduleid)
682                         return i;
683         }
684
685         /* Old user space is sending a random number in args. Return clock
686          * zero in these cases. */
687         return 0;
688 }
689
690 static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
691         struct nvhost_clk_rate_args *arg)
692 {
693         u32 moduleid = (arg->moduleid >> NVHOST_MODULE_ID_BIT_POS)
694                         & ((1 << NVHOST_MODULE_ID_BIT_WIDTH) - 1);
695         u32 attr = (arg->moduleid >> NVHOST_CLOCK_ATTR_BIT_POS)
696                         & ((1 << NVHOST_CLOCK_ATTR_BIT_WIDTH) - 1);
697         int index = moduleid ?
698                         moduleid_to_index(ctx->ch->dev, moduleid) : 0;
699
700         return nvhost_module_set_rate(ctx->ch->dev,
701                         ctx, arg->rate, index, attr);
702 }
703
704 static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
705         u32 moduleid, u32 *rate)
706 {
707         int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;
708
709         return nvhost_module_get_rate(ctx->ch->dev,
710                         (unsigned long *)rate, index);
711 }
712
/*
 * Read or write a set of register blocks on behalf of userspace. The
 * args describe num_offsets register blocks, each block_size bytes long,
 * starting at the corresponding entry of the offsets array. Data is
 * staged through a fixed 64-word kernel buffer, so each block is
 * processed in batches of at most 64 registers. Range validation happens
 * inside nvhost_read/write_module_regs() via validate_reg().
 */
static int nvhost_ioctl_channel_module_regrdwr(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 __user *offsets = (u32 *)(uintptr_t)args->offsets;
	u32 __user *values = (u32 *)(uintptr_t)args->values;
	u32 vals[64];	/* bounce buffer: at most 64 registers per batch */
	struct platform_device *ndev;

	trace_nvhost_ioctl_channel_module_regrdwr(args->id,
		args->num_offsets, args->write);

	/* Check that there is something to read and that block size is
	 * u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = ctx->ch->dev;

	while (num_offsets--) {
		int err;
		u32 offs;
		int remaining = args->block_size >> 2;	/* words left in block */

		if (get_user(offs, offsets))
			return -EFAULT;

		offsets++;
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
						batch * sizeof(u32)))
					return -EFAULT;

				err = nvhost_write_module_regs(ndev,
					offs, batch, vals);
				if (err)
					return err;
			} else {
				err = nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (err)
					return err;

				if (copy_to_user(values, vals,
						batch * sizeof(u32)))
					return -EFAULT;
			}

			/* advance past this batch within the block */
			remaining -= batch;
			offs += batch * sizeof(u32);
			values += batch;
		}
	}

	return 0;
}
772
773 static u32 create_mask(u32 *words, int num)
774 {
775         int i;
776         u32 word = 0;
777         for (i = 0; i < num && words[i] && words[i] < BITS_PER_LONG; i++)
778                 word |= BIT(words[i]);
779
780         return word;
781 }
782
783 static long nvhost_channelctl(struct file *filp,
784         unsigned int cmd, unsigned long arg)
785 {
786         struct nvhost_channel_userctx *priv = filp->private_data;
787         struct device *dev = &priv->ch->dev->dev;
788         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
789         int err = 0;
790
791         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
792                 (_IOC_NR(cmd) == 0) ||
793                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
794                 (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
795                 return -EFAULT;
796
797         if (_IOC_DIR(cmd) & _IOC_WRITE) {
798                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
799                         return -EFAULT;
800         }
801
802         switch (cmd) {
803         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
804         {
805                 struct nvhost_device_data *pdata = \
806                         platform_get_drvdata(priv->ch->dev);
807                 ((struct nvhost_get_param_args *)buf)->value =
808                         create_mask(pdata->syncpts, NVHOST_MODULE_MAX_SYNCPTS);
809                 break;
810         }
811         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINT:
812         {
813                 struct nvhost_device_data *pdata = \
814                         platform_get_drvdata(priv->ch->dev);
815                 struct nvhost_get_param_arg *arg =
816                         (struct nvhost_get_param_arg *)buf;
817                 if (arg->param >= NVHOST_MODULE_MAX_SYNCPTS
818                                 || !pdata->syncpts[arg->param])
819                         return -EINVAL;
820                 arg->value = pdata->syncpts[arg->param];
821                 break;
822         }
823         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
824         {
825                 struct nvhost_device_data *pdata = \
826                         platform_get_drvdata(priv->ch->dev);
827                 ((struct nvhost_get_param_args *)buf)->value =
828                         create_mask(pdata->waitbases,
829                                         NVHOST_MODULE_MAX_WAITBASES);
830                 break;
831         }
832         case NVHOST_IOCTL_CHANNEL_GET_WAITBASE:
833         {
834                 struct nvhost_device_data *pdata = \
835                         platform_get_drvdata(priv->ch->dev);
836                 struct nvhost_get_param_arg *arg =
837                         (struct nvhost_get_param_arg *)buf;
838                 if (arg->param >= NVHOST_MODULE_MAX_WAITBASES
839                                 || !pdata->waitbases[arg->param])
840                         return -EINVAL;
841                 arg->value = pdata->waitbases[arg->param];
842                 break;
843         }
844         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
845         {
846                 struct nvhost_device_data *pdata = \
847                         platform_get_drvdata(priv->ch->dev);
848                 ((struct nvhost_get_param_args *)buf)->value =
849                         create_mask(pdata->modulemutexes,
850                                         NVHOST_MODULE_MAX_MODMUTEXES);
851                 break;
852         }
853         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEX:
854         {
855                 struct nvhost_device_data *pdata = \
856                         platform_get_drvdata(priv->ch->dev);
857                 struct nvhost_get_param_arg *arg =
858                         (struct nvhost_get_param_arg *)buf;
859                 if (arg->param >= NVHOST_MODULE_MAX_MODMUTEXES
860                                 || !pdata->modulemutexes[arg->param])
861                         return -EINVAL;
862                 arg->value = pdata->modulemutexes[arg->param];
863                 break;
864         }
865         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
866         {
867                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
868                 struct mem_mgr *new_client = nvhost_memmgr_get_mgr_file(fd);
869
870                 if (IS_ERR(new_client)) {
871                         err = PTR_ERR(new_client);
872                         break;
873                 }
874                 if (priv->memmgr)
875                         nvhost_memmgr_put_mgr(priv->memmgr);
876
877                 priv->memmgr = new_client;
878
879                 if (priv->hwctx)
880                         priv->hwctx->memmgr = new_client;
881
882                 break;
883         }
884         case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
885                 err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
886                 break;
887         case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
888                 err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
889                 break;
890         case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
891                 err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
892                 break;
893         case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
894                 err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
895                 break;
896         case NVHOST_IOCTL_CHANNEL_WAIT:
897                 err = nvhost_ioctl_channel_wait(priv, (void *)buf);
898                 break;
899         case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
900                 err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
901                 break;
902 #if defined(CONFIG_TEGRA_GPU_CYCLE_STATS)
903         case NVHOST_IOCTL_CHANNEL_CYCLE_STATS:
904                 err = nvhost_ioctl_channel_cycle_stats(priv, (void *)buf);
905                 break;
906 #endif
907         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
908                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
909                 break;
910         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
911         {
912                 struct nvhost_clk_rate_args *arg =
913                                 (struct nvhost_clk_rate_args *)buf;
914
915                 err = nvhost_ioctl_channel_get_rate(priv,
916                                 arg->moduleid, &arg->rate);
917                 break;
918         }
919         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
920         {
921                 struct nvhost_clk_rate_args *arg =
922                                 (struct nvhost_clk_rate_args *)buf;
923
924                 err = nvhost_ioctl_channel_set_rate(priv, arg);
925                 break;
926         }
927         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
928                 priv->timeout =
929                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
930                 dev_dbg(&priv->ch->dev->dev,
931                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
932                         __func__, priv->timeout, priv);
933                 break;
934         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
935                 ((struct nvhost_get_param_args *)buf)->value =
936                                 priv->hwctx->has_timedout;
937                 break;
938         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
939                 priv->priority =
940                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
941                 break;
942         case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
943                 err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
944                 break;
945         case NVHOST_IOCTL_CHANNEL_SUBMIT:
946                 err = nvhost_ioctl_channel_submit(priv, (void *)buf);
947                 break;
948         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT_EX:
949                 priv->timeout = (u32)
950                         ((struct nvhost_set_timeout_ex_args *)buf)->timeout;
951                 priv->timeout_debug_dump = !((u32)
952                         ((struct nvhost_set_timeout_ex_args *)buf)->flags &
953                         (1 << NVHOST_TIMEOUT_FLAG_DISABLE_DUMP));
954                 dev_dbg(&priv->ch->dev->dev,
955                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
956                         __func__, priv->timeout, priv);
957                 break;
958         case NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH:
959                 err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
960                 break;
961         default:
962                 nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
963                 err = -ENOTTY;
964                 break;
965         }
966
967         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
968                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
969
970         return err;
971 }
972
973 static const struct file_operations nvhost_channelops = {
974         .owner = THIS_MODULE,
975         .release = nvhost_channelrelease,
976         .open = nvhost_channelopen,
977 #ifdef CONFIG_COMPAT
978         .compat_ioctl = nvhost_channelctl,
979 #endif
980         .unlocked_ioctl = nvhost_channelctl
981 };
982
983 struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
984 {
985         struct nvhost_channel_userctx *userctx;
986         struct file *f = fget(fd);
987         if (!f)
988                 return 0;
989
990         if (f->f_op != &nvhost_channelops) {
991                 fput(f);
992                 return 0;
993         }
994
995         userctx = (struct nvhost_channel_userctx *)f->private_data;
996         fput(f);
997         return userctx->hwctx;
998 }
999
1000
1001 static const struct file_operations nvhost_asops = {
1002         .owner = THIS_MODULE,
1003         .release = nvhost_as_dev_release,
1004         .open = nvhost_as_dev_open,
1005 #ifdef CONFIG_COMPAT
1006         .compat_ioctl = nvhost_as_dev_ctl,
1007 #endif
1008         .unlocked_ioctl = nvhost_as_dev_ctl,
1009 };
1010
/* Maps a host1x class id to the base name of the device node.
 * Consulted first by get_device_name_for_dev(). */
static struct {
	int class_id;
	const char *dev_name;
} class_id_dev_name_map[] = {
	/*	{ NV_HOST1X_CLASS_ID, ""}, */
	{ NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
	{ NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
	{ NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
	{ NV_GRAPHICS_GPU_CLASS_ID, "gpu"},
	{ NV_GRAPHICS_VIC_CLASS_ID, "vic"},
	{ NV_TSEC_CLASS_ID, "tsec" },
};
1023
/* Maps an nvhost module id to the base name of the device node.
 * Fallback used by get_device_name_for_dev() when no class id matches. */
static struct {
	int module_id;
	const char *dev_name;
} module_id_dev_name_map[] = {
	{ NVHOST_MODULE_VI, "vi"},
	{ NVHOST_MODULE_ISP, "isp"},
	{ NVHOST_MODULE_MPE, "mpe"},
	{ NVHOST_MODULE_MSENC, "msenc"},
	{ NVHOST_MODULE_TSEC, "tsec"},
	{ NVHOST_MODULE_GPU, "gpu"},
	{ NVHOST_MODULE_VIC, "vic"},
};
1036
1037 static const char *get_device_name_for_dev(struct platform_device *dev)
1038 {
1039         int i;
1040         /* first choice is to use the class id if specified */
1041         for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++) {
1042                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1043                 if (pdata->class == class_id_dev_name_map[i].class_id)
1044                         return class_id_dev_name_map[i].dev_name;
1045         }
1046
1047         /* second choice is module name if specified */
1048         for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++) {
1049                 struct nvhost_device_data *pdata = nvhost_get_devdata(dev);
1050                 if (pdata->moduleid == module_id_dev_name_map[i].module_id)
1051                         return module_id_dev_name_map[i].dev_name;
1052         }
1053
1054         /* last choice is to just use the given dev name */
1055         return dev->name;
1056 }
1057
1058 static struct device *nvhost_client_device_create(
1059         struct platform_device *pdev, struct cdev *cdev,
1060         const char *cdev_name, int devno,
1061         const struct file_operations *ops)
1062 {
1063         struct nvhost_master *host = nvhost_get_host(pdev);
1064         struct nvhost_device_data *pdata = nvhost_get_devdata(pdev);
1065         const char *use_dev_name;
1066         struct device *dev;
1067         int err;
1068
1069         nvhost_dbg_fn("");
1070
1071         BUG_ON(!host);
1072
1073         cdev_init(cdev, ops);
1074         cdev->owner = THIS_MODULE;
1075
1076         err = cdev_add(cdev, devno, 1);
1077         if (err < 0) {
1078                 dev_err(&pdev->dev,
1079                         "failed to add chan %i cdev\n", pdata->index);
1080                 return NULL;
1081         }
1082         use_dev_name = get_device_name_for_dev(pdev);
1083
1084         dev = device_create(host->nvhost_class,
1085                         NULL, devno, NULL,
1086                         (pdev->id <= 0) ?
1087                         IFACE_NAME "-%s%s" :
1088                         IFACE_NAME "-%s%s.%d",
1089                         cdev_name, use_dev_name, pdev->id);
1090
1091         if (IS_ERR(dev)) {
1092                 err = PTR_ERR(dev);
1093                 dev_err(&pdev->dev,
1094                         "failed to create %s %s device for %s\n",
1095                         use_dev_name, cdev_name, pdev->name);
1096                 return NULL;
1097         }
1098
1099         return dev;
1100 }
1101
1102 int nvhost_client_user_init(struct platform_device *dev)
1103 {
1104         int err, devno;
1105         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1106         struct nvhost_channel *ch = pdata->channel;
1107
1108         BUG_ON(!ch);
1109         /* reserve 4 minor #s for <dev> and as-<dev>, ctrl-<dev>
1110          * and dbg-<dev> */
1111
1112         err = alloc_chrdev_region(&devno, 0, 4, IFACE_NAME);
1113         if (err < 0) {
1114                 dev_err(&dev->dev, "failed to allocate devno\n");
1115                 goto fail;
1116         }
1117
1118         ch->node = nvhost_client_device_create(dev, &ch->cdev,
1119                                 "", devno, &nvhost_channelops);
1120         if (ch->node == NULL)
1121                 goto fail;
1122         ++devno;
1123         ch->as_node = nvhost_client_device_create(dev, &ch->as_cdev,
1124                                 "as-", devno, &nvhost_asops);
1125         if (ch->as_node == NULL)
1126                 goto fail;
1127
1128         if (pdata->ctrl_ops) {
1129                 ++devno;
1130                 pdata->ctrl_node = nvhost_client_device_create(dev,
1131                                         &pdata->ctrl_cdev, "ctrl-",
1132                                         devno, pdata->ctrl_ops);
1133                 if (pdata->ctrl_node == NULL)
1134                         goto fail;
1135         }
1136
1137         if (pdata->dbg_ops) {
1138                 ++devno;
1139                 pdata->dbg_node = nvhost_client_device_create(dev,
1140                                         &pdata->dbg_cdev, "dbg-",
1141                                         devno, pdata->dbg_ops);
1142                 if (pdata->dbg_node == NULL)
1143                         goto fail;
1144         }
1145
1146
1147         return 0;
1148 fail:
1149         return err;
1150 }
1151
/*
 * Common init path for a host1x client engine: allocates a channel,
 * creates debugfs entries and user-space device nodes, adds the device
 * to the nvhost device list and resets its syncpoints.
 * Returns 0 on success or a negative errno.
 */
int nvhost_client_device_init(struct platform_device *dev)
{
	int err;
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = nvhost_alloc_channel(dev);
	if (ch == NULL)
		return -ENODEV;

	/* store the pointer to this device for channel */
	ch->dev = dev;

	/* Create debugfs directory for the device */
	nvhost_device_debug_init(dev);

	err = nvhost_channel_init(ch, nvhost_master, pdata->index);
	if (err)
		goto fail;

	err = nvhost_client_user_init(dev);
	if (err)
		goto fail;

	/* optional per-chip tick-counter hookup for this channel */
	if (tickctrl_op().init_channel)
		tickctrl_op().init_channel(dev);

	err = nvhost_device_list_add(dev);
	if (err)
		goto fail;

	if (pdata->scaling_init)
		pdata->scaling_init(dev);

	/* reset syncpoint values for this unit, bracketed by busy/idle
	 * so the host module is powered while we touch it */
	nvhost_module_busy(nvhost_master->dev);
	nvhost_syncpt_reset_client(dev);
	nvhost_module_idle(nvhost_master->dev);

	/* Initialize dma parameters */
	dev->dev.dma_parms = &pdata->dma_parms;
	dma_set_max_seg_size(&dev->dev, UINT_MAX);

	dev_info(&dev->dev, "initialized\n");

	/* register the slave platform device sharing this engine, if any */
	if (pdata->slave) {
		pdata->slave->dev.parent = dev->dev.parent;
		platform_device_register(pdata->slave);
	}

	return 0;

fail:
	/* Add clean-up */
	/* NOTE(review): only the channel is freed; chardevs/debugfs
	 * entries created above are not torn down on failure */
	nvhost_free_channel(ch);
	return err;
}
EXPORT_SYMBOL(nvhost_client_device_init);
1211
/*
 * Tear down a client engine set up by nvhost_client_device_init():
 * deinit the module, unlist the device, destroy its chardev node and
 * free the channel.  Always returns 0.
 */
int nvhost_client_device_release(struct platform_device *dev)
{
	struct nvhost_master *nvhost_master = nvhost_get_host(dev);
	struct nvhost_channel *ch;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	ch = pdata->channel;

	/* Release nvhost module resources */
	nvhost_module_deinit(dev);

	/* Remove from nvhost device list */
	nvhost_device_list_remove(dev);

	/* Release chardev and device node for user space */
	/* NOTE(review): only the main channel cdev is destroyed here; the
	 * as-/ctrl-/dbg- nodes created in nvhost_client_user_init() are
	 * not — verify they are released elsewhere */
	device_destroy(nvhost_master->nvhost_class, ch->cdev.dev);
	cdev_del(&ch->cdev);

	/* Free nvhost channel */
	nvhost_free_channel(ch);

	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_release);
1236
1237 int nvhost_client_device_suspend(struct device *dev)
1238 {
1239         int ret = 0;
1240         struct nvhost_device_data *pdata = dev_get_drvdata(dev);
1241
1242         ret = nvhost_module_suspend(dev);
1243         if (ret)
1244                 return ret;
1245
1246         ret = nvhost_channel_suspend(pdata->channel);
1247         if (ret)
1248                 return ret;
1249
1250         dev_info(dev, "suspend status: %d\n", ret);
1251
1252         return ret;
1253 }
1254 EXPORT_SYMBOL(nvhost_client_device_suspend);
1255
/* PM resume hook for a client engine; counterpart of the suspend hook.
 * Always returns 0. */
int nvhost_client_device_resume(struct device *dev)
{
	nvhost_module_resume(dev);
	dev_info(dev, "resuming\n");
	return 0;
}
EXPORT_SYMBOL(nvhost_client_device_resume);
1263
1264 int nvhost_client_device_get_resources(struct platform_device *dev)
1265 {
1266         int i;
1267         void __iomem *regs = NULL;
1268         struct nvhost_device_data *pdata = platform_get_drvdata(dev);
1269
1270         for (i = 0; i < dev->num_resources; i++) {
1271                 struct resource *r = NULL;
1272
1273                 r = platform_get_resource(dev, IORESOURCE_MEM, i);
1274                 /* We've run out of mem resources */
1275                 if (!r)
1276                         break;
1277
1278                 regs = devm_request_and_ioremap(&dev->dev, r);
1279                 if (!regs)
1280                         goto fail;
1281
1282                 pdata->aperture[i] = regs;
1283         }
1284
1285         return 0;
1286
1287 fail:
1288         dev_err(&dev->dev, "failed to get register memory\n");
1289
1290         return -ENXIO;
1291 }
1292 EXPORT_SYMBOL(nvhost_client_device_get_resources);
1293
1294 /* This is a simple wrapper around request_firmware that takes
1295  * 'fw_name' and if available applies a SOC relative path prefix to it.
1296  * The caller is responsible for calling release_firmware later.
1297  */
1298 const struct firmware *
1299 nvhost_client_request_firmware(struct platform_device *dev, const char *fw_name)
1300 {
1301         struct nvhost_chip_support *op = nvhost_get_chip_ops();
1302         const struct firmware *fw;
1303         char *fw_path = NULL;
1304         int path_len, err;
1305
1306         /* This field is NULL when calling from SYS_EXIT.
1307            Add a check here to prevent crash in request_firmware */
1308         if (!current->fs) {
1309                 BUG();
1310                 return NULL;
1311         }
1312
1313         if (!fw_name)
1314                 return NULL;
1315
1316         if (op->soc_name) {
1317                 path_len = strlen(fw_name) + strlen(op->soc_name);
1318                 path_len += 2; /* for the path separator and zero terminator*/
1319
1320                 fw_path = kzalloc(sizeof(*fw_path) * path_len,
1321                                      GFP_KERNEL);
1322                 if (!fw_path)
1323                         return NULL;
1324
1325                 sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
1326                 fw_name = fw_path;
1327         }
1328
1329         err = request_firmware(&fw, fw_name, &dev->dev);
1330         kfree(fw_path);
1331         if (err) {
1332                 dev_err(&dev->dev, "failed to get firmware\n");
1333                 return NULL;
1334         }
1335
1336         /* note: caller must release_firmware */
1337         return fw;
1338 }