/*
 * drivers/video/tegra/host/bus_client.c
 *
 * Tegra Graphics Host Client Module
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/export.h>
#include <linux/firmware.h>

#include <trace/events/nvhost.h>

#include <linux/io.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include <mach/gpufuse.h>
#include <mach/hardware.h>
#include <mach/iomap.h>

#include "debug.h"
#include "bus_client.h"
#include "dev.h"
#include "nvhost_memmgr.h"
#include "chip_support.h"
#include "nvhost_acm.h"

#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "nvhost_hwctx.h"

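/*
 * Check that a window of 'count' 32-bit registers starting at 'offset'
 * fits inside the device's first memory aperture and does not wrap.
 */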
static int validate_reg(struct nvhost_device *ndev, u32 offset, int count)
{
        struct resource *r = nvhost_get_resource(ndev, IORESOURCE_MEM, 0);
        int err = 0;

        if (offset + 4 * count > resource_size(r)
                        || (offset + 4 * count < offset))
                err = -EPERM;

        return err;
}

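/*
 * Read 'count' consecutive 32-bit registers from the module aperture,
 * keeping the module powered (busy) for the duration of the access.
 */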
int nvhost_read_module_regs(struct nvhost_device *ndev,
                        u32 offset, int count, u32 *values)
{
        void __iomem *p = ndev->aperture + offset;
        int err;

        /* verify offset */
        err = validate_reg(ndev, offset, count);
        if (err)
                return err;

        nvhost_module_busy(ndev);
        while (count--) {
                *(values++) = readl(p);
                p += 4;
        }
        rmb();
        nvhost_module_idle(ndev);

        return 0;
}

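/*
 * Write 'count' consecutive 32-bit registers to the module aperture,
 * keeping the module powered (busy) for the duration of the access.
 */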
int nvhost_write_module_regs(struct nvhost_device *ndev,
                        u32 offset, int count, const u32 *values)
{
        void __iomem *p = ndev->aperture + offset;
        int err;

        /* verify offset */
        err = validate_reg(ndev, offset, count);
        if (err)
                return err;

        nvhost_module_busy(ndev);
        while (count--) {
                writel(*(values++), p);
                p += 4;
        }
        wmb();
        nvhost_module_idle(ndev);

        return 0;
}

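/*
 * Per-open state of a channel device node: the channel and its hardware
 * context, the submit header currently being streamed in, the job under
 * construction, and the client's memory manager, timeout and priority.
 */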
struct nvhost_channel_userctx {
        struct nvhost_channel *ch;
        struct nvhost_hwctx *hwctx;
        struct nvhost_submit_hdr_ext hdr;
        int num_relocshifts;
        struct nvhost_job *job;
        struct mem_mgr *memmgr;
        u32 timeout;
        u32 priority;
        int clientid;
};

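/*
 * Drop everything the open file held: the module client registration,
 * the channel and hardware context references, any half-built job and
 * the memory manager.
 */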
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_channel_userctx *priv = filp->private_data;

        trace_nvhost_channel_release(priv->ch->dev->name);

        filp->private_data = NULL;

        nvhost_module_remove_client(priv->ch->dev, priv);
        nvhost_putchannel(priv->ch, priv->hwctx);

        if (priv->hwctx)
                priv->ch->ctxhandler->put(priv->hwctx);

        if (priv->job)
                nvhost_job_put(priv->job);

        mem_op().put_mgr(priv->memmgr);
        kfree(priv);
        return 0;
}

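/*
 * Take a reference on the channel backing this device node and set up the
 * per-open context, including a hardware context if the channel supports
 * one.
 */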
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
        struct nvhost_channel_userctx *priv;
        struct nvhost_channel *ch;

        ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
        ch = nvhost_getchannel(ch);
        if (!ch)
                return -ENOMEM;
        trace_nvhost_channel_open(ch->dev->name);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                nvhost_putchannel(ch, NULL);
                return -ENOMEM;
        }
        filp->private_data = priv;
        priv->ch = ch;
        if (nvhost_module_add_client(ch->dev, priv))
                goto fail;

        if (ch->ctxhandler && ch->ctxhandler->alloc) {
                priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
                if (!priv->hwctx)
                        goto fail;
        }
        priv->priority = NVHOST_PRIORITY_MEDIUM;
        priv->clientid = atomic_add_return(1,
                        &nvhost_get_host(ch->dev)->clientid);
        priv->timeout = CONFIG_TEGRA_GRHOST_DEFAULT_TIMEOUT;

        return 0;
fail:
        nvhost_channelrelease(inode, filp);
        return -ENOMEM;
}

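/*
 * Validate the submit header received from user space and allocate a job
 * sized for the announced cmdbufs, relocations and wait checks.
 */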
static int set_submit(struct nvhost_channel_userctx *ctx)
{
        struct nvhost_device *ndev = ctx->ch->dev;
        struct nvhost_master *host = nvhost_get_host(ndev);

        /* submit should have at least 1 cmdbuf */
        if (!ctx->hdr.num_cmdbufs ||
                        !nvhost_syncpt_is_valid(&host->syncpt,
                                ctx->hdr.syncpt_id))
                return -EIO;

        if (!ctx->memmgr) {
                dev_err(&ndev->dev, "no nvmap context set\n");
                return -EFAULT;
        }

        if (ctx->job) {
                dev_warn(&ndev->dev, "performing channel submit when a job already exists\n");
                nvhost_job_put(ctx->job);
        }
        ctx->job = nvhost_job_alloc(ctx->ch,
                        ctx->hwctx,
                        &ctx->hdr,
                        ctx->memmgr,
                        ctx->priority,
                        ctx->clientid);
        if (!ctx->job)
                return -ENOMEM;
        ctx->job->timeout = ctx->timeout;

        if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
                ctx->num_relocshifts = ctx->hdr.num_relocs;

        return 0;
}

static void reset_submit(struct nvhost_channel_userctx *ctx)
{
        ctx->hdr.num_cmdbufs = 0;
        ctx->hdr.num_relocs = 0;
        ctx->num_relocshifts = 0;
        ctx->hdr.num_waitchks = 0;

        if (ctx->job) {
                nvhost_job_put(ctx->job);
                ctx->job = NULL;
        }
}

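/*
 * Stream a submit into the channel. User space writes a submit header
 * (unless one was already supplied via SUBMIT_EXT) followed by the
 * announced cmdbufs, relocations, wait checks and, for v2 submits,
 * relocation shifts. Each write consumes as many complete items as the
 * buffer holds and returns the number of bytes consumed.
 */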
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
                                size_t count, loff_t *offp)
{
        struct nvhost_channel_userctx *priv = filp->private_data;
        size_t remaining = count;
        int err = 0;
        struct nvhost_job *job = priv->job;
        struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
        const char *chname = priv->ch->dev->name;

        if (!job)
                return -EIO;

        while (remaining) {
                size_t consumed;
                if (!hdr->num_relocs &&
                    !priv->num_relocshifts &&
                    !hdr->num_cmdbufs &&
                    !hdr->num_waitchks) {
                        consumed = sizeof(struct nvhost_submit_hdr);
                        if (remaining < consumed)
                                break;
                        if (copy_from_user(hdr, buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
                        err = set_submit(priv);
                        if (err)
                                break;
                        trace_nvhost_channel_write_submit(chname,
                          count, hdr->num_cmdbufs, hdr->num_relocs,
                          hdr->syncpt_id, hdr->syncpt_incrs);
                } else if (hdr->num_cmdbufs) {
                        struct nvhost_cmdbuf cmdbuf;
                        consumed = sizeof(cmdbuf);
                        if (remaining < consumed)
                                break;
                        if (copy_from_user(&cmdbuf, buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        trace_nvhost_channel_write_cmdbuf(chname,
                                cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
                        nvhost_job_add_gather(job,
                                cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
                        hdr->num_cmdbufs--;
                } else if (hdr->num_relocs) {
                        int numrelocs = remaining / sizeof(struct nvhost_reloc);
                        if (!numrelocs)
                                break;
                        numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
                        consumed = numrelocs * sizeof(struct nvhost_reloc);
                        if (copy_from_user(&job->relocarray[job->num_relocs],
                                        buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        while (numrelocs) {
                                struct nvhost_reloc *reloc =
                                        &job->relocarray[job->num_relocs];
                                trace_nvhost_channel_write_reloc(chname,
                                        reloc->cmdbuf_mem,
                                        reloc->cmdbuf_offset,
                                        reloc->target,
                                        reloc->target_offset);
                                job->num_relocs++;
                                hdr->num_relocs--;
                                numrelocs--;
                        }
                } else if (hdr->num_waitchks) {
                        int numwaitchks =
                                (remaining / sizeof(struct nvhost_waitchk));
                        if (!numwaitchks)
                                break;
                        numwaitchks = min_t(int,
                                numwaitchks, hdr->num_waitchks);
                        consumed = numwaitchks * sizeof(struct nvhost_waitchk);
                        if (copy_from_user(&job->waitchk[job->num_waitchk],
                                        buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        trace_nvhost_channel_write_waitchks(
                          chname, numwaitchks,
                          hdr->waitchk_mask);
                        job->num_waitchk += numwaitchks;
                        hdr->num_waitchks -= numwaitchks;
                } else if (priv->num_relocshifts) {
                        int next_shift =
                                job->num_relocs - priv->num_relocshifts;
                        int num =
                                (remaining / sizeof(struct nvhost_reloc_shift));
                        if (!num)
                                break;
                        num = min_t(int, num, priv->num_relocshifts);
                        consumed = num * sizeof(struct nvhost_reloc_shift);
                        if (copy_from_user(&job->relocshiftarray[next_shift],
                                        buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        priv->num_relocshifts -= num;
                } else {
                        err = -EFAULT;
                        break;
                }
                remaining -= consumed;
                buf += consumed;
        }

        if (err < 0) {
                dev_err(&priv->ch->dev->dev, "channel write error\n");
                reset_submit(priv);
                return err;
        }

        return count - remaining;
}

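/*
 * Kick off a fully streamed submit: pin the job's memory, honour the
 * null-kickoff and forced-timeout debug knobs, submit the gathers to the
 * channel and report the syncpoint value that marks completion.
 */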
static int nvhost_ioctl_channel_flush(
        struct nvhost_channel_userctx *ctx,
        struct nvhost_get_param_args *args,
        int null_kickoff)
{
        struct nvhost_device *ndev = to_nvhost_device(&ctx->ch->dev->dev);
        int err;

        trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

        if (!ctx->job ||
            ctx->hdr.num_relocs ||
            ctx->hdr.num_cmdbufs ||
            ctx->hdr.num_waitchks) {
                reset_submit(ctx);
                dev_err(&ndev->dev, "channel submit out of sync\n");
                return -EFAULT;
        }

        err = nvhost_job_pin(ctx->job, &nvhost_get_host(ndev)->syncpt);
        if (err) {
                dev_warn(&ndev->dev, "nvhost_job_pin failed: %d\n", err);
                goto fail;
        }

        if (nvhost_debug_null_kickoff_pid == current->tgid)
                null_kickoff = 1;
        ctx->job->null_kickoff = null_kickoff;

        if ((nvhost_debug_force_timeout_pid == current->tgid) &&
            (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
                ctx->timeout = nvhost_debug_force_timeout_val;
        }

        /* context switch if needed, and submit user's gathers to the channel */
        err = nvhost_channel_submit(ctx->job);
        args->value = ctx->job->syncpt_end;

fail:
        if (err)
                nvhost_job_unpin(ctx->job);

        nvhost_job_put(ctx->job);
        ctx->job = NULL;

        return err;
}

static int nvhost_ioctl_channel_read_3d_reg(struct nvhost_channel_userctx *ctx,
        struct nvhost_read_3d_reg_args *args)
{
        return nvhost_channel_read_reg(ctx->ch, ctx->hwctx,
                        args->offset, &args->value);
}

static int moduleid_to_index(struct nvhost_device *dev, u32 moduleid)
{
        int i;

        for (i = 0; i < NVHOST_MODULE_MAX_CLOCKS; i++) {
                if (dev->clocks[i].moduleid == moduleid)
                        return i;
        }

        /* Old user space is sending a random number in args. Return clock
         * zero in these cases. */
        return 0;
}

static int nvhost_ioctl_channel_set_rate(struct nvhost_channel_userctx *ctx,
        u32 moduleid, u32 rate)
{
        int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;

        return nvhost_module_set_rate(ctx->ch->dev, ctx, rate, index);
}

static int nvhost_ioctl_channel_get_rate(struct nvhost_channel_userctx *ctx,
        u32 moduleid, u32 *rate)
{
        int index = moduleid ? moduleid_to_index(ctx->ch->dev, moduleid) : 0;

        return nvhost_module_get_rate(ctx->ch->dev,
                        (unsigned long *)rate, index);
}

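/*
 * Read or write register blocks on behalf of user space. Each offset
 * describes a block of 'block_size' bytes transferred in batches of at
 * most 64 words through an on-stack buffer; every batch is range-checked
 * against the device aperture by the register accessors.
 */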
static int nvhost_ioctl_channel_module_regrdwr(
        struct nvhost_channel_userctx *ctx,
        struct nvhost_ctrl_module_regrdwr_args *args)
{
        u32 num_offsets = args->num_offsets;
        u32 *offsets = args->offsets;
        u32 *values = args->values;
        u32 vals[64];
        struct nvhost_device *ndev;

        trace_nvhost_ioctl_channel_module_regrdwr(args->id,
                args->num_offsets, args->write);

        /* Check that there is something to read and that block size is
         * u32 aligned */
        if (num_offsets == 0 || args->block_size & 3)
                return -EINVAL;

        ndev = ctx->ch->dev;
        BUG_ON(!ndev);

        while (num_offsets--) {
                int err;
                u32 offs;
                int remaining = args->block_size >> 2;

                if (get_user(offs, offsets))
                        return -EFAULT;

                offsets++;
                while (remaining) {
                        int batch = min(remaining, 64);
                        if (args->write) {
                                if (copy_from_user(vals, values,
                                                batch * sizeof(u32)))
                                        return -EFAULT;

                                err = nvhost_write_module_regs(ndev,
                                        offs, batch, vals);
                                if (err)
                                        return err;
                        } else {
                                err = nvhost_read_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;

                                if (copy_to_user(values, vals,
                                                batch * sizeof(u32)))
                                        return -EFAULT;
                        }

                        remaining -= batch;
                        offs += batch * sizeof(u32);
                        values += batch;
                }
        }

        return 0;
}

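/*
 * Channel ioctl dispatcher. Arguments are staged through an on-stack
 * buffer: copied in for _IOC_WRITE commands, handed to the per-command
 * handler, and copied back out for _IOC_READ commands on success.
 */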
static long nvhost_channelctl(struct file *filp,
        unsigned int cmd, unsigned long arg)
{
        struct nvhost_channel_userctx *priv = filp->private_data;
        u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
        int err = 0;

        if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
                (_IOC_NR(cmd) == 0) ||
                (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST) ||
                (_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE))
                return -EFAULT;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        switch (cmd) {
        case NVHOST_IOCTL_CHANNEL_FLUSH:
                err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
                break;
        case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
                err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
                break;
        case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
        {
                struct nvhost_submit_hdr_ext *hdr;

                if (priv->hdr.num_relocs ||
                    priv->num_relocshifts ||
                    priv->hdr.num_cmdbufs ||
                    priv->hdr.num_waitchks) {
                        reset_submit(priv);
                        dev_err(&priv->ch->dev->dev,
                                "channel submit out of sync\n");
                        err = -EIO;
                        break;
                }

                hdr = (struct nvhost_submit_hdr_ext *)buf;
                if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
                        dev_err(&priv->ch->dev->dev,
                                "submit version %d > max supported %d\n",
                                hdr->submit_version,
                                NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
                        err = -EINVAL;
                        break;
                }
                memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
                err = set_submit(priv);
                trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
                        priv->hdr.submit_version,
                        priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
                        priv->hdr.num_waitchks,
                        priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
                /* the host syncpt ID is used by the RM and is never given out */
                BUG_ON(priv->ch->dev->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
                ((struct nvhost_get_param_args *)buf)->value =
                        priv->ch->dev->syncpts;
                break;
        case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
                ((struct nvhost_get_param_args *)buf)->value =
                        priv->ch->dev->waitbases;
                break;
        case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
                ((struct nvhost_get_param_args *)buf)->value =
                        priv->ch->dev->modulemutexes;
                break;
        case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
        {
                int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
                struct mem_mgr *new_client = mem_op().get_mgr_file(fd);

                if (IS_ERR(new_client)) {
                        err = PTR_ERR(new_client);
                        break;
                }

                if (priv->memmgr)
                        mem_op().put_mgr(priv->memmgr);

                priv->memmgr = new_client;
                break;
        }
        case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
                err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
        {
                struct nvhost_clk_rate_args *arg =
                                (struct nvhost_clk_rate_args *)buf;

                err = nvhost_ioctl_channel_get_rate(priv,
                                arg->moduleid, &arg->rate);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
        {
                struct nvhost_clk_rate_args *arg =
                                (struct nvhost_clk_rate_args *)buf;

                err = nvhost_ioctl_channel_set_rate(priv,
                        arg->moduleid, arg->rate);
                break;
        }
        case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
                priv->timeout =
                        (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
                dev_dbg(&priv->ch->dev->dev,
                        "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
                        __func__, priv->timeout, priv);
                break;
        case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
                ((struct nvhost_get_param_args *)buf)->value =
                                priv->hwctx->has_timedout;
                break;
        case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
                priv->priority =
                        (u32)((struct nvhost_set_priority_args *)buf)->priority;
                break;
        case NVHOST_IOCTL_CHANNEL_MODULE_REGRDWR:
                err = nvhost_ioctl_channel_module_regrdwr(priv, (void *)buf);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
                /* copy_to_user() returns the number of bytes left uncopied,
                 * not an errno, so map any failure to -EFAULT */
                if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
                        err = -EFAULT;
        }

        return err;
}

static const struct file_operations nvhost_channelops = {
        .owner = THIS_MODULE,
        .release = nvhost_channelrelease,
        .open = nvhost_channelopen,
        .write = nvhost_channelwrite,
        .unlocked_ioctl = nvhost_channelctl
};

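/*
 * Create the character device node through which user space talks to this
 * client's channel.
 */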
int nvhost_client_user_init(struct nvhost_device *dev)
{
        int err;
        dev_t devno;

        struct nvhost_channel *ch = dev->channel;
        err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
        if (err < 0) {
                dev_err(&dev->dev, "failed to allocate devno\n");
                goto fail;
        }

        cdev_init(&ch->cdev, &nvhost_channelops);
        ch->cdev.owner = THIS_MODULE;

        err = cdev_add(&ch->cdev, devno, 1);
        if (err < 0) {
                dev_err(&dev->dev,
                        "failed to add chan %i cdev\n", dev->index);
                goto fail;
        }
        ch->node = device_create(nvhost_get_host(dev)->nvhost_class,
                        NULL, devno, NULL,
                        IFACE_NAME "-%s", dev->name);
        if (IS_ERR(ch->node)) {
                err = PTR_ERR(ch->node);
                dev_err(&dev->dev,
                        "failed to create %s channel device\n", dev->name);
                goto fail;
        }

        return 0;
fail:
        return err;
}

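/*
 * Bring up one client device: allocate and initialise its channel, create
 * the user-space device node, initialise the module and hook up debugfs.
 */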
int nvhost_client_device_init(struct nvhost_device *dev)
{
        int err;
        struct nvhost_master *nvhost_master = nvhost_get_host(dev);
        struct nvhost_channel *ch;

        ch = nvhost_alloc_channel(dev);
        if (ch == NULL)
                return -ENODEV;

        /* store the pointer to this device for channel */
        ch->dev = dev;

        err = nvhost_channel_init(ch, nvhost_master, dev->index);
        if (err)
                goto fail;

        err = nvhost_client_user_init(dev);
        if (err)
                goto fail;

        err = nvhost_module_init(dev);
        if (err)
                goto fail;

        if (tickctrl_op().init_channel)
                tickctrl_op().init_channel(dev);

        nvhost_device_debug_init(dev);

        dev_info(&dev->dev, "initialized\n");

        return 0;

fail:
        /* Add clean-up */
        nvhost_free_channel(ch);
        return err;
}

int nvhost_client_device_suspend(struct nvhost_device *dev)
{
        int ret = 0;

        ret = nvhost_channel_suspend(dev->channel);
        if (ret)
                return ret;

        dev_info(&dev->dev, "suspend status: %d\n", ret);

        return ret;
}

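/*
 * Claim and map the device's register aperture so register accesses can be
 * made through dev->aperture.
 */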
int nvhost_client_device_get_resources(struct nvhost_device *dev)
{
        struct resource *r = NULL;
        void __iomem *regs = NULL;
        struct resource *reg_mem = NULL;

        r = nvhost_get_resource(dev, IORESOURCE_MEM, 0);
        if (!r)
                goto fail;

        reg_mem = request_mem_region(r->start, resource_size(r), dev->name);
        if (!reg_mem)
                goto fail;

        regs = ioremap(r->start, resource_size(r));
        if (!regs)
                goto fail;

        dev->reg_mem = reg_mem;
        dev->aperture = regs;

        return 0;

fail:
        if (reg_mem)
                release_mem_region(r->start, resource_size(r));
        if (regs)
                iounmap(regs);

        dev_err(&dev->dev, "failed to get register memory\n");

        return -ENXIO;
}

/* This is a simple wrapper around request_firmware that takes 'fw_name'
 * and, when the chip support provides a SOC name, applies a SOC-relative
 * path prefix to it. The caller is responsible for calling
 * release_firmware later.
 */
const struct firmware *
nvhost_client_request_firmware(struct nvhost_device *dev, const char *fw_name)
{
        struct nvhost_chip_support *op = nvhost_get_chip_ops();
        const struct firmware *fw;
        char *fw_path = NULL;
        int path_len, err;

        if (!fw_name)
                return NULL;

        if (op->soc_name) {
                path_len = strlen(fw_name) + strlen(op->soc_name);
                path_len += 2; /* for the path separator and zero terminator */

                fw_path = kzalloc(sizeof(*fw_path) * path_len, GFP_KERNEL);
                if (!fw_path)
                        return NULL;

                sprintf(fw_path, "%s/%s", op->soc_name, fw_name);
                fw_name = fw_path;
        }

        err = request_firmware(&fw, fw_name, &dev->dev);
        kfree(fw_path);
        if (err) {
                dev_err(&dev->dev, "failed to get firmware\n");
                return NULL;
        }

        /* note: caller must release_firmware */
        return fw;
}