[ARM/tegra] nvhost: Tegra3 support
[linux-2.6.git] / drivers / video / tegra / host / dev.c
1 /*
2  * drivers/video/tegra/host/dev.c
3  *
4  * Tegra Graphics Host Driver Entrypoint
5  *
6  * Copyright (c) 2010, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include "dev.h"
24
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/spinlock.h>
28 #include <linux/fs.h>
29 #include <linux/cdev.h>
30 #include <linux/platform_device.h>
31 #include <linux/uaccess.h>
32 #include <linux/file.h>
33 #include <linux/clk.h>
34
35 #include <asm/io.h>
36
37 #include <mach/nvhost.h>
38 #include <mach/nvmap.h>
39
#define DRIVER_NAME "tegra_grhost"
#define IFACE_NAME "nvhost"

/* chrdev numbering; a major of 0 requests dynamic allocation in
 * nvhost_user_init() */
static int nvhost_major = NVHOST_MAJOR;
static int nvhost_minor = NVHOST_CHANNEL_BASE;
45
/*
 * Per-fd state for a channel device node.  Holds the submit currently being
 * streamed in via write() and the gather list it is assembled into.
 */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;	/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;	/* hw context, NULL if channel has none */
	/* the following fields are overwritten as a unit by the submit header
	 * copy in nvhost_channelwrite() */
	u32 syncpt_id;		/* syncpt the submit will increment */
	u32 syncpt_incrs;	/* number of increments in the submit */
	u32 cmdbufs_pending;	/* cmdbuf records still expected from user */
	u32 relocs_pending;	/* reloc records still expected from user */
	u32 null_kickoff;	/* nonzero = submit without real kickoff */
	struct nvmap_handle_ref *gather_mem;	/* backing mem of gather list */
	u32 *gathers;		/* kernel mapping of gather_mem */
	u32 *cur_gather;	/* next free (words, address) pair in gathers */
	int pinarray_size;	/* entries queued in pinarray */
	struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
	struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
	struct nvmap_client *nvmap;	/* set via SET_NVMAP_FD ioctl */
};
62
/* Per-fd state for the control node: which module locks this fd holds.
 * mod_locks[0] tracks the host1x power ref, the rest track hw mlocks. */
struct nvhost_ctrl_userctx {
	struct nvhost_master *dev;
	u32 mod_locks[NV_HOST1X_NB_MLOCKS];
};
67
/*
 * Release a channel fd: undo everything nvhost_channelopen() set up.
 * Also invoked as the error path of open, so each teardown step must
 * tolerate a partially initialized context.
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	filp->private_data = NULL;

	/* drop the channel ref taken by nvhost_getchannel() in open */
	nvhost_putchannel(priv->ch, priv->hwctx);

	if (priv->hwctx)
		priv->ch->ctxhandler.put(priv->hwctx);

	/* unmap before freeing the backing nvmap handle */
	if (priv->gathers)
		nvmap_munmap(priv->gather_mem, priv->gathers);

	if (!IS_ERR_OR_NULL(priv->gather_mem))
		nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);

	/* priv->nvmap may still be NULL if SET_NVMAP_FD was never issued;
	 * assumes nvmap_client_put(NULL) is a no-op -- TODO confirm */
	nvmap_client_put(priv->nvmap);
	kfree(priv);
	return 0;
}
89
90 static int nvhost_channelopen(struct inode *inode, struct file *filp)
91 {
92         struct nvhost_channel_userctx *priv;
93         struct nvhost_channel *ch;
94
95         ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
96         ch = nvhost_getchannel(ch);
97         if (!ch)
98                 return -ENOMEM;
99
100         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
101         if (!priv) {
102                 nvhost_putchannel(ch, NULL);
103                 return -ENOMEM;
104         }
105         filp->private_data = priv;
106         priv->ch = ch;
107         priv->gather_mem = nvmap_alloc(ch->dev->nvmap,
108                                 sizeof(u32) * 2 * NVHOST_MAX_GATHERS, 32,
109                                 NVMAP_HANDLE_CACHEABLE);
110         if (IS_ERR(priv->gather_mem))
111                 goto fail;
112
113         if (ch->ctxhandler.alloc) {
114                 priv->hwctx = ch->ctxhandler.alloc(ch);
115                 if (!priv->hwctx)
116                         goto fail;
117         }
118
119         priv->gathers = nvmap_mmap(priv->gather_mem);
120
121         return 0;
122 fail:
123         nvhost_channelrelease(inode, filp);
124         return -ENOMEM;
125 }
126
/*
 * Append one gather (a pointer to a user cmdbuf) to the context's gather
 * list.  Each gather occupies two u32s in ctx->gathers: slot [0] holds the
 * word count, slot [1] the cmdbuf address -- left unwritten here; instead a
 * pinarray entry is queued so nvmap_pin_array() patches in the physical
 * address at flush time.
 */
static void add_gather(struct nvhost_channel_userctx *ctx,
		u32 mem_id, u32 words, u32 offset)
{
	struct nvmap_pinarray_elem *pin;
	u32* cur_gather = ctx->cur_gather;
	/* NOTE(review): no bounds check -- relies on the submit header keeping
	 * cmdbufs within NVHOST_MAX_GATHERS/NVHOST_MAX_HANDLES; verify callers */
	pin = &ctx->pinarray[ctx->pinarray_size++];
	pin->patch_mem = (u32)nvmap_ref_to_handle(ctx->gather_mem);
	/* patch target: byte offset of the address slot (cur_gather[1])
	 * within gather_mem */
	pin->patch_offset = ((cur_gather + 1) - ctx->gathers) * sizeof(u32);
	/* source to pin: the user's cmdbuf handle plus offset */
	pin->pin_mem = mem_id;
	pin->pin_offset = offset;
	cur_gather[0] = words;
	ctx->cur_gather = cur_gather + 2;
}
140
141 static void reset_submit(struct nvhost_channel_userctx *ctx)
142 {
143         ctx->cmdbufs_pending = 0;
144         ctx->relocs_pending = 0;
145 }
146
/*
 * write() protocol for a channel fd: userspace streams a submit as one
 * struct nvhost_submit_hdr, followed by cmdbufs_pending nvhost_cmdbuf
 * records and relocs_pending nvhost_reloc records, then issues the FLUSH
 * (or NULL_KICKOFF) ioctl.  Partial input is fine; the return value tells
 * the caller how much was consumed.
 */
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;

	while (remaining) {
		size_t consumed;
		if (!priv->relocs_pending && !priv->cmdbufs_pending) {
			/* expecting a submit header; it is copied directly
			 * over the syncpt_id..null_kickoff fields of priv --
			 * assumes struct nvhost_submit_hdr matches that field
			 * layout exactly (TODO confirm against mach/nvhost.h) */
			consumed = sizeof(struct nvhost_submit_hdr);
			if (remaining < consumed)
				break;
			if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			/* a submit with no cmdbufs is malformed */
			if (!priv->cmdbufs_pending) {
				err = -EFAULT;
				break;
			}
			/* start building a fresh gather list */
			priv->cur_gather = priv->gathers;
			priv->pinarray_size = 0;
		} else if (priv->cmdbufs_pending) {
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			add_gather(priv,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			priv->cmdbufs_pending--;
		} else if (priv->relocs_pending) {
			/* relocs are bulk-copied straight into the pinarray;
			 * assumes struct nvhost_reloc matches
			 * struct nvmap_pinarray_elem -- TODO confirm */
			int numrelocs = remaining / sizeof(struct nvhost_reloc);
			if (!numrelocs)
				break;
			numrelocs = min_t(int, numrelocs, priv->relocs_pending);
			consumed = numrelocs * sizeof(struct nvhost_reloc);
			if (copy_from_user(&priv->pinarray[priv->pinarray_size],
						buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->pinarray_size += numrelocs;
			priv->relocs_pending -= numrelocs;
		} else {
			/* defensive: the three branches above are exhaustive */
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
		/* drop the half-built submit so state stays consistent */
		reset_submit(priv);
		return err;
	}

	return (count - remaining);
}
211
212 static int nvhost_ioctl_channel_flush(struct nvhost_channel_userctx *ctx,
213                                       struct nvhost_get_param_args *args,
214                                       int null_kickoff)
215 {
216         struct device *device = &ctx->ch->dev->pdev->dev;
217         int num_unpin;
218         int err;
219
220         if (ctx->relocs_pending || ctx->cmdbufs_pending) {
221                 reset_submit(ctx);
222                 dev_err(device, "channel submit out of sync\n");
223                 return -EFAULT;
224         }
225         if (!ctx->nvmap) {
226                 dev_err(device, "no nvmap context set\n");
227                 return -EFAULT;
228         }
229         if (ctx->cur_gather == ctx->gathers)
230                 return 0;
231
232         /* pin mem handles and patch physical addresses */
233         num_unpin = nvmap_pin_array(ctx->nvmap,
234                                     nvmap_ref_to_handle(ctx->gather_mem),
235                                     ctx->pinarray, ctx->pinarray_size,
236                                     ctx->unpinarray);
237         if (num_unpin < 0) {
238                 dev_warn(device, "nvmap_pin_array failed: %d\n", num_unpin);
239                 return num_unpin;
240         }
241
242         /* context switch if needed, and submit user's gathers to the channel */
243         err = nvhost_channel_submit(ctx->ch, ctx->hwctx, ctx->nvmap,
244                                 ctx->gathers, ctx->cur_gather,
245                                 ctx->unpinarray, num_unpin,
246                                 ctx->syncpt_id, ctx->syncpt_incrs,
247                                 &args->value,
248                                 ctx->null_kickoff != 0);
249         if (err)
250                 nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
251
252         return 0;
253 }
254
255 static long nvhost_channelctl(struct file *filp,
256         unsigned int cmd, unsigned long arg)
257 {
258         struct nvhost_channel_userctx *priv = filp->private_data;
259         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
260         int err = 0;
261
262         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
263                 (_IOC_NR(cmd) == 0) ||
264                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
265                 return -EFAULT;
266
267         BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
268
269         if (_IOC_DIR(cmd) & _IOC_WRITE) {
270                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
271                         return -EFAULT;
272         }
273
274         switch (cmd) {
275         case NVHOST_IOCTL_CHANNEL_FLUSH:
276                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
277                 break;
278         case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
279                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
280                 break;
281         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
282                 ((struct nvhost_get_param_args *)buf)->value =
283                         priv->ch->desc->syncpts;
284                 break;
285         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
286                 ((struct nvhost_get_param_args *)buf)->value =
287                         priv->ch->desc->waitbases;
288                 break;
289         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
290                 ((struct nvhost_get_param_args *)buf)->value =
291                         priv->ch->desc->modulemutexes;
292                 break;
293         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
294         {
295                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
296                 struct nvmap_client *new_client = nvmap_client_get_file(fd);
297
298                 if (IS_ERR(new_client)) {
299                         err = PTR_ERR(new_client);
300                         break;
301                 }
302
303                 if (priv->nvmap)
304                         nvmap_client_put(priv->nvmap);
305
306                 priv->nvmap = new_client;
307                 break;
308         }
309         default:
310                 err = -ENOTTY;
311                 break;
312         }
313
314         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
315                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
316
317         return err;
318 }
319
320 static struct file_operations nvhost_channelops = {
321         .owner = THIS_MODULE,
322         .release = nvhost_channelrelease,
323         .open = nvhost_channelopen,
324         .write = nvhost_channelwrite,
325         .unlocked_ioctl = nvhost_channelctl
326 };
327
/*
 * Release a control fd: drop any module locks the fd still holds so a
 * crashing client cannot leave the hardware locked or powered.
 */
static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_ctrl_userctx *priv = filp->private_data;
	int i;

	filp->private_data = NULL;
	/* lock 0 is the host1x power ref, not a hardware mlock */
	if (priv->mod_locks[0])
		nvhost_module_idle(&priv->dev->mod);
	for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
		if (priv->mod_locks[i])
			nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
	kfree(priv);
	return 0;
}
342
343 static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
344 {
345         struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
346         struct nvhost_ctrl_userctx *priv;
347
348         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
349         if (!priv)
350                 return -ENOMEM;
351
352         priv->dev = host;
353         filp->private_data = priv;
354         return 0;
355 }
356
357 static int nvhost_ioctl_ctrl_syncpt_read(
358         struct nvhost_ctrl_userctx *ctx,
359         struct nvhost_ctrl_syncpt_read_args *args)
360 {
361         if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
362                 return -EINVAL;
363         args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
364         return 0;
365 }
366
367 static int nvhost_ioctl_ctrl_syncpt_incr(
368         struct nvhost_ctrl_userctx *ctx,
369         struct nvhost_ctrl_syncpt_incr_args *args)
370 {
371         if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
372                 return -EINVAL;
373         nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
374         return 0;
375 }
376
377 static int nvhost_ioctl_ctrl_syncpt_wait(
378         struct nvhost_ctrl_userctx *ctx,
379         struct nvhost_ctrl_syncpt_wait_args *args)
380 {
381         u32 timeout;
382         if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
383                 return -EINVAL;
384         if (args->timeout == NVHOST_NO_TIMEOUT)
385                 timeout = MAX_SCHEDULE_TIMEOUT;
386         else
387                 timeout = (u32)msecs_to_jiffies(args->timeout);
388
389         return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
390                                         args->thresh, timeout);
391 }
392
/*
 * CTRL_MODULE_MUTEX: take or release a module lock on behalf of userspace.
 * id 0 is special -- it is not a hardware mlock but a power refcount on the
 * whole host1x module.  Per-fd bookkeeping in mod_locks makes lock/unlock
 * idempotent per fd and lets nvhost_ctrlrelease() clean up anything still
 * held when the fd closes.
 */
static int nvhost_ioctl_ctrl_module_mutex(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_mutex_args *args)
{
	int err = 0;
	if (args->id >= NV_HOST1X_NB_MLOCKS ||
	    args->lock > 1)
		return -EINVAL;

	if (args->lock && !ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_busy(&ctx->dev->mod);
		else
			err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
		/* only record ownership if the lock was actually taken */
		if (!err)
			ctx->mod_locks[args->id] = 1;
	}
	else if (!args->lock && ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_idle(&ctx->dev->mod);
		else
			nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
		ctx->mod_locks[args->id] = 0;
	}
	return err;
}
419
420 static int nvhost_ioctl_ctrl_module_regrdwr(
421         struct nvhost_ctrl_userctx *ctx,
422         struct nvhost_ctrl_module_regrdwr_args *args)
423 {
424         u32 num_offsets = args->num_offsets;
425         u32 *offsets = args->offsets;
426         void *values = args->values;
427         u32 vals[64];
428
429         if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
430             (num_offsets == 0))
431                 return -EINVAL;
432
433         while (num_offsets--) {
434                 u32 remaining = args->block_size;
435                 u32 offs;
436                 if (get_user(offs, offsets))
437                         return -EFAULT;
438                 offsets++;
439                 while (remaining) {
440                         u32 batch = min(remaining, 64*sizeof(u32));
441                         if (args->write) {
442                                 if (copy_from_user(vals, values, batch))
443                                         return -EFAULT;
444                                 nvhost_write_module_regs(&ctx->dev->cpuaccess,
445                                                         args->id, offs, batch, vals);
446                         } else {
447                                 nvhost_read_module_regs(&ctx->dev->cpuaccess,
448                                                         args->id, offs, batch, vals);
449                                 if (copy_to_user(values, vals, batch))
450                                         return -EFAULT;
451                         }
452                         remaining -= batch;
453                         offs += batch;
454                         values += batch;
455                 }
456         }
457
458         return 0;
459 }
460
461 static long nvhost_ctrlctl(struct file *filp,
462         unsigned int cmd, unsigned long arg)
463 {
464         struct nvhost_ctrl_userctx *priv = filp->private_data;
465         u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
466         int err = 0;
467
468         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
469                 (_IOC_NR(cmd) == 0) ||
470                 (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
471                 return -EFAULT;
472
473         BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
474
475         if (_IOC_DIR(cmd) & _IOC_WRITE) {
476                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
477                         return -EFAULT;
478         }
479
480         switch (cmd) {
481         case NVHOST_IOCTL_CTRL_SYNCPT_READ:
482                 err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
483                 break;
484         case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
485                 err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
486                 break;
487         case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
488                 err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
489                 break;
490         case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
491                 err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
492                 break;
493         case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
494                 err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
495                 break;
496         default:
497                 err = -ENOTTY;
498                 break;
499         }
500
501         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
502                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
503
504         return err;
505 }
506
507 static struct file_operations nvhost_ctrlops = {
508         .owner = THIS_MODULE,
509         .release = nvhost_ctrlrelease,
510         .open = nvhost_ctrlopen,
511         .unlocked_ioctl = nvhost_ctrlctl
512 };
513
/*
 * Power callback for the host1x module itself.  On power-on, restart
 * interrupt handling at the current clock rate (syncpt state is deliberately
 * NOT reset -- see the retained comment).  On power-off, quiesce every
 * channel, snapshot syncpt values for later restore, then stop interrupts.
 */
static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
{
	struct nvhost_master *dev = container_of(mod, struct nvhost_master, mod);

	if (action == NVHOST_POWER_ACTION_ON) {
		nvhost_intr_start(&dev->intr, clk_get_rate(mod->clk[0]));
		/* don't do it, as display may have changed syncpt
		 * after the last save
		 * nvhost_syncpt_reset(&dev->syncpt);
		 */
	} else if (action == NVHOST_POWER_ACTION_OFF) {
		int i;
		for (i = 0; i < NVHOST_NUMCHANNELS; i++)
			nvhost_channel_suspend(&dev->channels[i]);
		nvhost_syncpt_save(&dev->syncpt);
		nvhost_intr_stop(&dev->intr);
	}
}
532
533 static int __devinit nvhost_user_init(struct nvhost_master *host)
534 {
535         int i, err, devno;
536
537         host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
538         if (IS_ERR(host->nvhost_class)) {
539                 err = PTR_ERR(host->nvhost_class);
540                 dev_err(&host->pdev->dev, "failed to create class\n");
541                 goto fail;
542         }
543
544         if (nvhost_major) {
545                 devno = MKDEV(nvhost_major, nvhost_minor);
546                 err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
547         } else {
548                 err = alloc_chrdev_region(&devno, nvhost_minor,
549                                         NVHOST_NUMCHANNELS + 1, IFACE_NAME);
550                 nvhost_major = MAJOR(devno);
551         }
552         if (err < 0) {
553                 dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
554                 goto fail;
555         }
556
557         for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
558                 struct nvhost_channel *ch = &host->channels[i];
559
560                 if (!strcmp(ch->desc->name, "display") &&
561                     !nvhost_access_module_regs(&host->cpuaccess,
562                                                 NVHOST_MODULE_DISPLAY_A))
563                         continue;
564
565                 cdev_init(&ch->cdev, &nvhost_channelops);
566                 ch->cdev.owner = THIS_MODULE;
567
568                 devno = MKDEV(nvhost_major, nvhost_minor + i);
569                 err = cdev_add(&ch->cdev, devno, 1);
570                 if (err < 0) {
571                         dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
572                         goto fail;
573                 }
574                 ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
575                                 IFACE_NAME "-%s", ch->desc->name);
576                 if (IS_ERR(ch->node)) {
577                         err = PTR_ERR(ch->node);
578                         dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
579                         goto fail;
580                 }
581         }
582
583         cdev_init(&host->cdev, &nvhost_ctrlops);
584         host->cdev.owner = THIS_MODULE;
585         devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
586         err = cdev_add(&host->cdev, devno, 1);
587         if (err < 0)
588                 goto fail;
589         host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
590                         IFACE_NAME "-ctrl");
591         if (IS_ERR(host->ctrl)) {
592                 err = PTR_ERR(host->ctrl);
593                 dev_err(&host->pdev->dev, "failed to create ctrl device\n");
594                 goto fail;
595         }
596
597         return 0;
598 fail:
599         return err;
600 }
601
/*
 * Probe: map the host1x register aperture, create the nvmap client, init
 * channels, cpuaccess, interrupts, the userspace interface and power
 * management, then reset syncpt state and register on the nvhost bus.
 */
static int __devinit nvhost_probe(struct platform_device *pdev)
{
	struct nvhost_master *host;
	struct resource *regs, *intr0, *intr1;
	int i, err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* two IRQs; nvhost_intr_init() takes them as (intr1, intr0) --
	 * presumably syncpt vs general irq, TODO confirm against intr code */
	intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!regs || !intr0 || !intr1) {
		dev_err(&pdev->dev, "missing required platform resources\n");
		return -ENXIO;
	}

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->pdev = pdev;

	host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
	if (!host->nvmap) {
		dev_err(&pdev->dev, "unable to create nvmap client\n");
		err = -EIO;
		goto fail;
	}

	host->reg_mem = request_mem_region(regs->start,
					resource_size(regs), pdev->name);
	if (!host->reg_mem) {
		dev_err(&pdev->dev, "failed to get host register memory\n");
		err = -ENXIO;
		goto fail;
	}
	host->aperture = ioremap(regs->start, resource_size(regs));
	if (!host->aperture) {
		dev_err(&pdev->dev, "failed to remap host registers\n");
		err = -ENXIO;
		goto fail;
	}
	/* sync registers live inside channel 0's portion of the aperture */
	host->sync_aperture = host->aperture +
		(NV_HOST1X_CHANNEL0_BASE +
			HOST1X_CHANNEL_SYNC_REG_BASE);

	for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
		struct nvhost_channel *ch = &host->channels[i];
		err = nvhost_channel_init(ch, host, i);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to init channel %d\n", i);
			goto fail;
		}
	}

	err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
	if (err) goto fail;
	err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
	if (err) goto fail;
	err = nvhost_user_init(host);
	if (err) goto fail;
	err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
	if (err) goto fail;

	platform_set_drvdata(pdev, host);

	/* bring syncpts to a known state with the clock briefly enabled */
	clk_enable(host->mod.clk[0]);
	nvhost_syncpt_reset(&host->syncpt);
	clk_disable(host->mod.clk[0]);

	nvhost_bus_register(host);

	nvhost_debug_init(host);

	dev_info(&pdev->dev, "initialized\n");
	return 0;

fail:
	/* NOTE(review): iounmap/release_mem_region and channel/intr/user
	 * teardown are missing on this path -- the TODO below acknowledges
	 * the error path is incomplete */
	if (host->nvmap)
		nvmap_client_put(host->nvmap);
	/* TODO: [ahatala 2010-05-04] */
	kfree(host);
	return err;
}
685
/* Stub: nothing that probe set up is torn down yet (see TODO in probe). */
static int __exit nvhost_remove(struct platform_device *pdev)
{
	return 0;
}
690
/*
 * System suspend: force the host1x module idle first, then snapshot syncpt
 * values with the clock briefly enabled so nvhost_resume() can restore them.
 */
static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct nvhost_master *host = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "suspending\n");
	nvhost_module_suspend(&host->mod, true);
	clk_enable(host->mod.clk[0]);
	nvhost_syncpt_save(&host->syncpt);
	clk_disable(host->mod.clk[0]);
	dev_info(&pdev->dev, "suspended\n");
	return 0;
}
702
/*
 * System resume: restore the syncpt values saved by nvhost_suspend(), with
 * the host1x clock briefly enabled for the register writes.
 */
static int nvhost_resume(struct platform_device *pdev)
{
	struct nvhost_master *host = platform_get_drvdata(pdev);
	dev_info(&pdev->dev, "resuming\n");
	clk_enable(host->mod.clk[0]);
	nvhost_syncpt_reset(&host->syncpt);
	clk_disable(host->mod.clk[0]);
	dev_info(&pdev->dev, "resumed\n");
	return 0;
}
713
/* no .probe field: the driver is bound via platform_driver_probe() in
 * nvhost_mod_init(), which allows the probe code to be __devinit */
static struct platform_driver nvhost_driver = {
	.remove = __exit_p(nvhost_remove),
	.suspend = nvhost_suspend,
	.resume = nvhost_resume,
	.driver = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME
	}
};
723
/* Module entry point: register the driver and probe immediately. */
static int __init nvhost_mod_init(void)
{
	return platform_driver_probe(&nvhost_driver, nvhost_probe);
}
728
/* Module exit point: unregister the platform driver. */
static void __exit nvhost_mod_exit(void)
{
	platform_driver_unregister(&nvhost_driver);
}
733
module_init(nvhost_mod_init);
module_exit(nvhost_mod_exit);

/* standard module metadata */
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Graphics host driver for Tegra products");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform-nvhost");