/*
 * drivers/video/tegra/host/host1x/host1x.c
 *
 * Tegra Graphics Host Driver Entrypoint
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <mach/hardware.h>

#include "dev.h"
#include <trace/events/nvhost.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include <mach/pm_domains.h>

#include "debug.h"
#include "bus_client.h"
#include "nvhost_acm.h"
#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "nvhost_memmgr.h"
#include "nvhost_sync.h"
#include "nvhost_scale.h"
#include "chip_support.h"
#include "t20/t20.h"
#include "t30/t30.h"
#include "t114/t114.h"
#include "t148/t148.h"
#include "t124/t124.h"

#define DRIVER_NAME             "host1x"

struct nvhost_master *nvhost;

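/* Per-open context for the host1x control node; mod_locks tracks which
 * module mutexes (and the host1x busy reference at index 0) this file
 * descriptor currently holds. */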
struct nvhost_ctrl_userctx {
        struct nvhost_master *dev;
        u32 *mod_locks;
};

static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_ctrl_userctx *priv = filp->private_data;
        int i;

        trace_nvhost_ctrlrelease(priv->dev->dev->name);

        filp->private_data = NULL;
        if (priv->mod_locks[0])
                nvhost_module_idle(priv->dev->dev);
        for (i = 1; i < nvhost_syncpt_nb_mlocks(&priv->dev->syncpt); i++)
                if (priv->mod_locks[i])
                        nvhost_mutex_unlock(&priv->dev->syncpt, i);
        kfree(priv->mod_locks);
        kfree(priv);
        return 0;
}

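/* Open the control node: allocate the per-file context and one lock
 * flag per module mutex exposed by the syncpt unit. */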
static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
{
        struct nvhost_master *host =
                container_of(inode->i_cdev, struct nvhost_master, cdev);
        struct nvhost_ctrl_userctx *priv;
        u32 *mod_locks;

        trace_nvhost_ctrlopen(host->dev->name);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        mod_locks = kzalloc(sizeof(u32)
                        * nvhost_syncpt_nb_mlocks(&host->syncpt),
                        GFP_KERNEL);

        if (!(priv && mod_locks)) {
                kfree(priv);
                kfree(mod_locks);
                return -ENOMEM;
        }

        priv->dev = host;
        priv->mod_locks = mod_locks;
        filp->private_data = priv;
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
        trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value);
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_incr_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
        nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
        return 0;
}

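/* Wait until syncpt 'id' reaches 'thresh' or the timeout expires; the
 * syncpt value observed at completion is returned in args->value. */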
static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_waitex_args *args)
{
        u32 timeout;
        int err;
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        if (args->timeout == NVHOST_NO_TIMEOUT)
                /* FIXME: MAX_SCHEDULE_TIMEOUT is ulong which can be bigger
                   than u32 so we should fix nvhost_syncpt_wait_timeout to
                   take ulong not u32. */
                timeout = (u32)MAX_SCHEDULE_TIMEOUT;
        else
                timeout = (u32)msecs_to_jiffies(args->timeout);

        err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
                                        args->thresh, timeout, &args->value,
                                        NULL, true);
        trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
                                            args->timeout, args->value, err);

        return err;
}

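/* Same as the waitex ioctl, but also reports the completion timestamp
 * in args->tv_sec/tv_nsec. */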
static int nvhost_ioctl_ctrl_syncpt_waitmex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_waitmex_args *args)
{
        ulong timeout;
        int err;
        struct timespec ts;
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        if (args->timeout == NVHOST_NO_TIMEOUT)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = msecs_to_jiffies(args->timeout);

        err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
                                        args->thresh, timeout, &args->value,
                                        &ts, true);
        args->tv_sec = ts.tv_sec;
        args->tv_nsec = ts.tv_nsec;
        trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
                                            args->timeout, args->value, err);

        return err;
}

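/* Create a sync fence covering the given syncpt/threshold pairs and
 * return it to userspace as a file descriptor. */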
static int nvhost_ioctl_ctrl_sync_fence_create(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_sync_fence_create_args *args)
{
#ifdef CONFIG_TEGRA_GRHOST_SYNC
        int err;
        int i;
        struct nvhost_ctrl_sync_fence_info *pts;
        char name[32];
        const char __user *args_name =
                (const char __user *)(uintptr_t)args->name;
        const void __user *args_pts =
                (const void __user *)(uintptr_t)args->pts;

        if (args_name) {
                if (strncpy_from_user(name, args_name, sizeof(name)) < 0)
                        return -EFAULT;
                name[sizeof(name) - 1] = '\0';
        } else {
                name[0] = '\0';
        }

        /* kcalloc checks num_pts * sizeof(*pts) for overflow */
        pts = kcalloc(args->num_pts, sizeof(*pts), GFP_KERNEL);
        if (!pts)
                return -ENOMEM;

        if (copy_from_user(pts, args_pts, sizeof(*pts) * args->num_pts)) {
                err = -EFAULT;
                goto out;
        }

        for (i = 0; i < args->num_pts; i++) {
                if (pts[i].id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt)) {
                        err = -EINVAL;
                        goto out;
                }
        }

        err = nvhost_sync_create_fence(&ctx->dev->syncpt, pts, args->num_pts,
                                       name, &args->fence_fd);
out:
        kfree(pts);
        return err;
#else
        return -EINVAL;
#endif
}

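/* Lock or unlock a module mutex on behalf of userspace. Lock id 0 is
 * special: it keeps host1x powered via a busy/idle reference instead
 * of taking a hardware mutex. */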
static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_mutex_args *args)
{
        int err = 0;
        if (args->id >= nvhost_syncpt_nb_mlocks(&ctx->dev->syncpt) ||
            args->lock > 1)
                return -EINVAL;

        trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
        if (args->lock && !ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_busy(ctx->dev->dev);
                else
                        err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
                                        args->id);
                if (!err)
                        ctx->mod_locks[args->id] = 1;
        } else if (!args->lock && ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_idle(ctx->dev->dev);
                else
                        nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
                ctx->mod_locks[args->id] = 0;
        }
        return err;
}

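/* Read or write num_offsets register blocks, each block_size bytes
 * long, on the client device identified by args->id. */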
static int nvhost_ioctl_ctrl_module_regrdwr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_regrdwr_args *args)
{
        u32 num_offsets = args->num_offsets;
        u32 __user *offsets = (u32 __user *)(uintptr_t)args->offsets;
        u32 __user *values = (u32 __user *)(uintptr_t)args->values;
        u32 *vals;
        u32 *p1;
        int remaining;
        int err;

        struct platform_device *ndev;
        trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
                        args->num_offsets, args->write);

        /* Check that there is something to read and that block size is
         * u32 aligned */
        if (num_offsets == 0 || args->block_size & 3)
                return -EINVAL;

        ndev = nvhost_device_list_match_by_id(args->id);
        if (!ndev)
                return -ENODEV;

        remaining = args->block_size >> 2;

        /* kmalloc_array checks num_offsets * block_size for overflow */
        vals = kmalloc_array(num_offsets, args->block_size, GFP_KERNEL);
        if (!vals)
                return -ENOMEM;
        p1 = vals;

        if (args->write) {
                if (copy_from_user(vals, values,
                                num_offsets * args->block_size)) {
                        kfree(vals);
                        return -EFAULT;
                }
                while (num_offsets--) {
                        u32 offs;
                        if (get_user(offs, offsets)) {
                                kfree(vals);
                                return -EFAULT;
                        }
                        offsets++;
                        err = nvhost_write_module_regs(ndev,
                                        offs, remaining, p1);
                        if (err) {
                                kfree(vals);
                                return err;
                        }
                        p1 += remaining;
                }
                kfree(vals);
        } else {
                while (num_offsets--) {
                        u32 offs;
                        if (get_user(offs, offsets)) {
                                kfree(vals);
                                return -EFAULT;
                        }
                        offsets++;
                        err = nvhost_read_module_regs(ndev,
                                        offs, remaining, p1);
                        if (err) {
                                kfree(vals);
                                return err;
                        }
                        p1 += remaining;
                }

                if (copy_to_user(values, vals,
                                args->num_offsets * args->block_size)) {
                        kfree(vals);
                        return -EFAULT;
                }
                kfree(vals);
        }
        return 0;
}

static int nvhost_ioctl_ctrl_get_version(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_get_param_args *args)
{
        args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_read_max(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read_max(&ctx->dev->syncpt, args->id);
        return 0;
}

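/* ioctl dispatcher for the control node: arguments are staged through
 * an on-stack buffer, copied in for _IOC_WRITE commands and copied
 * back out for _IOC_READ commands. */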
static long nvhost_ctrlctl(struct file *filp,
        unsigned int cmd, unsigned long arg)
{
        struct nvhost_ctrl_userctx *priv = filp->private_data;
        u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
        int err = 0;

        if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
                (_IOC_NR(cmd) == 0) ||
                (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST) ||
                (_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE))
                return -EFAULT;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        switch (cmd) {
        case NVHOST_IOCTL_CTRL_SYNCPT_READ:
                err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
                err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
                err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNC_FENCE_CREATE:
                err = nvhost_ioctl_ctrl_sync_fence_create(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
                err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
                err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
                err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_GET_VERSION:
                err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_READ_MAX:
                err = nvhost_ioctl_ctrl_syncpt_read_max(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAITMEX:
                err = nvhost_ioctl_ctrl_syncpt_waitmex(priv, (void *)buf);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
                /* copy_to_user() returns the number of bytes left, not an
                 * errno, so convert a partial copy into -EFAULT */
                if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
                        err = -EFAULT;
        }

        return err;
}

static const struct file_operations nvhost_ctrlops = {
        .owner = THIS_MODULE,
        .release = nvhost_ctrlrelease,
        .open = nvhost_ctrlopen,
        .unlocked_ioctl = nvhost_ctrlctl
};

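/* Power and clock gating hooks: save/restore syncpt state and stop/start
 * host1x interrupt handling around power transitions. */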
#ifdef CONFIG_PM
static void power_on_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);

        nvhost_syncpt_reset(&host->syncpt);
}

static int power_off_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);

        nvhost_syncpt_save(&host->syncpt);
        return 0;
}

static void clock_on_host(struct platform_device *dev)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_start(&host->intr, clk_get_rate(pdata->clk[0]));
}

static int clock_off_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_stop(&host->intr);
        return 0;
}
#endif

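/* Create the host1x control character device (IFACE_NAME "-ctrl"). */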
static int nvhost_user_init(struct nvhost_master *host)
{
        dev_t devno;
        int err;

        host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
        if (IS_ERR(host->nvhost_class)) {
                err = PTR_ERR(host->nvhost_class);
                dev_err(&host->dev->dev, "failed to create class\n");
                goto fail;
        }

        err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
        if (err < 0) {
                dev_err(&host->dev->dev, "failed to reserve chrdev region\n");
                goto fail;
        }

        cdev_init(&host->cdev, &nvhost_ctrlops);
        host->cdev.owner = THIS_MODULE;
        err = cdev_add(&host->cdev, devno, 1);
        if (err < 0)
                goto fail;
        host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
                        IFACE_NAME "-ctrl");
        if (IS_ERR(host->ctrl)) {
                err = PTR_ERR(host->ctrl);
                dev_err(&host->dev->dev, "failed to create ctrl device\n");
                goto fail;
        }

        return 0;
fail:
        return err;
}

struct nvhost_channel *nvhost_alloc_channel(struct platform_device *dev)
{
        return host_device_op().alloc_nvhost_channel(dev);
}

void nvhost_free_channel(struct nvhost_channel *ch)
{
        host_device_op().free_nvhost_channel(ch);
}

static void nvhost_free_resources(struct nvhost_master *host)
{
        kfree(host->intr.syncpt);
        host->intr.syncpt = NULL;
}

static int nvhost_alloc_resources(struct nvhost_master *host)
{
        int err;

        err = nvhost_init_chip_support(host);
        if (err)
                return err;

        host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
                                    nvhost_syncpt_nb_pts(&host->syncpt),
                                    GFP_KERNEL);

        if (!host->intr.syncpt) {
                /* frees happen in the support removal phase */
                return -ENOMEM;
        }

        return 0;
}

static const struct of_device_id tegra_host1x_of_match[] = {
#ifdef TEGRA_2X_OR_HIGHER_CONFIG
        { .compatible = "nvidia,tegra20-host1x",
                .data = (struct nvhost_device_data *)&t20_host1x_info },
#endif
#ifdef TEGRA_3X_OR_HIGHER_CONFIG
        { .compatible = "nvidia,tegra30-host1x",
                .data = (struct nvhost_device_data *)&t30_host1x_info },
#endif
#ifdef TEGRA_11X_OR_HIGHER_CONFIG
        { .compatible = "nvidia,tegra114-host1x",
                .data = (struct nvhost_device_data *)&t11_host1x_info },
#endif
#ifdef TEGRA_14X_OR_HIGHER_CONFIG
        { .compatible = "nvidia,tegra148-host1x",
                .data = (struct nvhost_device_data *)&t14_host1x_info },
#endif
#ifdef TEGRA_12X_OR_HIGHER_CONFIG
        { .compatible = "nvidia,tegra124-host1x",
                .data = (struct nvhost_device_data *)&t124_host1x_info },
#endif
        { },
};

void nvhost_host1x_update_clk(struct platform_device *pdev)
{
        struct nvhost_device_data *pdata = NULL;
        struct nvhost_device_profile *profile;

        /* There are only two chips which need this workaround, so hardcode */
        if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA11)
                pdata = &t11_gr3d_info;
        else if (tegra_get_chipid() == TEGRA_CHIPID_TEGRA14)
                pdata = &t14_gr3d_info;
        if (!pdata)
                return;

        profile = pdata->power_profile;

        if (profile && profile->actmon)
                actmon_op().update_sample_period(profile->actmon);
}

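/* Probe: map registers, set up chip support, syncpts, interrupts and the
 * control node, then enable runtime PM and register the device. */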
static int nvhost_probe(struct platform_device *dev)
{
        struct nvhost_master *host;
        struct resource *regs;
        int syncpt_irq, generic_irq;
        int i, err;
        struct nvhost_device_data *pdata = NULL;

        if (dev->dev.of_node) {
                const struct of_device_id *match;

                match = of_match_device(tegra_host1x_of_match, &dev->dev);
                if (match)
                        pdata = (struct nvhost_device_data *)match->data;
        } else
                pdata = (struct nvhost_device_data *)dev->dev.platform_data;

        WARN_ON(!pdata);
        if (!pdata) {
                dev_info(&dev->dev, "no platform data\n");
                return -ENODATA;
        }

        regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
        if (!regs) {
                dev_err(&dev->dev, "missing host1x regs\n");
                return -ENXIO;
        }

        syncpt_irq = platform_get_irq(dev, 0);
        if (IS_ERR_VALUE(syncpt_irq)) {
                dev_err(&dev->dev, "missing syncpt irq\n");
                return -ENXIO;
        }

        generic_irq = platform_get_irq(dev, 1);
        if (IS_ERR_VALUE(generic_irq)) {
                dev_err(&dev->dev, "missing generic irq\n");
                return -ENXIO;
        }

        host = devm_kzalloc(&dev->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        nvhost = host;

        host->dev = dev;
        mutex_init(&pdata->lock);

        /* Copy host1x parameters. The private_data gets replaced
         * by nvhost_master later */
        memcpy(&host->info, pdata->private_data,
                        sizeof(struct host1x_device_info));

        pdata->pdev = dev;

        /* set common host1x device data */
        platform_set_drvdata(dev, pdata);

        /* set private host1x device data */
        nvhost_set_private_data(dev, host);

        host->aperture = devm_request_and_ioremap(&dev->dev, regs);
        if (!host->aperture) {
                err = -ENXIO;
                goto fail;
        }

        err = nvhost_alloc_resources(host);
        if (err) {
                dev_err(&dev->dev, "failed to init chip support\n");
                goto fail;
        }

        host->memmgr = nvhost_memmgr_alloc_mgr();
        if (!host->memmgr) {
                dev_err(&dev->dev, "unable to create nvmap client\n");
                err = -EIO;
                goto fail;
        }

        err = nvhost_syncpt_init(dev, &host->syncpt);
        if (err)
                goto fail;

        err = nvhost_intr_init(&host->intr, generic_irq, syncpt_irq);
        if (err)
                goto fail;

        err = nvhost_user_init(host);
        if (err)
                goto fail;

        err = nvhost_module_init(dev);
        if (err)
                goto fail;

        for (i = 0; i < pdata->num_clks; i++)
                clk_prepare_enable(pdata->clk[i]);
        nvhost_syncpt_reset(&host->syncpt);
        for (i = 0; i < pdata->num_clks; i++)
                clk_disable_unprepare(pdata->clk[i]);

        tegra_pd_add_device(&dev->dev);
        if (pdata->clockgate_delay) {
                pm_runtime_set_autosuspend_delay(&dev->dev,
                        pdata->clockgate_delay);
                pm_runtime_use_autosuspend(&dev->dev);
        }
        pm_runtime_enable(&dev->dev);
        pm_suspend_ignore_children(&dev->dev, true);

        nvhost_device_list_init();
        err = nvhost_device_list_add(dev);
        if (err)
                goto fail;

        nvhost_debug_init(host);

        dev_info(&dev->dev, "initialized\n");
        return 0;

fail:
        nvhost_free_resources(host);
        if (host->memmgr)
                nvhost_memmgr_put_mgr(host->memmgr);
        /* host itself is devm-allocated and is freed automatically */
        return err;
}

static int __exit nvhost_remove(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_deinit(&host->intr);
        nvhost_syncpt_deinit(&host->syncpt);
        nvhost_free_resources(host);
        return 0;
}

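/* System sleep hooks: save syncpt state and stop interrupts on suspend,
 * then restore both on resume. */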
#ifdef CONFIG_PM
static int nvhost_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct nvhost_master *host = nvhost_get_private_data(pdev);
        int ret = 0;

        nvhost_module_enable_clk(dev);
        power_off_host(pdev);
        clock_off_host(pdev);
        nvhost_module_disable_clk(dev);

        ret = nvhost_module_suspend(host->dev);
        dev_info(dev, "suspend status: %d\n", ret);

        return ret;
}

static int nvhost_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);

        nvhost_module_enable_clk(dev);
        clock_on_host(pdev);
        power_on_host(pdev);
        nvhost_module_disable_clk(dev);

        dev_info(dev, "resuming\n");

        return 0;
}

static const struct dev_pm_ops host1x_pm_ops = {
        .suspend = nvhost_suspend,
        .resume = nvhost_resume,
#ifdef CONFIG_PM_RUNTIME
        .runtime_suspend = nvhost_module_disable_clk,
        .runtime_resume = nvhost_module_enable_clk,
#endif
};
#endif /* CONFIG_PM */

static struct platform_driver platform_driver = {
        .probe = nvhost_probe,
        .remove = __exit_p(nvhost_remove),
        .driver = {
                .owner = THIS_MODULE,
                .name = DRIVER_NAME,
#ifdef CONFIG_PM
                .pm = &host1x_pm_ops,
#endif
#ifdef CONFIG_OF
                .of_match_table = tegra_host1x_of_match,
#endif
        },
};

static int __init nvhost_mod_init(void)
{
        return platform_driver_register(&platform_driver);
}

static void __exit nvhost_mod_exit(void)
{
        platform_driver_unregister(&platform_driver);
}

/* The host1x master device needs nvmap to be instantiated first.
 * nvmap is instantiated via fs_initcall, so the host1x master device
 * is registered with rootfs_initcall, which runs one level after
 * fs_initcall. */
rootfs_initcall(nvhost_mod_init);
module_exit(nvhost_mod_exit);