video: tegra: host: add DT support
drivers/video/tegra/host/host1x/host1x.c
/*
 * drivers/video/tegra/host/host1x/host1x.c
 *
 * Tegra Graphics Host Driver Entrypoint
 *
 * Copyright (c) 2010-2013, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include "dev.h"
#include <trace/events/nvhost.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include "debug.h"
#include "bus_client.h"
#include "nvhost_acm.h"
#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "chip_support.h"
#include "t20/t20.h"
#include "t30/t30.h"
#include "t114/t114.h"

#define DRIVER_NAME             "host1x"

struct nvhost_master *nvhost;

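/*
 * Per-open state for the host1x control node.  mod_locks[] tracks which
 * module locks this file descriptor currently holds: index 0 stands for a
 * busy reference on the host1x device itself, the remaining entries map to
 * hardware module mutexes.  Anything still held is dropped in
 * nvhost_ctrlrelease().
 */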
struct nvhost_ctrl_userctx {
        struct nvhost_master *dev;
        u32 *mod_locks;
};

static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_ctrl_userctx *priv = filp->private_data;
        int i;

        trace_nvhost_ctrlrelease(priv->dev->dev->name);

        filp->private_data = NULL;
        if (priv->mod_locks[0])
                nvhost_module_idle(priv->dev->dev);
        for (i = 1; i < nvhost_syncpt_nb_mlocks(&priv->dev->syncpt); i++)
                if (priv->mod_locks[i])
                        nvhost_mutex_unlock(&priv->dev->syncpt, i);
        kfree(priv->mod_locks);
        kfree(priv);
        return 0;
}

static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
{
        struct nvhost_master *host =
                container_of(inode->i_cdev, struct nvhost_master, cdev);
        struct nvhost_ctrl_userctx *priv;
        u32 *mod_locks;

        trace_nvhost_ctrlopen(host->dev->name);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        mod_locks = kzalloc(sizeof(u32)
                        * nvhost_syncpt_nb_mlocks(&host->syncpt),
                        GFP_KERNEL);

        if (!(priv && mod_locks)) {
                kfree(priv);
                kfree(mod_locks);
                return -ENOMEM;
        }

        priv->dev = host;
        priv->mod_locks = mod_locks;
        filp->private_data = priv;
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
        trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value);
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_incr_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
        nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_waitex_args *args)
{
        u32 timeout;
        int err;
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        if (args->timeout == NVHOST_NO_TIMEOUT)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = (u32)msecs_to_jiffies(args->timeout);

        err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
                                        args->thresh, timeout, &args->value);
        trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
                        args->timeout, args->value, err);

        return err;
}

static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_mutex_args *args)
{
        int err = 0;
        if (args->id >= nvhost_syncpt_nb_mlocks(&ctx->dev->syncpt) ||
            args->lock > 1)
                return -EINVAL;

        trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
        if (args->lock && !ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_busy(ctx->dev->dev);
                else
                        err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
                                        args->id);
                if (!err)
                        ctx->mod_locks[args->id] = 1;
        } else if (!args->lock && ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_idle(ctx->dev->dev);
                else
                        nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
                ctx->mod_locks[args->id] = 0;
        }
        return err;
}

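/*
 * Read or write a block of module registers on behalf of user space.  The
 * data is staged through a 64-word bounce buffer on the stack, so each
 * copy_{from,to}_user() and register access handles at most 64 registers
 * before the offset is advanced to the next chunk.
 */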
static int nvhost_ioctl_ctrl_module_regrdwr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_regrdwr_args *args)
{
        u32 num_offsets = args->num_offsets;
        u32 *offsets = args->offsets;
        u32 *values = args->values;
        u32 vals[64];
        struct platform_device *ndev;

        trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
                        args->num_offsets, args->write);

        /* Check that there is something to read and that block size is
         * u32 aligned */
        if (num_offsets == 0 || args->block_size & 3)
                return -EINVAL;

        ndev = nvhost_device_list_match_by_id(args->id);
        if (!ndev)
                return -ENODEV;

        while (num_offsets--) {
                int err;
                int remaining = args->block_size >> 2;
                u32 offs;
                if (get_user(offs, offsets))
                        return -EFAULT;
                offsets++;
                while (remaining) {
                        int batch = min(remaining, 64);
                        if (args->write) {
                                if (copy_from_user(vals, values,
                                                        batch*sizeof(u32)))
                                        return -EFAULT;
                                err = nvhost_write_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;
                        } else {
                                err = nvhost_read_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;
                                if (copy_to_user(values, vals,
                                                        batch*sizeof(u32)))
                                        return -EFAULT;
                        }
                        remaining -= batch;
                        offs += batch*sizeof(u32);
                        values += batch;
                }
        }

        return 0;
}

static int nvhost_ioctl_ctrl_get_version(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_get_param_args *args)
{
        args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_read_max(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read_max(&ctx->dev->syncpt, args->id);
        return 0;
}

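/*
 * Top-level ioctl dispatcher for the control node.  Argument structures are
 * staged through a fixed on-stack buffer: _IOC_WRITE arguments are copied in
 * before the handler runs and _IOC_READ arguments are copied back out on
 * success, so the individual handlers only ever see kernel memory.
 */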
static long nvhost_ctrlctl(struct file *filp,
        unsigned int cmd, unsigned long arg)
{
        struct nvhost_ctrl_userctx *priv = filp->private_data;
        u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
        int err = 0;

        if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
                (_IOC_NR(cmd) == 0) ||
                (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST) ||
                (_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE))
                return -EFAULT;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        switch (cmd) {
        case NVHOST_IOCTL_CTRL_SYNCPT_READ:
                err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
                err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
                err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
                err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
                err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
                err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_GET_VERSION:
                err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_READ_MAX:
                err = nvhost_ioctl_ctrl_syncpt_read_max(priv, (void *)buf);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
                err = copy_to_user((void __user *)arg, buf,
                                _IOC_SIZE(cmd)) ? -EFAULT : 0;

        return err;
}

static const struct file_operations nvhost_ctrlops = {
        .owner = THIS_MODULE,
        .release = nvhost_ctrlrelease,
        .open = nvhost_ctrlopen,
        .unlocked_ioctl = nvhost_ctrlctl
};

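/*
 * Power and clock callbacks hooked up in nvhost_probe() via the device's
 * nvhost_device_data: sync point state is restored when the host1x power
 * domain comes back up and saved before it is gated, while the interrupt
 * logic is started and stopped together with the module clock.
 */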
static void power_on_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);

        nvhost_syncpt_reset(&host->syncpt);
}

static int power_off_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);

        nvhost_syncpt_save(&host->syncpt);
        return 0;
}

static void clock_on_host(struct platform_device *dev)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_start(&host->intr, clk_get_rate(pdata->clk[0]));
}

static int clock_off_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_stop(&host->intr);
        return 0;
}

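/*
 * Create the user-space control interface: a character device in the
 * IFACE_NAME class, backed by nvhost_ctrlops and exposed as
 * "IFACE_NAME-ctrl".
 */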
static int __devinit nvhost_user_init(struct nvhost_master *host)
{
        int err;
        dev_t devno;

        host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
        if (IS_ERR(host->nvhost_class)) {
                err = PTR_ERR(host->nvhost_class);
                dev_err(&host->dev->dev, "failed to create class\n");
                goto fail;
        }

        err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
        if (err < 0) {
                dev_err(&host->dev->dev, "failed to reserve chrdev region\n");
                goto fail;
        }

        cdev_init(&host->cdev, &nvhost_ctrlops);
        host->cdev.owner = THIS_MODULE;
        err = cdev_add(&host->cdev, devno, 1);
        if (err < 0)
                goto fail;
        host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
                        IFACE_NAME "-ctrl");
        if (IS_ERR(host->ctrl)) {
                err = PTR_ERR(host->ctrl);
                dev_err(&host->dev->dev, "failed to create ctrl device\n");
                goto fail;
        }

        return 0;
fail:
        return err;
}

struct nvhost_channel *nvhost_alloc_channel(struct platform_device *dev)
{
        BUG_ON(!host_device_op().alloc_nvhost_channel);
        return host_device_op().alloc_nvhost_channel(dev);
}

void nvhost_free_channel(struct nvhost_channel *ch)
{
        BUG_ON(!host_device_op().free_nvhost_channel);
        host_device_op().free_nvhost_channel(ch);
}

static void nvhost_free_resources(struct nvhost_master *host)
{
        kfree(host->intr.syncpt);
        host->intr.syncpt = 0;
}

static int __devinit nvhost_alloc_resources(struct nvhost_master *host)
{
        int err;

        err = nvhost_init_chip_support(host);
        if (err)
                return err;

        host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
                                    nvhost_syncpt_nb_pts(&host->syncpt),
                                    GFP_KERNEL);

        if (!host->intr.syncpt) {
                /* frees happen in the support removal phase */
                return -ENOMEM;
        }

        return 0;
}

void nvhost_host1x_update_clk(struct platform_device *pdev)
{
        struct nvhost_master *host = nvhost_get_host(pdev);

        actmon_op().update_sample_period(host);
}

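/*
 * Device tree match table added for DT-based probing.  Each supported SoC
 * generation binds through its own compatible string and carries the
 * chip-specific nvhost_device_data as match data.  An illustrative board
 * node (addresses and interrupt numbers are SoC specific and shown here
 * only as placeholders) might look roughly like:
 *
 *      host1x {
 *              compatible = "nvidia,tegra20-host1x";
 *              reg = <0x50000000 0x00024000>;
 *              interrupts = <0 65 0x04>, <0 67 0x04>;
 *      };
 */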
static struct of_device_id tegra_host1x_of_match[] __devinitdata = {
        { .compatible = "nvidia,tegra20-host1x",
                .data = (struct nvhost_device_data *)&t20_host1x_info },
        { .compatible = "nvidia,tegra30-host1x",
                .data = (struct nvhost_device_data *)&t30_host1x_info },
        { .compatible = "nvidia,tegra114-host1x",
                .data = (struct nvhost_device_data *)&t11_host1x_info },
        { },
};

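/*
 * Probe path.  Device data comes either from the OF match table above (when
 * the device was instantiated from the device tree) or from legacy platform
 * data; after that the flow is identical: map the host1x aperture, set up
 * chip support, sync points and interrupts, create the control node, and
 * register the device with runtime PM enabled.
 */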
static int __devinit nvhost_probe(struct platform_device *dev)
{
        struct nvhost_master *host;
        struct resource *regs, *intr0, *intr1;
        int i, err;
        struct nvhost_device_data *pdata = NULL;

        if (dev->dev.of_node) {
                const struct of_device_id *match;

                match = of_match_device(tegra_host1x_of_match, &dev->dev);
                if (match)
                        pdata = (struct nvhost_device_data *)match->data;
        } else
                pdata = (struct nvhost_device_data *)dev->dev.platform_data;

        WARN_ON(!pdata);
        if (!pdata) {
                dev_info(&dev->dev, "no platform data\n");
                return -ENODATA;
        }
        regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
        intr0 = platform_get_resource(dev, IORESOURCE_IRQ, 0);
        intr1 = platform_get_resource(dev, IORESOURCE_IRQ, 1);

        if (!regs || !intr0 || !intr1) {
                dev_err(&dev->dev, "missing required platform resources\n");
                return -ENXIO;
        }

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        nvhost = host;

        host->dev = dev;

        /* Copy host1x parameters. The private_data gets replaced
         * by nvhost_master later */
        memcpy(&host->info, pdata->private_data,
                        sizeof(struct host1x_device_info));

        pdata->finalize_poweron = power_on_host;
        pdata->prepare_poweroff = power_off_host;
        pdata->prepare_clockoff = clock_off_host;
        pdata->finalize_clockon = clock_on_host;

        pdata->pdev = dev;

        /* set common host1x device data */
        platform_set_drvdata(dev, pdata);

        /* set private host1x device data */
        nvhost_set_private_data(dev, host);

        host->reg_mem = request_mem_region(regs->start,
                resource_size(regs), dev->name);
        if (!host->reg_mem) {
                dev_err(&dev->dev, "failed to get host register memory\n");
                err = -ENXIO;
                goto fail;
        }

        host->aperture = ioremap(regs->start, resource_size(regs));
        if (!host->aperture) {
                dev_err(&dev->dev, "failed to remap host registers\n");
                err = -ENXIO;
                goto fail;
        }

        err = nvhost_alloc_resources(host);
        if (err) {
                dev_err(&dev->dev, "failed to init chip support\n");
                goto fail;
        }

        host->memmgr = mem_op().alloc_mgr();
        if (!host->memmgr) {
                dev_err(&dev->dev, "unable to create nvmap client\n");
                err = -EIO;
                goto fail;
        }

        err = nvhost_syncpt_init(dev, &host->syncpt);
        if (err)
                goto fail;

        err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
        if (err)
                goto fail;

        err = nvhost_user_init(host);
        if (err)
                goto fail;

        err = nvhost_module_init(dev);
        if (err)
                goto fail;

        for (i = 0; i < pdata->num_clks; i++)
                clk_prepare_enable(pdata->clk[i]);
        nvhost_syncpt_reset(&host->syncpt);
        for (i = 0; i < pdata->num_clks; i++)
                clk_disable_unprepare(pdata->clk[i]);

        pm_runtime_use_autosuspend(&dev->dev);
        pm_runtime_set_autosuspend_delay(&dev->dev, 100);
        pm_runtime_enable(&dev->dev);

        nvhost_device_list_init();
        err = nvhost_device_list_add(dev);
        if (err)
                goto fail;

        nvhost_debug_init(host);

        dev_info(&dev->dev, "initialized\n");
        return 0;

fail:
        nvhost_free_resources(host);
        if (host->memmgr)
                mem_op().put_mgr(host->memmgr);
        kfree(host);
        return err;
}

static int __exit nvhost_remove(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_deinit(&host->intr);
        nvhost_syncpt_deinit(&host->syncpt);
        nvhost_free_resources(host);
        return 0;
}

static int nvhost_suspend(struct platform_device *dev, pm_message_t state)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        int ret = 0;

        ret = nvhost_module_suspend(host->dev);
        dev_info(&dev->dev, "suspend status: %d\n", ret);

        return ret;
}

static int nvhost_resume(struct platform_device *dev)
{
        dev_info(&dev->dev, "resuming\n");
        return 0;
}

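/*
 * Legacy platform-driver suspend/resume hooks (not dev_pm_ops).  The OF
 * match table is only referenced when CONFIG_OF is enabled, so the driver
 * still builds and binds by name on non-device-tree platforms.
 */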
static struct platform_driver platform_driver = {
        .probe = nvhost_probe,
        .remove = __exit_p(nvhost_remove),
        .suspend = nvhost_suspend,
        .resume = nvhost_resume,
        .driver = {
                .owner = THIS_MODULE,
                .name = DRIVER_NAME,
#ifdef CONFIG_OF
                .of_match_table = tegra_host1x_of_match,
#endif
        },
};

static int __init nvhost_mod_init(void)
{
        return platform_driver_register(&platform_driver);
}

static void __exit nvhost_mod_exit(void)
{
        platform_driver_unregister(&platform_driver);
}

/* host1x master device needs nvmap to be instantiated first.
 * nvmap is instantiated via fs_initcall.
 * Hence instantiate host1x master device using rootfs_initcall
 * which is one level after fs_initcall. */
rootfs_initcall(nvhost_mod_init);
module_exit(nvhost_mod_exit);
