/*
 * drivers/video/tegra/host/host1x/host1x.c
 *
 * Tegra Graphics Host Driver Entrypoint
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>

#include "dev.h"
#include <trace/events/nvhost.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include "debug.h"
#include "bus_client.h"
#include "nvhost_acm.h"
#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "chip_support.h"

#define DRIVER_NAME             "host1x"

struct nvhost_master *nvhost;

struct nvhost_ctrl_userctx {
        struct nvhost_master *dev;
        u32 *mod_locks;
};

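/*
 * Last close of the control node: drop any module locks still held by
 * this client (index 0 is the host1x module busy reference, the others
 * are hardware mlocks) and free the per-open context.
 */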
static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_ctrl_userctx *priv = filp->private_data;
        int i;

        trace_nvhost_ctrlrelease(priv->dev->dev->name);

        filp->private_data = NULL;
        if (priv->mod_locks[0])
                nvhost_module_idle(priv->dev->dev);
        for (i = 1; i < nvhost_syncpt_nb_mlocks(&priv->dev->syncpt); i++)
                if (priv->mod_locks[i])
                        nvhost_mutex_unlock(&priv->dev->syncpt, i);
        kfree(priv->mod_locks);
        kfree(priv);
        return 0;
}

static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
{
        struct nvhost_master *host =
                container_of(inode->i_cdev, struct nvhost_master, cdev);
        struct nvhost_ctrl_userctx *priv;
        u32 *mod_locks;

        trace_nvhost_ctrlopen(host->dev->name);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        mod_locks = kcalloc(nvhost_syncpt_nb_mlocks(&host->syncpt),
                        sizeof(u32), GFP_KERNEL);

        if (!priv || !mod_locks) {
                kfree(priv);
                kfree(mod_locks);
                return -ENOMEM;
        }

        priv->dev = host;
        priv->mod_locks = mod_locks;
        filp->private_data = priv;
        return 0;
}

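/*
 * Per-command ioctl handlers for the control node. Handlers that take a
 * syncpoint or mlock id validate it against the limits reported by the
 * syncpt unit before touching hardware.
 */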
static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
        trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value);
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_incr_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
        nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_waitex_args *args)
{
        u32 timeout;
        int err;

        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        if (args->timeout == NVHOST_NO_TIMEOUT)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = (u32)msecs_to_jiffies(args->timeout);

        err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
                                        args->thresh, timeout, &args->value);
        trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
                        args->timeout, args->value, err);

        return err;
}

static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_mutex_args *args)
{
        int err = 0;

        if (args->id >= nvhost_syncpt_nb_mlocks(&ctx->dev->syncpt) ||
            args->lock > 1)
                return -EINVAL;

        trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
        if (args->lock && !ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_busy(ctx->dev->dev);
                else
                        err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
                                        args->id);
                if (!err)
                        ctx->mod_locks[args->id] = 1;
        } else if (!args->lock && ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_idle(ctx->dev->dev);
                else
                        nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
                ctx->mod_locks[args->id] = 0;
        }
        return err;
}

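/*
 * Read or write registers of the module identified by args->id on behalf
 * of userspace. Each entry in args->offsets names a block of
 * args->block_size bytes; the data is staged through the on-stack vals[]
 * buffer in batches of at most 64 words.
 */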
static int nvhost_ioctl_ctrl_module_regrdwr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_regrdwr_args *args)
{
        u32 num_offsets = args->num_offsets;
        u32 *offsets = args->offsets;
        u32 *values = args->values;
        u32 vals[64];
        struct platform_device *ndev;

        trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
                        args->num_offsets, args->write);

        /* Check that there is something to read and that block size is
         * u32 aligned */
        if (num_offsets == 0 || args->block_size & 3)
                return -EINVAL;

        ndev = nvhost_device_list_match_by_id(args->id);
        if (!ndev)
                return -ENODEV;

        while (num_offsets--) {
                int err;
                int remaining = args->block_size >> 2;
                u32 offs;

                if (get_user(offs, offsets))
                        return -EFAULT;
                offsets++;
                while (remaining) {
                        int batch = min(remaining, 64);

                        if (args->write) {
                                if (copy_from_user(vals, values,
                                                        batch*sizeof(u32)))
                                        return -EFAULT;
                                err = nvhost_write_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;
                        } else {
                                err = nvhost_read_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;
                                if (copy_to_user(values, vals,
                                                        batch*sizeof(u32)))
                                        return -EFAULT;
                        }
                        remaining -= batch;
                        offs += batch*sizeof(u32);
                        values += batch;
                }
        }

        return 0;
}

static int nvhost_ioctl_ctrl_get_version(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_get_param_args *args)
{
        args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_read_max(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read_max(&ctx->dev->syncpt, args->id);
        return 0;
}

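/*
 * Top-level ioctl dispatch for the control node. Arguments are staged in
 * an on-stack buffer: copied in from userspace for _IOC_WRITE commands,
 * and copied back out for _IOC_READ commands once the handler succeeds.
 */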
static long nvhost_ctrlctl(struct file *filp,
        unsigned int cmd, unsigned long arg)
{
        struct nvhost_ctrl_userctx *priv = filp->private_data;
        u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
        int err = 0;

        if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
                (_IOC_NR(cmd) == 0) ||
                (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST) ||
                (_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE))
                return -EFAULT;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        switch (cmd) {
        case NVHOST_IOCTL_CTRL_SYNCPT_READ:
                err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
                err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
                err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
                err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
                err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
                err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_GET_VERSION:
                err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_READ_MAX:
                err = nvhost_ioctl_ctrl_syncpt_read_max(priv, (void *)buf);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
                if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
                        err = -EFAULT;
        }

        return err;
}

static const struct file_operations nvhost_ctrlops = {
        .owner = THIS_MODULE,
        .release = nvhost_ctrlrelease,
        .open = nvhost_ctrlopen,
        .unlocked_ioctl = nvhost_ctrlctl
};

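/*
 * Power-management callbacks wired into the host1x device data: restore
 * syncpoint state when the module is powered on, save it before power-off,
 * and start/stop syncpoint interrupt handling together with the clock.
 */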
static void power_on_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);

        nvhost_syncpt_reset(&host->syncpt);
}

static int power_off_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);

        nvhost_syncpt_save(&host->syncpt);
        return 0;
}

static void clock_on_host(struct platform_device *dev)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_start(&host->intr, clk_get_rate(pdata->clk[0]));
}

static int clock_off_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_stop(&host->intr);
        return 0;
}

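/*
 * Create the host1x control interface: a device class, a chrdev region
 * and the IFACE_NAME "-ctrl" device node backed by nvhost_ctrlops.
 */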
static int __devinit nvhost_user_init(struct nvhost_master *host)
{
        dev_t devno;
        int err;

        host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
        if (IS_ERR(host->nvhost_class)) {
                err = PTR_ERR(host->nvhost_class);
                dev_err(&host->dev->dev, "failed to create class\n");
                goto fail;
        }

        err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
        if (err < 0) {
                dev_err(&host->dev->dev, "failed to reserve chrdev region\n");
                goto fail;
        }

        cdev_init(&host->cdev, &nvhost_ctrlops);
        host->cdev.owner = THIS_MODULE;
        err = cdev_add(&host->cdev, devno, 1);
        if (err < 0)
                goto fail;
        host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
                        IFACE_NAME "-ctrl");
        if (IS_ERR(host->ctrl)) {
                err = PTR_ERR(host->ctrl);
                dev_err(&host->dev->dev, "failed to create ctrl device\n");
                goto fail;
        }

        return 0;
fail:
        return err;
}

struct nvhost_channel *nvhost_alloc_channel(struct platform_device *dev)
{
        BUG_ON(!host_device_op().alloc_nvhost_channel);
        return host_device_op().alloc_nvhost_channel(dev);
}

void nvhost_free_channel(struct nvhost_channel *ch)
{
        BUG_ON(!host_device_op().free_nvhost_channel);
        host_device_op().free_nvhost_channel(ch);
}

static void nvhost_free_resources(struct nvhost_master *host)
{
        kfree(host->intr.syncpt);
        host->intr.syncpt = NULL;
}

static int __devinit nvhost_alloc_resources(struct nvhost_master *host)
{
        int err;

        err = nvhost_init_chip_support(host);
        if (err)
                return err;

        host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
                                    nvhost_syncpt_nb_pts(&host->syncpt),
                                    GFP_KERNEL);

        if (!host->intr.syncpt) {
                /* frees happen in the support removal phase */
                return -ENOMEM;
        }

        return 0;
}

void nvhost_host1x_update_clk(struct platform_device *pdev)
{
        struct nvhost_master *host = nvhost_get_host(pdev);

        actmon_op().update_sample_period(host);
}

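/*
 * Probe: map the host1x register aperture, set up chip support, the
 * memory manager, syncpoints, interrupts and the control device node,
 * then reset syncpoints once with the clocks held and enable runtime PM.
 */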
static int __devinit nvhost_probe(struct platform_device *dev)
{
        struct nvhost_master *host;
        struct resource *regs, *intr0, *intr1;
        int i, err;
        struct nvhost_device_data *pdata =
                (struct nvhost_device_data *)dev->dev.platform_data;

        regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
        intr0 = platform_get_resource(dev, IORESOURCE_IRQ, 0);
        intr1 = platform_get_resource(dev, IORESOURCE_IRQ, 1);

        if (!regs || !intr0 || !intr1) {
                dev_err(&dev->dev, "missing required platform resources\n");
                return -ENXIO;
        }

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        nvhost = host;

        host->dev = dev;

        /* Copy host1x parameters. The private_data gets replaced
         * by nvhost_master later */
        memcpy(&host->info, pdata->private_data,
                        sizeof(struct host1x_device_info));

        pdata->finalize_poweron = power_on_host;
        pdata->prepare_poweroff = power_off_host;
        pdata->prepare_clockoff = clock_off_host;
        pdata->finalize_clockon = clock_on_host;

        pdata->pdev = dev;

        /* set common host1x device data */
        platform_set_drvdata(dev, pdata);

        /* set private host1x device data */
        nvhost_set_private_data(dev, host);

        host->reg_mem = request_mem_region(regs->start,
                resource_size(regs), dev->name);
        if (!host->reg_mem) {
                dev_err(&dev->dev, "failed to get host register memory\n");
                err = -ENXIO;
                goto fail;
        }

        host->aperture = ioremap(regs->start, resource_size(regs));
        if (!host->aperture) {
                dev_err(&dev->dev, "failed to remap host registers\n");
                err = -ENXIO;
                goto fail;
        }

        err = nvhost_alloc_resources(host);
        if (err) {
                dev_err(&dev->dev, "failed to init chip support\n");
                goto fail;
        }

        host->memmgr = mem_op().alloc_mgr();
        if (!host->memmgr) {
                dev_err(&dev->dev, "unable to create nvmap client\n");
                err = -EIO;
                goto fail;
        }

        err = nvhost_syncpt_init(dev, &host->syncpt);
        if (err)
                goto fail;

        err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
        if (err)
                goto fail;

        err = nvhost_user_init(host);
        if (err)
                goto fail;

        err = nvhost_module_init(dev);
        if (err)
                goto fail;

        for (i = 0; i < pdata->num_clks; i++)
                clk_prepare_enable(pdata->clk[i]);
        nvhost_syncpt_reset(&host->syncpt);
        for (i = 0; i < pdata->num_clks; i++)
                clk_disable_unprepare(pdata->clk[i]);

        pm_runtime_use_autosuspend(&dev->dev);
        pm_runtime_set_autosuspend_delay(&dev->dev, 100);
        pm_runtime_enable(&dev->dev);

        nvhost_device_list_init();
        err = nvhost_device_list_add(dev);
        if (err)
                goto fail;

        nvhost_debug_init(host);

        dev_info(&dev->dev, "initialized\n");
        return 0;

fail:
        nvhost_free_resources(host);
        if (host->memmgr)
                mem_op().put_mgr(host->memmgr);
        kfree(host);
        return err;
}

static int __exit nvhost_remove(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_deinit(&host->intr);
        nvhost_syncpt_deinit(&host->syncpt);
        nvhost_free_resources(host);
        return 0;
}

static int nvhost_suspend(struct platform_device *dev, pm_message_t state)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        int ret = 0;

        ret = nvhost_module_suspend(host->dev);
        dev_info(&dev->dev, "suspend status: %d\n", ret);

        return ret;
}

static int nvhost_resume(struct platform_device *dev)
{
        dev_info(&dev->dev, "resuming\n");
        return 0;
}

static struct platform_driver platform_driver = {
        .probe = nvhost_probe,
        .remove = __exit_p(nvhost_remove),
        .suspend = nvhost_suspend,
        .resume = nvhost_resume,
        .driver = {
                .owner = THIS_MODULE,
                .name = DRIVER_NAME
        },
};

static int __init nvhost_mod_init(void)
{
        return platform_driver_register(&platform_driver);
}

static void __exit nvhost_mod_exit(void)
{
        platform_driver_unregister(&platform_driver);
}

/* The host1x master device needs nvmap to be instantiated first.
 * nvmap is instantiated via fs_initcall.
 * Hence instantiate the host1x master device using rootfs_initcall,
 * which is one level after fs_initcall. */
rootfs_initcall(nvhost_mod_init);
module_exit(nvhost_mod_exit);