video: tegra: host: Turn host1x on when reading counters
[linux-2.6.git] drivers/video/tegra/host/host1x/host1x.c
/*
 * drivers/video/tegra/host/host1x/host1x.c
 *
 * Tegra Graphics Host Driver Entrypoint
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/module.h>

#include "dev.h"
#include <trace/events/nvhost.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>

#include "debug.h"
#include "bus_client.h"
#include "nvhost_acm.h"
#include "nvhost_channel.h"
#include "nvhost_job.h"
#include "chip_support.h"

#define DRIVER_NAME             "host1x"

struct nvhost_master *nvhost;

struct nvhost_ctrl_userctx {
        struct nvhost_master *dev;
        u32 *mod_locks;
};

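/*
 * Release a control node fd: drop the host1x power reference (lock 0)
 * and any hardware module locks still recorded in mod_locks[].
 */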
static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_ctrl_userctx *priv = filp->private_data;
        int i;

        trace_nvhost_ctrlrelease(priv->dev->dev->name);

        filp->private_data = NULL;
        if (priv->mod_locks[0])
                nvhost_module_idle(priv->dev->dev);
        for (i = 1; i < nvhost_syncpt_nb_mlocks(&priv->dev->syncpt); i++)
                if (priv->mod_locks[i])
                        nvhost_mutex_unlock(&priv->dev->syncpt, i);
        kfree(priv->mod_locks);
        kfree(priv);
        return 0;
}

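/*
 * Open a control node fd: allocate a per-fd context with one mod_locks
 * slot for each module lock reported by the sync point unit.
 */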
static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
{
        struct nvhost_master *host =
                container_of(inode->i_cdev, struct nvhost_master, cdev);
        struct nvhost_ctrl_userctx *priv;
        u32 *mod_locks;

        trace_nvhost_ctrlopen(host->dev->name);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        mod_locks = kzalloc(sizeof(u32)
                        * nvhost_syncpt_nb_mlocks(&host->syncpt),
                        GFP_KERNEL);

        if (!(priv && mod_locks)) {
                kfree(priv);
                kfree(mod_locks);
                return -ENOMEM;
        }

        priv->dev = host;
        priv->mod_locks = mod_locks;
        filp->private_data = priv;
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_read(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
        trace_nvhost_ioctl_ctrl_syncpt_read(args->id, args->value);
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_incr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_incr_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
        nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_waitex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_waitex_args *args)
{
        u32 timeout;
        int err;
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        if (args->timeout == NVHOST_NO_TIMEOUT)
                timeout = MAX_SCHEDULE_TIMEOUT;
        else
                timeout = (u32)msecs_to_jiffies(args->timeout);

        err = nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
                                        args->thresh, timeout, &args->value);
        trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
          args->timeout, args->value, err);

        return err;
}

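/*
 * Lock or unlock a module mutex on behalf of user space. Lock id 0 is
 * special: it holds a host1x power reference through
 * nvhost_module_busy()/nvhost_module_idle() rather than a hardware mlock.
 */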
static int nvhost_ioctl_ctrl_module_mutex(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_mutex_args *args)
{
        int err = 0;
        if (args->id >= nvhost_syncpt_nb_mlocks(&ctx->dev->syncpt) ||
            args->lock > 1)
                return -EINVAL;

        trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
        if (args->lock && !ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_busy(ctx->dev->dev);
                else
                        err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
                                        args->id);
                if (!err)
                        ctx->mod_locks[args->id] = 1;
        } else if (!args->lock && ctx->mod_locks[args->id]) {
                if (args->id == 0)
                        nvhost_module_idle(ctx->dev->dev);
                else
                        nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
                ctx->mod_locks[args->id] = 0;
        }
        return err;
}

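/*
 * Read or write module registers on behalf of user space. Each
 * user-supplied offset describes a block of block_size bytes that is
 * transferred through a 64-word on-stack bounce buffer.
 */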
static int nvhost_ioctl_ctrl_module_regrdwr(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_module_regrdwr_args *args)
{
        u32 num_offsets = args->num_offsets;
        u32 *offsets = args->offsets;
        u32 *values = args->values;
        u32 vals[64];
        struct platform_device *ndev;

        trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
                        args->num_offsets, args->write);

        /* Check that there is something to read and that block size is
         * u32 aligned */
        if (num_offsets == 0 || args->block_size & 3)
                return -EINVAL;

        ndev = nvhost_device_list_match_by_id(args->id);
        if (!ndev)
                return -ENODEV;

        while (num_offsets--) {
                int err;
                int remaining = args->block_size >> 2;
                u32 offs;
                if (get_user(offs, offsets))
                        return -EFAULT;
                offsets++;
                while (remaining) {
                        int batch = min(remaining, 64);
                        if (args->write) {
                                if (copy_from_user(vals, values,
                                                        batch*sizeof(u32)))
                                        return -EFAULT;
                                err = nvhost_write_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;
                        } else {
                                err = nvhost_read_module_regs(ndev,
                                                offs, batch, vals);
                                if (err)
                                        return err;
                                if (copy_to_user(values, vals,
                                                        batch*sizeof(u32)))
                                        return -EFAULT;
                        }
                        remaining -= batch;
                        offs += batch*sizeof(u32);
                        values += batch;
                }
        }

        return 0;
}

static int nvhost_ioctl_ctrl_get_version(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_get_param_args *args)
{
        args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
        return 0;
}

static int nvhost_ioctl_ctrl_syncpt_read_max(struct nvhost_ctrl_userctx *ctx,
        struct nvhost_ctrl_syncpt_read_args *args)
{
        if (args->id >= nvhost_syncpt_nb_pts(&ctx->dev->syncpt))
                return -EINVAL;
        args->value = nvhost_syncpt_read_max(&ctx->dev->syncpt, args->id);
        return 0;
}

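/*
 * Control node ioctl dispatcher: arguments are marshalled through an
 * on-stack buffer bounded by NVHOST_IOCTL_CTRL_MAX_ARG_SIZE and copied
 * back to user space for _IOC_READ commands.
 */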
static long nvhost_ctrlctl(struct file *filp,
        unsigned int cmd, unsigned long arg)
{
        struct nvhost_ctrl_userctx *priv = filp->private_data;
        u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
        int err = 0;

        if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
                (_IOC_NR(cmd) == 0) ||
                (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST) ||
                (_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE))
                return -EFAULT;

        if (_IOC_DIR(cmd) & _IOC_WRITE) {
                if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
                        return -EFAULT;
        }

        switch (cmd) {
        case NVHOST_IOCTL_CTRL_SYNCPT_READ:
                err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
                err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
                err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
                err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
                err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
                err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_GET_VERSION:
                err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
                break;
        case NVHOST_IOCTL_CTRL_SYNCPT_READ_MAX:
                err = nvhost_ioctl_ctrl_syncpt_read_max(priv, (void *)buf);
                break;
        default:
                err = -ENOTTY;
                break;
        }

        if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ)) {
                if (copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
                        err = -EFAULT;
        }

        return err;
}

static const struct file_operations nvhost_ctrlops = {
        .owner = THIS_MODULE,
        .release = nvhost_ctrlrelease,
        .open = nvhost_ctrlopen,
        .unlocked_ioctl = nvhost_ctrlctl
};

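/*
 * Power and clock gating callbacks hooked into nvhost_acm through the
 * device data: sync point state is restored on power-up and saved before
 * power-down, and the interrupt logic is started/stopped with the clock.
 */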
static void power_on_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);

        nvhost_syncpt_reset(&host->syncpt);
}

static int power_off_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);

        nvhost_syncpt_save(&host->syncpt);
        return 0;
}

static void clock_on_host(struct platform_device *dev)
{
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_start(&host->intr, clk_get_rate(pdata->clk[0]));
}

static int clock_off_host(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_stop(&host->intr);
        return 0;
}

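/*
 * Create the control character device (IFACE_NAME "-ctrl") through which
 * user space reaches the ioctls above.
 */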
static int __devinit nvhost_user_init(struct nvhost_master *host)
{
        int err;
        dev_t devno;

        host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
        if (IS_ERR(host->nvhost_class)) {
                err = PTR_ERR(host->nvhost_class);
                dev_err(&host->dev->dev, "failed to create class\n");
                goto fail;
        }

        err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
        if (err < 0) {
                dev_err(&host->dev->dev, "failed to reserve chrdev region\n");
                goto fail;
        }

        cdev_init(&host->cdev, &nvhost_ctrlops);
        host->cdev.owner = THIS_MODULE;
        err = cdev_add(&host->cdev, devno, 1);
        if (err < 0)
                goto fail;
        host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
                        IFACE_NAME "-ctrl");
        if (IS_ERR(host->ctrl)) {
                err = PTR_ERR(host->ctrl);
                dev_err(&host->dev->dev, "failed to create ctrl device\n");
                goto fail;
        }

        return 0;
fail:
        return err;
}

struct nvhost_channel *nvhost_alloc_channel(struct platform_device *dev)
{
        BUG_ON(!host_device_op().alloc_nvhost_channel);
        return host_device_op().alloc_nvhost_channel(dev);
}

void nvhost_free_channel(struct nvhost_channel *ch)
{
        BUG_ON(!host_device_op().free_nvhost_channel);
        host_device_op().free_nvhost_channel(ch);
}

static void nvhost_free_resources(struct nvhost_master *host)
{
        kfree(host->intr.syncpt);
        host->intr.syncpt = NULL;
}

static int __devinit nvhost_alloc_resources(struct nvhost_master *host)
{
        int err;

        err = nvhost_init_chip_support(host);
        if (err)
                return err;

        host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
                                    nvhost_syncpt_nb_pts(&host->syncpt),
                                    GFP_KERNEL);

        if (!host->intr.syncpt) {
                /* frees happen in the support removal phase */
                return -ENOMEM;
        }

        return 0;
}

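/*
 * Probe: map the host1x aperture, bring up chip support, sync points,
 * the interrupt unit and the control node, then briefly enable the
 * module clocks to reset sync point state before registering the device.
 */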
static int __devinit nvhost_probe(struct platform_device *dev)
{
        struct nvhost_master *host;
        struct resource *regs, *intr0, *intr1;
        int i, err;
        struct nvhost_device_data *pdata =
                (struct nvhost_device_data *)dev->dev.platform_data;

        regs = platform_get_resource(dev, IORESOURCE_MEM, 0);
        intr0 = platform_get_resource(dev, IORESOURCE_IRQ, 0);
        intr1 = platform_get_resource(dev, IORESOURCE_IRQ, 1);

        if (!regs || !intr0 || !intr1) {
                dev_err(&dev->dev, "missing required platform resources\n");
                return -ENXIO;
        }

        host = kzalloc(sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;

        nvhost = host;

        host->dev = dev;

        /* Copy host1x parameters. The private_data gets replaced
         * by nvhost_master later */
        memcpy(&host->info, pdata->private_data,
                        sizeof(struct host1x_device_info));

        pdata->finalize_poweron = power_on_host;
        pdata->prepare_poweroff = power_off_host;
        pdata->prepare_clockoff = clock_off_host;
        pdata->finalize_clockon = clock_on_host;

        pdata->pdev = dev;

        /* set common host1x device data */
        platform_set_drvdata(dev, pdata);

        /* set private host1x device data */
        nvhost_set_private_data(dev, host);

        host->reg_mem = request_mem_region(regs->start,
                resource_size(regs), dev->name);
        if (!host->reg_mem) {
                dev_err(&dev->dev, "failed to get host register memory\n");
                err = -ENXIO;
                goto fail;
        }

        host->aperture = ioremap(regs->start, resource_size(regs));
        if (!host->aperture) {
                dev_err(&dev->dev, "failed to remap host registers\n");
                err = -ENXIO;
                goto fail;
        }

        err = nvhost_alloc_resources(host);
        if (err) {
                dev_err(&dev->dev, "failed to init chip support\n");
                goto fail;
        }

        host->memmgr = mem_op().alloc_mgr();
        if (!host->memmgr) {
                dev_err(&dev->dev, "unable to create nvmap client\n");
                err = -EIO;
                goto fail;
        }

        err = nvhost_syncpt_init(dev, &host->syncpt);
        if (err)
                goto fail;

        err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
        if (err)
                goto fail;

        err = nvhost_user_init(host);
        if (err)
                goto fail;

        err = nvhost_module_init(dev);
        if (err)
                goto fail;

        for (i = 0; i < pdata->num_clks; i++)
                clk_prepare_enable(pdata->clk[i]);
        nvhost_syncpt_reset(&host->syncpt);
        for (i = 0; i < pdata->num_clks; i++)
                clk_disable_unprepare(pdata->clk[i]);

        nvhost_device_list_init();
        err = nvhost_device_list_add(dev);
        if (err)
                goto fail;

        nvhost_debug_init(host);

        dev_info(&dev->dev, "initialized\n");
        return 0;

fail:
        nvhost_free_resources(host);
        if (host->memmgr)
                mem_op().put_mgr(host->memmgr);
        kfree(host);
        return err;
}

static int __exit nvhost_remove(struct platform_device *dev)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        nvhost_intr_deinit(&host->intr);
        nvhost_syncpt_deinit(&host->syncpt);
        nvhost_free_resources(host);
        return 0;
}

static int nvhost_suspend(struct platform_device *dev, pm_message_t state)
{
        struct nvhost_master *host = nvhost_get_private_data(dev);
        int ret = 0;

        ret = nvhost_module_suspend(host->dev);
        dev_info(&dev->dev, "suspend status: %d\n", ret);

        return ret;
}

static int nvhost_resume(struct platform_device *dev)
{
        dev_info(&dev->dev, "resuming\n");
        return 0;
}

static struct platform_driver platform_driver = {
        .probe = nvhost_probe,
        .remove = __exit_p(nvhost_remove),
        .suspend = nvhost_suspend,
        .resume = nvhost_resume,
        .driver = {
                .owner = THIS_MODULE,
                .name = DRIVER_NAME
        },
};

static int __init nvhost_mod_init(void)
{
        return platform_driver_register(&platform_driver);
}

static void __exit nvhost_mod_exit(void)
{
        platform_driver_unregister(&platform_driver);
}

/* The host1x master device needs nvmap to be instantiated first.
 * nvmap is instantiated via fs_initcall, so the host1x master device
 * is instantiated via rootfs_initcall, which runs one level after
 * fs_initcall. */
rootfs_initcall(nvhost_mod_init);
module_exit(nvhost_mod_exit);