video: tegra: refactor for multiple chip support
[linux-2.6.git] / drivers / video / tegra / host / dev.c
1 /*
2  * drivers/video/tegra/host/dev.c
3  *
4  * Tegra Graphics Host Driver Entrypoint
5  *
6  * Copyright (c) 2010-2011, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include "dev.h"
24
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/spinlock.h>
28 #include <linux/fs.h>
29 #include <linux/cdev.h>
30 #include <linux/platform_device.h>
31 #include <linux/uaccess.h>
32 #include <linux/file.h>
33 #include <linux/clk.h>
34 #include <linux/hrtimer.h>
35 #define CREATE_TRACE_POINTS
36 #include <trace/events/nvhost.h>
37
38 #include <asm/io.h>
39
40 #include <mach/nvhost.h>
41 #include <mach/nvmap.h>
42 #include <mach/gpufuse.h>
43
44 #define DRIVER_NAME "tegra_grhost"
45 #define IFACE_NAME "nvhost"
46
/* Character-device major for the nvhost nodes. NVHOST_MAJOR may be 0, in
 * which case nvhost_user_init() allocates a major dynamically and stores
 * it back here. */
static int nvhost_major = NVHOST_MAJOR;
static int nvhost_minor;
/* NOTE(review): not referenced anywhere in this file's visible code —
 * presumably consumed by chip-specific support code; confirm before use. */
static unsigned int register_sets;
50
/* Per-open state for a channel device node. Tracks the submit currently
 * being streamed in via write() until it is flushed by ioctl. */
struct nvhost_channel_userctx {
        struct nvhost_channel *ch;              /* channel this fd refers to */
        struct nvhost_hwctx *hwctx;             /* optional hw context (if ctxhandler.alloc set) */
        struct nvhost_submit_hdr_ext hdr;       /* header of the in-progress submit */
        struct nvmap_handle_ref *gather_mem;    /* backing nvmap buffer for gather list */
        u32 *gathers;                           /* kernel mapping of gather_mem */
        u32 *cur_gather;                        /* next free (words, addr) pair in gathers */
        int pinarray_size;                      /* entries used in pinarray[] */
        struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
        struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
        struct nvmap_client *nvmap;             /* client set via SET_NVMAP_FD ioctl */
        struct nvhost_waitchk waitchks[NVHOST_MAX_WAIT_CHECKS];
        struct nvhost_waitchk *cur_waitchk;     /* next free slot in waitchks[] */
};
65
/* Per-open state for the control device node. mod_locks[] has one entry
 * per module lock; index 0 tracks the host module busy reference. */
struct nvhost_ctrl_userctx {
        struct nvhost_master *dev;
        u32 *mod_locks;         /* nb_mlocks entries, 1 = held by this fd */
};
70
/*
 * Release a channel file descriptor: drop the channel reference, free the
 * hardware context and the gather buffer, and drop the nvmap client.
 *
 * Also used as the error-cleanup path of nvhost_channelopen(), so every
 * step must tolerate a partially initialized context (hence the NULL /
 * IS_ERR guards before each teardown action).
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
        struct nvhost_channel_userctx *priv = filp->private_data;

        trace_nvhost_channel_release(priv->ch->desc->name);

        filp->private_data = NULL;

        /* drop the channel ref taken in open; may save hwctx state first */
        nvhost_putchannel(priv->ch, priv->hwctx);

        if (priv->hwctx)
                priv->ch->ctxhandler.put(priv->hwctx);

        /* unmap before freeing the underlying handle */
        if (priv->gathers)
                nvmap_munmap(priv->gather_mem, priv->gathers);

        if (!IS_ERR_OR_NULL(priv->gather_mem))
                nvmap_free(priv->ch->dev->nvmap, priv->gather_mem);

        nvmap_client_put(priv->nvmap);
        kfree(priv);
        return 0;
}
94
95 static int nvhost_channelopen(struct inode *inode, struct file *filp)
96 {
97         struct nvhost_channel_userctx *priv;
98         struct nvhost_channel *ch;
99
100
101         ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
102         ch = nvhost_getchannel(ch);
103         if (!ch)
104                 return -ENOMEM;
105         trace_nvhost_channel_open(ch->desc->name);
106
107         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
108         if (!priv) {
109                 nvhost_putchannel(ch, NULL);
110                 return -ENOMEM;
111         }
112         filp->private_data = priv;
113         priv->ch = ch;
114         priv->gather_mem = nvmap_alloc(ch->dev->nvmap,
115                                 sizeof(u32) * 2 * NVHOST_MAX_GATHERS, 32,
116                                 NVMAP_HANDLE_CACHEABLE);
117         if (IS_ERR(priv->gather_mem))
118                 goto fail;
119
120         if (ch->ctxhandler.alloc) {
121                 priv->hwctx = ch->ctxhandler.alloc(ch);
122                 if (!priv->hwctx)
123                         goto fail;
124         }
125
126         priv->gathers = nvmap_mmap(priv->gather_mem);
127
128         return 0;
129 fail:
130         nvhost_channelrelease(inode, filp);
131         return -ENOMEM;
132 }
133
134 static void add_gather(struct nvhost_channel_userctx *ctx,
135                 u32 mem_id, u32 words, u32 offset)
136 {
137         struct nvmap_pinarray_elem *pin;
138         u32* cur_gather = ctx->cur_gather;
139         pin = &ctx->pinarray[ctx->pinarray_size++];
140         pin->patch_mem = (u32)nvmap_ref_to_handle(ctx->gather_mem);
141         pin->patch_offset = ((cur_gather + 1) - ctx->gathers) * sizeof(u32);
142         pin->pin_mem = mem_id;
143         pin->pin_offset = offset;
144         cur_gather[0] = words;
145         ctx->cur_gather = cur_gather + 2;
146 }
147
148 static int set_submit(struct nvhost_channel_userctx *ctx)
149 {
150         /* submit should have at least 1 cmdbuf */
151         if (!ctx->hdr.num_cmdbufs)
152                 return -EIO;
153
154         /* check submit doesn't exceed static structs */
155         if ((ctx->hdr.num_cmdbufs + ctx->hdr.num_relocs) > NVHOST_MAX_HANDLES) {
156                 dev_err(&ctx->ch->dev->pdev->dev,
157                         "channel submit exceeded max handles (%d > %d)\n",
158                         ctx->hdr.num_cmdbufs + ctx->hdr.num_relocs,
159                         NVHOST_MAX_HANDLES);
160                 return -EIO;
161         }
162         if (ctx->hdr.num_waitchks > NVHOST_MAX_WAIT_CHECKS) {
163                 dev_err(&ctx->ch->dev->pdev->dev,
164                         "channel submit exceeded max waitchks (%d > %d)\n",
165                         ctx->hdr.num_waitchks,
166                         NVHOST_MAX_WAIT_CHECKS);
167                 return -EIO;
168         }
169
170         ctx->cur_gather = ctx->gathers;
171         ctx->cur_waitchk = ctx->waitchks;
172         ctx->pinarray_size = 0;
173
174         return 0;
175 }
176
177 static void reset_submit(struct nvhost_channel_userctx *ctx)
178 {
179         ctx->hdr.num_cmdbufs = 0;
180         ctx->hdr.num_relocs = 0;
181         ctx->hdr.num_waitchks = 0;
182 }
183
/*
 * Stream a submit into the channel context. Userspace writes, in order:
 * a submit header, then num_cmdbufs cmdbuf records, then num_relocs
 * reloc records, then num_waitchks waitchk records. The header's counts
 * drive a small state machine: whichever count is still nonzero selects
 * what the next chunk of the write is parsed as. Partial writes are
 * allowed; parsing resumes where it left off on the next write().
 *
 * Returns the number of bytes consumed, or a negative errno (the
 * in-progress submit is reset on error).
 */
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
                                size_t count, loff_t *offp)
{
        struct nvhost_channel_userctx *priv = filp->private_data;
        size_t remaining = count;
        int err = 0;

        while (remaining) {
                size_t consumed;
                /* all counts zero: expect a new (v0) submit header next */
                if (!priv->hdr.num_relocs &&
                    !priv->hdr.num_cmdbufs &&
                    !priv->hdr.num_waitchks) {
                        consumed = sizeof(struct nvhost_submit_hdr);
                        if (remaining < consumed)
                                break;
                        if (copy_from_user(&priv->hdr, buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        /* write() path only carries the legacy v0 header;
                         * extended headers arrive via SUBMIT_EXT ioctl */
                        priv->hdr.submit_version = NVHOST_SUBMIT_VERSION_V0;
                        err = set_submit(priv);
                        if (err)
                                break;
                        trace_nvhost_channel_write_submit(priv->ch->desc->name,
                          count, priv->hdr.num_cmdbufs, priv->hdr.num_relocs);
                } else if (priv->hdr.num_cmdbufs) {
                        /* one cmdbuf record at a time */
                        struct nvhost_cmdbuf cmdbuf;
                        consumed = sizeof(cmdbuf);
                        if (remaining < consumed)
                                break;
                        if (copy_from_user(&cmdbuf, buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        trace_nvhost_channel_write_cmdbuf(priv->ch->desc->name,
                          cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
                        add_gather(priv,
                                cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
                        priv->hdr.num_cmdbufs--;
                } else if (priv->hdr.num_relocs) {
                        /* relocs are copied in bulk straight into pinarray */
                        int numrelocs = remaining / sizeof(struct nvhost_reloc);
                        if (!numrelocs)
                                break;
                        numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
                        consumed = numrelocs * sizeof(struct nvhost_reloc);
                        if (copy_from_user(&priv->pinarray[priv->pinarray_size],
                                                buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        trace_nvhost_channel_write_relocs(priv->ch->desc->name,
                          numrelocs);
                        priv->pinarray_size += numrelocs;
                        priv->hdr.num_relocs -= numrelocs;
                } else if (priv->hdr.num_waitchks) {
                        /* waitchks likewise copied in bulk */
                        int numwaitchks =
                                (remaining / sizeof(struct nvhost_waitchk));
                        if (!numwaitchks)
                                break;
                        numwaitchks = min_t(int,
                                numwaitchks, priv->hdr.num_waitchks);
                        consumed = numwaitchks * sizeof(struct nvhost_waitchk);
                        if (copy_from_user(priv->cur_waitchk, buf, consumed)) {
                                err = -EFAULT;
                                break;
                        }
                        trace_nvhost_channel_write_waitchks(
                          priv->ch->desc->name, numwaitchks,
                          priv->hdr.waitchk_mask);
                        priv->cur_waitchk += numwaitchks;
                        priv->hdr.num_waitchks -= numwaitchks;
                } else {
                        err = -EFAULT;
                        break;
                }
                remaining -= consumed;
                buf += consumed;
        }

        if (err < 0) {
                dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
                reset_submit(priv);
                return err;
        }

        return (count - remaining);
}
271
272 static int nvhost_ioctl_channel_flush(
273         struct nvhost_channel_userctx *ctx,
274         struct nvhost_get_param_args *args,
275         int null_kickoff)
276 {
277         struct device *device = &ctx->ch->dev->pdev->dev;
278         int num_unpin;
279         int err;
280
281         trace_nvhost_ioctl_channel_flush(ctx->ch->desc->name);
282
283         if (ctx->hdr.num_relocs ||
284             ctx->hdr.num_cmdbufs ||
285             ctx->hdr.num_waitchks) {
286                 reset_submit(ctx);
287                 dev_err(device, "channel submit out of sync\n");
288                 return -EFAULT;
289         }
290         if (!ctx->nvmap) {
291                 dev_err(device, "no nvmap context set\n");
292                 return -EFAULT;
293         }
294         if (ctx->cur_gather == ctx->gathers)
295                 return 0;
296
297         /* pin mem handles and patch physical addresses */
298         num_unpin = nvmap_pin_array(ctx->nvmap,
299                                     nvmap_ref_to_handle(ctx->gather_mem),
300                                     ctx->pinarray, ctx->pinarray_size,
301                                     ctx->unpinarray);
302         if (num_unpin < 0) {
303                 dev_warn(device, "nvmap_pin_array failed: %d\n", num_unpin);
304                 return num_unpin;
305         }
306
307         if (nvhost_debug_null_kickoff_pid == current->tgid)
308                 null_kickoff = 1;
309
310         /* context switch if needed, and submit user's gathers to the channel */
311         BUG_ON(!channel_op(ctx->ch).submit);
312         err = channel_op(ctx->ch).submit(ctx->ch, ctx->hwctx, ctx->nvmap,
313                                 ctx->gathers, ctx->cur_gather,
314                                 ctx->waitchks, ctx->cur_waitchk,
315                                 ctx->hdr.waitchk_mask,
316                                 ctx->unpinarray, num_unpin,
317                                 ctx->hdr.syncpt_id, ctx->hdr.syncpt_incrs,
318                                 &args->value,
319                                 null_kickoff);
320         if (err)
321                 nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
322
323         return 0;
324 }
325
326 static long nvhost_channelctl(struct file *filp,
327         unsigned int cmd, unsigned long arg)
328 {
329         struct nvhost_channel_userctx *priv = filp->private_data;
330         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
331         int err = 0;
332
333         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
334                 (_IOC_NR(cmd) == 0) ||
335                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
336                 return -EFAULT;
337
338         BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
339
340         if (_IOC_DIR(cmd) & _IOC_WRITE) {
341                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
342                         return -EFAULT;
343         }
344
345         switch (cmd) {
346         case NVHOST_IOCTL_CHANNEL_FLUSH:
347                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
348                 break;
349         case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
350                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
351                 break;
352         case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
353         {
354                 struct nvhost_submit_hdr_ext *hdr;
355
356                 if (priv->hdr.num_relocs ||
357                     priv->hdr.num_cmdbufs ||
358                     priv->hdr.num_waitchks) {
359                         reset_submit(priv);
360                         dev_err(&priv->ch->dev->pdev->dev,
361                                 "channel submit out of sync\n");
362                         err = -EIO;
363                         break;
364                 }
365
366                 hdr = (struct nvhost_submit_hdr_ext *)buf;
367                 if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
368                         dev_err(&priv->ch->dev->pdev->dev,
369                                 "submit version %d > max supported %d\n",
370                                 hdr->submit_version,
371                                 NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
372                         err = -EINVAL;
373                         break;
374                 }
375                 memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
376                 err = set_submit(priv);
377                 trace_nvhost_ioctl_channel_submit(priv->ch->desc->name,
378                         priv->hdr.submit_version,
379                         priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
380                         priv->hdr.num_waitchks);
381                 break;
382         }
383         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
384                 /* host syncpt ID is used by the RM (and never be given out) */
385                 BUG_ON(priv->ch->desc->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
386                 ((struct nvhost_get_param_args *)buf)->value =
387                         priv->ch->desc->syncpts;
388                 break;
389         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
390                 ((struct nvhost_get_param_args *)buf)->value =
391                         priv->ch->desc->waitbases;
392                 break;
393         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
394                 ((struct nvhost_get_param_args *)buf)->value =
395                         priv->ch->desc->modulemutexes;
396                 break;
397         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
398         {
399                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
400                 struct nvmap_client *new_client = nvmap_client_get_file(fd);
401
402                 if (IS_ERR(new_client)) {
403                         err = PTR_ERR(new_client);
404                         break;
405                 }
406
407                 if (priv->nvmap)
408                         nvmap_client_put(priv->nvmap);
409
410                 priv->nvmap = new_client;
411                 break;
412         }
413         default:
414                 err = -ENOTTY;
415                 break;
416         }
417
418         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
419                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
420
421         return err;
422 }
423
424 static struct file_operations nvhost_channelops = {
425         .owner = THIS_MODULE,
426         .release = nvhost_channelrelease,
427         .open = nvhost_channelopen,
428         .write = nvhost_channelwrite,
429         .unlocked_ioctl = nvhost_channelctl
430 };
431
432 static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
433 {
434         struct nvhost_ctrl_userctx *priv = filp->private_data;
435         int i;
436
437         trace_nvhost_ctrlrelease(priv->dev->mod.name);
438
439         filp->private_data = NULL;
440         if (priv->mod_locks[0])
441                 nvhost_module_idle(&priv->dev->mod);
442         for (i = 1; i < priv->dev->nb_mlocks; i++)
443                 if (priv->mod_locks[i])
444                         nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
445         kfree(priv->mod_locks);
446         kfree(priv);
447         return 0;
448 }
449
450 static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
451 {
452         struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
453         struct nvhost_ctrl_userctx *priv;
454         u32 *mod_locks;
455
456         trace_nvhost_ctrlopen(host->mod.name);
457
458         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
459         mod_locks = kzalloc(sizeof(u32)*host->nb_mlocks, GFP_KERNEL);
460
461         if (!(priv && mod_locks)) {
462                 kfree(priv);
463                 kfree(mod_locks);
464                 return -ENOMEM;
465         }
466
467         priv->dev = host;
468         priv->mod_locks = mod_locks;
469         filp->private_data = priv;
470         return 0;
471 }
472
473 static int nvhost_ioctl_ctrl_syncpt_read(
474         struct nvhost_ctrl_userctx *ctx,
475         struct nvhost_ctrl_syncpt_read_args *args)
476 {
477         if (args->id >= ctx->dev->syncpt.nb_pts)
478                 return -EINVAL;
479         trace_nvhost_ioctl_ctrl_syncpt_read(args->id);
480         args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
481         return 0;
482 }
483
484 static int nvhost_ioctl_ctrl_syncpt_incr(
485         struct nvhost_ctrl_userctx *ctx,
486         struct nvhost_ctrl_syncpt_incr_args *args)
487 {
488         if (args->id >= ctx->dev->syncpt.nb_pts)
489                 return -EINVAL;
490         trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
491         nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
492         return 0;
493 }
494
495 static int nvhost_ioctl_ctrl_syncpt_waitex(
496         struct nvhost_ctrl_userctx *ctx,
497         struct nvhost_ctrl_syncpt_waitex_args *args)
498 {
499         u32 timeout;
500         if (args->id >= ctx->dev->syncpt.nb_pts)
501                 return -EINVAL;
502         if (args->timeout == NVHOST_NO_TIMEOUT)
503                 timeout = MAX_SCHEDULE_TIMEOUT;
504         else
505                 timeout = (u32)msecs_to_jiffies(args->timeout);
506
507         trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
508           args->timeout);
509         return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
510                                         args->thresh, timeout, &args->value);
511 }
512
513 static int nvhost_ioctl_ctrl_module_mutex(
514         struct nvhost_ctrl_userctx *ctx,
515         struct nvhost_ctrl_module_mutex_args *args)
516 {
517         int err = 0;
518         if (args->id >= ctx->dev->nb_mlocks ||
519             args->lock > 1)
520                 return -EINVAL;
521
522         trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
523         if (args->lock && !ctx->mod_locks[args->id]) {
524                 if (args->id == 0)
525                         nvhost_module_busy(&ctx->dev->mod);
526                 else
527                         err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
528                 if (!err)
529                         ctx->mod_locks[args->id] = 1;
530         }
531         else if (!args->lock && ctx->mod_locks[args->id]) {
532                 if (args->id == 0)
533                         nvhost_module_idle(&ctx->dev->mod);
534                 else
535                         nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
536                 ctx->mod_locks[args->id] = 0;
537         }
538         return err;
539 }
540
541 static int nvhost_ioctl_ctrl_module_regrdwr(
542         struct nvhost_ctrl_userctx *ctx,
543         struct nvhost_ctrl_module_regrdwr_args *args)
544 {
545         u32 num_offsets = args->num_offsets;
546         u32 *offsets = args->offsets;
547         void *values = args->values;
548         u32 vals[64];
549
550         if (!(args->id < ctx->dev->nb_modules) ||
551             (num_offsets == 0))
552                 return -EINVAL;
553
554         while (num_offsets--) {
555                 u32 remaining = args->block_size;
556                 u32 offs;
557                 if (get_user(offs, offsets))
558                         return -EFAULT;
559                 offsets++;
560                 while (remaining) {
561                         u32 batch = min(remaining, 64*sizeof(u32));
562                         if (args->write) {
563                                 if (copy_from_user(vals, values, batch))
564                                         return -EFAULT;
565                                 nvhost_write_module_regs(&ctx->dev->cpuaccess,
566                                                         args->id, offs, batch, vals);
567                         } else {
568                                 nvhost_read_module_regs(&ctx->dev->cpuaccess,
569                                                         args->id, offs, batch, vals);
570                                 if (copy_to_user(values, vals, batch))
571                                         return -EFAULT;
572                         }
573                         remaining -= batch;
574                         offs += batch;
575                         values += batch;
576                 }
577         }
578
579         return 0;
580 }
581
582 static long nvhost_ctrlctl(struct file *filp,
583         unsigned int cmd, unsigned long arg)
584 {
585         struct nvhost_ctrl_userctx *priv = filp->private_data;
586         u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
587         int err = 0;
588
589         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
590                 (_IOC_NR(cmd) == 0) ||
591                 (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
592                 return -EFAULT;
593
594         BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
595
596         if (_IOC_DIR(cmd) & _IOC_WRITE) {
597                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
598                         return -EFAULT;
599         }
600
601         switch (cmd) {
602         case NVHOST_IOCTL_CTRL_SYNCPT_READ:
603                 err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
604                 break;
605         case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
606                 err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
607                 break;
608         case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
609                 err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
610                 break;
611         case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
612                 err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
613                 break;
614         case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
615                 err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
616                 break;
617         case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
618                 err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
619                 break;
620         default:
621                 err = -ENOTTY;
622                 break;
623         }
624
625         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
626                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
627
628         return err;
629 }
630
631 static struct file_operations nvhost_ctrlops = {
632         .owner = THIS_MODULE,
633         .release = nvhost_ctrlrelease,
634         .open = nvhost_ctrlopen,
635         .unlocked_ioctl = nvhost_ctrlctl
636 };
637
/*
 * Power-state callback for the host module. On power-on, restart the
 * interrupt machinery at the current clock rate; on power-off, suspend
 * every channel, snapshot sync point state, then stop interrupts.
 */
static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
{
        struct nvhost_master *dev = container_of(mod, struct nvhost_master, mod);

        if (action == NVHOST_POWER_ACTION_ON) {
                nvhost_intr_start(&dev->intr, clk_get_rate(mod->clk[0]));
                /* don't do it, as display may have changed syncpt
                 * after the last save
                 * nvhost_syncpt_reset(&dev->syncpt);
                 */
        } else if (action == NVHOST_POWER_ACTION_OFF) {
                int i;
                /* channels first, so nothing submits while state is saved */
                for (i = 0; i < dev->nb_channels; i++)
                        nvhost_channel_suspend(&dev->channels[i]);
                nvhost_syncpt_save(&dev->syncpt);
                nvhost_intr_stop(&dev->intr);
        }
}
656
657 static int __devinit nvhost_user_init(struct nvhost_master *host)
658 {
659         int i, err, devno;
660
661         host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
662         if (IS_ERR(host->nvhost_class)) {
663                 err = PTR_ERR(host->nvhost_class);
664                 dev_err(&host->pdev->dev, "failed to create class\n");
665                 goto fail;
666         }
667
668         if (nvhost_major) {
669                 devno = MKDEV(nvhost_major, nvhost_minor);
670                 err = register_chrdev_region(devno, host->nb_channels + 1,
671                                              IFACE_NAME);
672         } else {
673                 err = alloc_chrdev_region(&devno, nvhost_minor,
674                                         host->nb_channels + 1, IFACE_NAME);
675                 nvhost_major = MAJOR(devno);
676         }
677         if (err < 0) {
678                 dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
679                 goto fail;
680         }
681
682         for (i = 0; i < host->nb_channels; i++) {
683                 struct nvhost_channel *ch = &host->channels[i];
684
685                 cdev_init(&ch->cdev, &nvhost_channelops);
686                 ch->cdev.owner = THIS_MODULE;
687
688                 devno = MKDEV(nvhost_major, nvhost_minor + i);
689                 err = cdev_add(&ch->cdev, devno, 1);
690                 if (err < 0) {
691                         dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
692                         goto fail;
693                 }
694                 ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
695                                 IFACE_NAME "-%s", ch->desc->name);
696                 if (IS_ERR(ch->node)) {
697                         err = PTR_ERR(ch->node);
698                         dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
699                         goto fail;
700                 }
701         }
702
703         cdev_init(&host->cdev, &nvhost_ctrlops);
704         host->cdev.owner = THIS_MODULE;
705         devno = MKDEV(nvhost_major, nvhost_minor + host->nb_channels);
706         err = cdev_add(&host->cdev, devno, 1);
707         if (err < 0)
708                 goto fail;
709         host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
710                         IFACE_NAME "-ctrl");
711         if (IS_ERR(host->ctrl)) {
712                 err = PTR_ERR(host->ctrl);
713                 dev_err(&host->pdev->dev, "failed to create ctrl device\n");
714                 goto fail;
715         }
716
717         return 0;
718 fail:
719         return err;
720 }
721
722 static void nvhost_remove_chip_support(struct nvhost_master *host)
723 {
724
725         kfree(host->channels);
726         host->channels = 0;
727
728         kfree(host->syncpt.min_val);
729         host->syncpt.min_val = 0;
730
731         kfree(host->syncpt.max_val);
732         host->syncpt.max_val = 0;
733
734         kfree(host->syncpt.base_val);
735         host->syncpt.base_val = 0;
736
737         kfree(host->intr.syncpt);
738         host->intr.syncpt = 0;
739
740         kfree(host->cpuaccess.regs);
741         host->cpuaccess.regs = 0;
742
743         kfree(host->cpuaccess.reg_mem);
744         host->cpuaccess.reg_mem = 0;
745
746         kfree(host->cpuaccess.lock_counts);
747         host->cpuaccess.lock_counts = 0;
748 }
749
750 static int __devinit nvhost_init_chip_support(struct nvhost_master *host)
751 {
752         int err;
753         err = tegra_get_chip_info(&host->chip_info);
754         if (err)
755                 return err;
756
757         switch (host->chip_info.arch) {
758         case TEGRA_SOC_CHIP_ARCH_T20:
759                 err = nvhost_init_t20_support(host);
760                 break;
761
762         case TEGRA_SOC_CHIP_ARCH_T30:
763                 err = nvhost_init_t30_support(host);
764                 break;
765         default:
766                 return -ENODEV;
767         }
768
769         if (err)
770                 return err;
771
772         /* allocate items sized in chip specific support init */
773         host->channels = kzalloc(sizeof(struct nvhost_channel) *
774                                  host->nb_channels, GFP_KERNEL);
775
776         host->syncpt.min_val = kzalloc(sizeof(atomic_t) *
777                                        host->syncpt.nb_pts, GFP_KERNEL);
778
779         host->syncpt.max_val = kzalloc(sizeof(atomic_t) *
780                                        host->syncpt.nb_pts, GFP_KERNEL);
781
782         host->syncpt.base_val = kzalloc(sizeof(u32) *
783                                         host->syncpt.nb_bases, GFP_KERNEL);
784
785         host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
786                                     host->syncpt.nb_pts, GFP_KERNEL);
787
788         host->cpuaccess.reg_mem = kzalloc(sizeof(struct resource *) *
789                                        host->nb_modules, GFP_KERNEL);
790
791         host->cpuaccess.regs = kzalloc(sizeof(void __iomem *) *
792                                        host->nb_modules, GFP_KERNEL);
793
794         host->cpuaccess.lock_counts = kzalloc(sizeof(atomic_t) *
795                                        host->nb_mlocks, GFP_KERNEL);
796
797         if (!(host->channels && host->syncpt.min_val &&
798               host->syncpt.max_val && host->syncpt.base_val &&
799               host->intr.syncpt && host->cpuaccess.reg_mem &&
800               host->cpuaccess.regs && host->cpuaccess.lock_counts)) {
801                 /* frees happen in the support removal phase */
802                 return -ENOMEM;
803         }
804
805         return 0;
806 }
807 static int __devinit nvhost_probe(struct platform_device *pdev)
808 {
809         struct nvhost_master *host;
810         struct resource *regs, *intr0, *intr1;
811         int i, err;
812
813         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
814         intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
815         intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
816
817         if (!regs || !intr0 || !intr1) {
818                 dev_err(&pdev->dev, "missing required platform resources\n");
819                 return -ENXIO;
820         }
821
822         host = kzalloc(sizeof(*host), GFP_KERNEL);
823         if (!host)
824                 return -ENOMEM;
825
826         host->pdev = pdev;
827
828         host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
829         if (!host->nvmap) {
830                 dev_err(&pdev->dev, "unable to create nvmap client\n");
831                 err = -EIO;
832                 goto fail;
833         }
834
835         host->reg_mem = request_mem_region(regs->start,
836                                         resource_size(regs), pdev->name);
837         if (!host->reg_mem) {
838                 dev_err(&pdev->dev, "failed to get host register memory\n");
839                 err = -ENXIO;
840                 goto fail;
841         }
842         host->aperture = ioremap(regs->start, resource_size(regs));
843         if (!host->aperture) {
844                 dev_err(&pdev->dev, "failed to remap host registers\n");
845                 err = -ENXIO;
846                 goto fail;
847         }
848
849         err = nvhost_init_chip_support(host);
850         if (err) {
851                 dev_err(&pdev->dev, "failed to init chip support\n");
852                 goto fail;
853         }
854
855         for (i = 0; i < host->nb_channels; i++) {
856                 struct nvhost_channel *ch = &host->channels[i];
857                 BUG_ON(!host_channel_op(host).init);
858                 err = host_channel_op(host).init(ch, host, i);
859                 if (err < 0) {
860                         dev_err(&pdev->dev, "failed to init channel %d\n", i);
861                         goto fail;
862                 }
863         }
864
865
866         err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
867         if (err)
868                 goto fail;
869
870         err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
871         if (err)
872                 goto fail;
873
874         err = nvhost_user_init(host);
875         if (err)
876                 goto fail;
877
878         err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
879         if (err)
880                 goto fail;
881
882
883         platform_set_drvdata(pdev, host);
884
885         clk_enable(host->mod.clk[0]);
886         nvhost_syncpt_reset(&host->syncpt);
887         clk_disable(host->mod.clk[0]);
888
889         nvhost_bus_register(host);
890
891         nvhost_debug_init(host);
892
893         dev_info(&pdev->dev, "initialized\n");
894         return 0;
895
896 fail:
897         nvhost_remove_chip_support(host);
898         if (host->nvmap)
899                 nvmap_client_put(host->nvmap);
900         /* TODO: [ahatala 2010-05-04] */
901         kfree(host);
902         return err;
903 }
904
/*
 * Driver removal: frees only the chip-specific allocations made by
 * nvhost_init_chip_support().
 *
 * NOTE(review): teardown is incomplete -- the register mapping, mem
 * region, nvmap client, interrupts and the host structure itself (see
 * the commented-out kfree below) are never released.  This path is only
 * reachable on module unload (__exit_p), so the leak is bounded, but
 * confirm before relying on unload/reload cycles.
 */
static int __exit nvhost_remove(struct platform_device *pdev)
{
	struct nvhost_master *host = platform_get_drvdata(pdev);
	nvhost_remove_chip_support(host);
	/*kfree(host);?*/
	return 0;
}
912
913 static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
914 {
915         struct nvhost_master *host = platform_get_drvdata(pdev);
916         dev_info(&pdev->dev, "suspending\n");
917         nvhost_module_suspend(&host->mod, true);
918         clk_enable(host->mod.clk[0]);
919         nvhost_syncpt_save(&host->syncpt);
920         clk_disable(host->mod.clk[0]);
921         dev_info(&pdev->dev, "suspended\n");
922         return 0;
923 }
924
925 static int nvhost_resume(struct platform_device *pdev)
926 {
927         struct nvhost_master *host = platform_get_drvdata(pdev);
928         dev_info(&pdev->dev, "resuming\n");
929         clk_enable(host->mod.clk[0]);
930         nvhost_syncpt_reset(&host->syncpt);
931         clk_disable(host->mod.clk[0]);
932         dev_info(&pdev->dev, "resumed\n");
933         return 0;
934 }
935
936 static struct platform_driver nvhost_driver = {
937         .remove = __exit_p(nvhost_remove),
938         .suspend = nvhost_suspend,
939         .resume = nvhost_resume,
940         .driver = {
941                 .owner = THIS_MODULE,
942                 .name = DRIVER_NAME
943         }
944 };
945
946 static int __init nvhost_mod_init(void)
947 {
948         register_sets = tegra_gpu_register_sets();
949         return platform_driver_probe(&nvhost_driver, nvhost_probe);
950 }
951
/* Module exit: unregister the platform driver (invokes nvhost_remove). */
static void __exit nvhost_mod_exit(void)
{
	platform_driver_unregister(&nvhost_driver);
}
956
module_init(nvhost_mod_init);
module_exit(nvhost_mod_exit);

/*
 * Expose register_sets as a read-only (0444) module parameter: the set
 * callback is NULL so userspace can only read it.  The value itself is
 * filled in from tegra_gpu_register_sets() during nvhost_mod_init().
 */
module_param_call(register_sets, NULL, param_get_uint, &register_sets, 0444);
MODULE_PARM_DESC(register_sets, "Number of register sets");

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Graphics host driver for Tegra products");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform-nvhost");