6ea8cbabff13fe41c54696be1cd2a2fecbd7cde6
[linux-2.6.git] / drivers / video / tegra / host / dev.c
1 /*
2  * drivers/video/tegra/host/dev.c
3  *
4  * Tegra Graphics Host Driver Entrypoint
5  *
6  * Copyright (c) 2010-2011, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #include "dev.h"
24
25 #include <linux/slab.h>
26 #include <linux/string.h>
27 #include <linux/spinlock.h>
28 #include <linux/fs.h>
29 #include <linux/cdev.h>
30 #include <linux/platform_device.h>
31 #include <linux/uaccess.h>
32 #include <linux/file.h>
33 #include <linux/clk.h>
34 #include <linux/hrtimer.h>
35 #define CREATE_TRACE_POINTS
36 #include <trace/events/nvhost.h>
37
38 #include <linux/io.h>
39
40 #include <linux/nvhost.h>
41 #include <linux/nvhost_ioctl.h>
42 #include <mach/nvmap.h>
43 #include <mach/gpufuse.h>
44 #include <mach/hardware.h>
45
46 #include "debug.h"
47 #include "nvhost_job.h"
48
49 #define DRIVER_NAME "tegra_grhost"
50 #define IFACE_NAME "nvhost"
51 #define TRACE_MAX_LENGTH 128U
52
/* Char device numbering; the major may be reassigned by alloc_chrdev_region(). */
static int nvhost_major = NVHOST_MAJOR;
static int nvhost_minor;
/* NOTE(review): written/read outside this chunk - purpose not visible here. */
static unsigned int register_sets;
56
/* Per-open state of a channel device node (filp->private_data). */
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;		/* channel this fd is bound to */
	struct nvhost_hwctx *hwctx;		/* HW context; NULL if channel has no ctxhandler.alloc */
	struct nvhost_submit_hdr_ext hdr;	/* header of the submit in progress */
	int num_relocshifts;			/* reloc shifts still expected (submit V2+) */
	struct nvhost_job *job;			/* job being assembled by write() */
	struct nvmap_client *nvmap;		/* client set via SET_NVMAP_FD ioctl */
	u32 timeout;				/* job timeout in ms (SET_TIMEOUT ioctl) */
	u32 priority;				/* submit priority (SET_PRIORITY ioctl) */
	int clientid;				/* unique id from host->clientid counter */
};
68
/* Per-open state of the ctrl device node (filp->private_data). */
struct nvhost_ctrl_userctx {
	struct nvhost_master *dev;	/* owning nvhost master */
	u32 *mod_locks;			/* one flag per module lock held by this fd;
					 * index 0 is the host power reference */
};
73
74 /*
75  * Write cmdbuf to ftrace output. Checks if cmdbuf contents should be output
76  * and mmaps the cmdbuf contents if required.
77  */
/*
 * Write cmdbuf to ftrace output. Checks if cmdbuf contents should be output
 * and mmaps the cmdbuf contents if required.
 *
 * Fixes vs. previous revision:
 *  - 'mem' is now scoped per gather, so a pointer unmapped by
 *    nvmap_munmap() can no longer be reused on a later iteration if the
 *    nvhost_debug_trace_cmdbuf flag is cleared mid-loop.
 *  - removed the inner loop counter shadowing the outer 'i'.
 *  - removed a stray ';' after the if-block.
 */
static void trace_write_cmdbufs(struct nvhost_job *job)
{
#if defined(CONFIG_TEGRA_NVMAP)
	struct nvmap_handle_ref handle;
	int i;

	for (i = 0; i < job->num_gathers; i++) {
		struct nvhost_channel_gather *gather = &job->gathers[i];
		void *mem = NULL;

		if (nvhost_debug_trace_cmdbuf) {
			handle.handle = nvmap_id_to_handle(gather->mem_id);
			mem = nvmap_mmap(&handle);
			if (IS_ERR_OR_NULL(mem))
				mem = NULL;
		}

		if (mem) {
			u32 w;
			/*
			 * Write in batches of 128 as there seems to be a limit
			 * of how much you can output to ftrace at once.
			 * NOTE(review): 'mem' is passed unadjusted for every
			 * batch - presumably the trace event applies the
			 * offset itself; confirm against the event definition.
			 */
			for (w = 0; w < gather->words; w += TRACE_MAX_LENGTH) {
				trace_nvhost_channel_write_cmdbuf_data(
					job->ch->dev->name,
					gather->mem_id,
					min(gather->words - w,
					    TRACE_MAX_LENGTH),
					gather->offset + w * sizeof(u32),
					mem);
			}
			nvmap_munmap(&handle, mem);
		}
	}
#endif
}
114
/*
 * release() handler for a channel node: tears down the per-open context.
 * Also used as the error-unwind path by nvhost_channelopen(), so hwctx,
 * job and nvmap may still be unset (NULL-checked or NULL-safe below).
 */
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(priv->ch->dev->name);

	filp->private_data = NULL;

	/* Drop the module client and the channel reference taken in open(). */
	nvhost_module_remove_client(priv->ch->dev, priv);
	nvhost_putchannel(priv->ch, priv->hwctx);

	if (priv->hwctx)
		priv->ch->ctxhandler.put(priv->hwctx);

	if (priv->job)
		nvhost_job_put(priv->job);

	nvmap_client_put(priv->nvmap);
	kfree(priv);
	return 0;
}
136
/*
 * open() handler for a channel node: takes a channel reference and builds
 * the per-open context (optional hardware context plus a pre-allocated
 * job).  On failure after priv is installed, the partial state is unwound
 * through nvhost_channelrelease().
 */
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(ch->dev->name);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch, NULL);
		return -ENOMEM;
	}
	/* From here on, nvhost_channelrelease() can clean up via filp. */
	filp->private_data = priv;
	priv->ch = ch;
	nvhost_module_add_client(ch->dev, priv);

	/* A hardware context is optional; not every channel provides one. */
	if (ch->ctxhandler.alloc) {
		priv->hwctx = ch->ctxhandler.alloc(ch);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1, &ch->dev->host->clientid);

	priv->job = nvhost_job_alloc(ch, priv->hwctx, &priv->hdr,
			NULL, priv->priority, priv->clientid);
	if (!priv->job)
		goto fail;

	return 0;
fail:
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}
175
176 static int set_submit(struct nvhost_channel_userctx *ctx)
177 {
178         struct device *device = &ctx->ch->dev->dev;
179
180         /* submit should have at least 1 cmdbuf */
181         if (!ctx->hdr.num_cmdbufs)
182                 return -EIO;
183
184         if (!ctx->nvmap) {
185                 dev_err(device, "no nvmap context set\n");
186                 return -EFAULT;
187         }
188
189         ctx->job = nvhost_job_realloc(ctx->job,
190                         &ctx->hdr,
191                         ctx->nvmap,
192                         ctx->priority,
193                         ctx->clientid);
194         if (!ctx->job)
195                 return -ENOMEM;
196         ctx->job->timeout = ctx->timeout;
197
198         if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
199                 ctx->num_relocshifts = ctx->hdr.num_relocs;
200
201         return 0;
202 }
203
204 static void reset_submit(struct nvhost_channel_userctx *ctx)
205 {
206         ctx->hdr.num_cmdbufs = 0;
207         ctx->hdr.num_relocs = 0;
208         ctx->num_relocshifts = 0;
209         ctx->hdr.num_waitchks = 0;
210 }
211
/*
 * write() handler for a channel node: userspace streams a submit into the
 * channel.  The stream begins with a nvhost_submit_hdr announcing the
 * counts, followed by that many cmdbufs, relocs, waitchks and (for submit
 * version >= 2) reloc shifts.  Outstanding counts are kept in priv->hdr /
 * priv->num_relocshifts, so one submit may span several write() calls.
 * Returns the number of bytes consumed, or a negative errno after
 * resetting the pending submit.
 */
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;
	struct nvhost_job *job = priv->job;
	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
	const char *chname = priv->ch->dev->name;

	while (remaining) {
		size_t consumed;
		/* All counts exhausted: the next unit must be a header. */
		if (!hdr->num_relocs &&
		    !priv->num_relocshifts &&
		    !hdr->num_cmdbufs &&
		    !hdr->num_waitchks) {
			consumed = sizeof(struct nvhost_submit_hdr);
			/* A partial unit stays in userspace for the next write. */
			if (remaining < consumed)
				break;
			if (copy_from_user(hdr, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			/* Headers arriving via write() use the legacy V0
			 * layout; newer versions come via SUBMIT_EXT ioctl. */
			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
			err = set_submit(priv);
			if (err)
				break;
			trace_nvhost_channel_write_submit(chname,
			  count, hdr->num_cmdbufs, hdr->num_relocs,
			  hdr->syncpt_id, hdr->syncpt_incrs);
		} else if (hdr->num_cmdbufs) {
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_cmdbuf(chname,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			hdr->num_cmdbufs--;
		} else if (hdr->num_relocs) {
			/* Relocations are copied straight into the job's
			 * pin array. */
			struct nvmap_pinarray_elem *elem =
						&job->pinarray[job->num_pins];
			consumed = sizeof(struct nvhost_reloc);
			if (remaining < consumed)
				break;
			if (copy_from_user(elem, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			/* Translate userspace nvmap handles to kernel ones. */
			elem->patch_mem =
				nvmap_convert_handle_u2k(elem->patch_mem);
			elem->pin_mem =
				nvmap_convert_handle_u2k(elem->pin_mem);
			trace_nvhost_channel_write_reloc(chname);
			job->num_pins++;
			hdr->num_relocs--;
		} else if (hdr->num_waitchks) {
			struct nvhost_waitchk *waitchk =
					&job->waitchk[job->num_waitchk];
			consumed = sizeof(struct nvhost_waitchk);
			if (remaining < consumed)
				break;
			if (copy_from_user(waitchk, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			waitchk->mem = nvmap_convert_handle_u2k(waitchk->mem);
			trace_nvhost_channel_write_waitchks(
			  chname, 1,
			  hdr->waitchk_mask);
			job->num_waitchk++;
			hdr->num_waitchks--;
		} else if (priv->num_relocshifts) {
			/* Shifts follow the relocs; each one pairs with the
			 * oldest pin-array entry still missing its shift. */
			int next_shift =
				job->num_pins - priv->num_relocshifts;
			consumed = sizeof(struct nvhost_reloc_shift);
			if (remaining < consumed)
				break;
			if (copy_from_user(
					&job->pinarray[next_shift].reloc_shift,
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->num_relocshifts--;
		} else {
			/* Unreachable: the first branch covers the all-zero
			 * case.  Kept as a defensive error path. */
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		dev_err(&priv->ch->dev->dev, "channel write error\n");
		reset_submit(priv);
		return err;
	}

	return count - remaining;
}
318
/*
 * CHANNEL_FLUSH / CHANNEL_NULL_KICKOFF handler: submits the job assembled
 * by nvhost_channelwrite().  All announced cmdbufs/relocs/waitchks must
 * have been received, otherwise the pending submit is reset and -EFAULT
 * returned.  On success args->value carries the syncpoint threshold that
 * marks completion of this submit.
 */
static int nvhost_ioctl_channel_flush(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_get_param_args *args,
	int null_kickoff)
{
	struct device *device = &ctx->ch->dev->dev;
	int err;

	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

	if (!ctx->job ||
	    ctx->hdr.num_relocs ||
	    ctx->hdr.num_cmdbufs ||
	    ctx->hdr.num_waitchks) {
		reset_submit(ctx);
		dev_err(device, "channel submit out of sync\n");
		return -EFAULT;
	}

	err = nvhost_job_pin(ctx->job);
	if (err) {
		dev_warn(device, "nvhost_job_pin failed: %d\n", err);
		return err;
	}

	/* Debug hooks: force null kickoff / timeout for a matching pid. */
	if (nvhost_debug_null_kickoff_pid == current->tgid)
		null_kickoff = 1;
	ctx->job->null_kickoff = null_kickoff;

	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
		ctx->timeout = nvhost_debug_force_timeout_val;
	}

	trace_write_cmdbufs(ctx->job);

	/* context switch if needed, and submit user's gathers to the channel */
	err = nvhost_channel_submit(ctx->job);
	args->value = ctx->job->syncpt_end;
	if (err)
		nvhost_job_unpin(ctx->job);

	return err;
}
363
364 static int nvhost_ioctl_channel_read_3d_reg(
365         struct nvhost_channel_userctx *ctx,
366         struct nvhost_read_3d_reg_args *args)
367 {
368         BUG_ON(!channel_op(ctx->ch).read3dreg);
369         return channel_op(ctx->ch).read3dreg(ctx->ch, ctx->hwctx,
370                         args->offset, &args->value);
371 }
372
373 static long nvhost_channelctl(struct file *filp,
374         unsigned int cmd, unsigned long arg)
375 {
376         struct nvhost_channel_userctx *priv = filp->private_data;
377         u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
378         int err = 0;
379
380         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
381                 (_IOC_NR(cmd) == 0) ||
382                 (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
383                 return -EFAULT;
384
385         BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
386
387         if (_IOC_DIR(cmd) & _IOC_WRITE) {
388                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
389                         return -EFAULT;
390         }
391
392         switch (cmd) {
393         case NVHOST_IOCTL_CHANNEL_FLUSH:
394                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
395                 break;
396         case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
397                 err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
398                 break;
399         case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
400         {
401                 struct nvhost_submit_hdr_ext *hdr;
402
403                 if (priv->hdr.num_relocs ||
404                     priv->num_relocshifts ||
405                     priv->hdr.num_cmdbufs ||
406                     priv->hdr.num_waitchks) {
407                         reset_submit(priv);
408                         dev_err(&priv->ch->dev->dev,
409                                 "channel submit out of sync\n");
410                         err = -EIO;
411                         break;
412                 }
413
414                 hdr = (struct nvhost_submit_hdr_ext *)buf;
415                 if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
416                         dev_err(&priv->ch->dev->dev,
417                                 "submit version %d > max supported %d\n",
418                                 hdr->submit_version,
419                                 NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
420                         err = -EINVAL;
421                         break;
422                 }
423                 memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
424                 err = set_submit(priv);
425                 trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
426                         priv->hdr.submit_version,
427                         priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
428                         priv->hdr.num_waitchks,
429                         priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
430                 break;
431         }
432         case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
433                 /* host syncpt ID is used by the RM (and never be given out) */
434                 BUG_ON(priv->ch->dev->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
435                 ((struct nvhost_get_param_args *)buf)->value =
436                         priv->ch->dev->syncpts;
437                 break;
438         case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
439                 ((struct nvhost_get_param_args *)buf)->value =
440                         priv->ch->dev->waitbases;
441                 break;
442         case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
443                 ((struct nvhost_get_param_args *)buf)->value =
444                         priv->ch->dev->modulemutexes;
445                 break;
446         case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
447         {
448                 int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
449                 struct nvmap_client *new_client = nvmap_client_get_file(fd);
450
451                 if (IS_ERR(new_client)) {
452                         err = PTR_ERR(new_client);
453                         break;
454                 }
455
456                 if (priv->nvmap)
457                         nvmap_client_put(priv->nvmap);
458
459                 priv->nvmap = new_client;
460                 break;
461         }
462         case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
463                 err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
464                 break;
465         case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
466         {
467                 unsigned long rate;
468                 struct nvhost_clk_rate_args *arg =
469                                 (struct nvhost_clk_rate_args *)buf;
470
471                 err = nvhost_module_get_rate(priv->ch->dev, &rate, 0);
472                 if (err == 0)
473                         arg->rate = rate;
474                 break;
475         }
476         case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
477         {
478                 struct nvhost_clk_rate_args *arg =
479                                 (struct nvhost_clk_rate_args *)buf;
480                 unsigned long rate = (unsigned long)arg->rate;
481
482                 err = nvhost_module_set_rate(priv->ch->dev, priv, rate, 0);
483                 break;
484         }
485         case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
486                 priv->timeout =
487                         (u32)((struct nvhost_set_timeout_args *)buf)->timeout;
488                 dev_dbg(&priv->ch->dev->dev,
489                         "%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
490                         __func__, priv->timeout, priv);
491                 break;
492         case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
493                 ((struct nvhost_get_param_args *)buf)->value =
494                                 priv->hwctx->has_timedout;
495                 break;
496         case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
497                 priv->priority =
498                         (u32)((struct nvhost_set_priority_args *)buf)->priority;
499                 break;
500         default:
501                 err = -ENOTTY;
502                 break;
503         }
504
505         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
506                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
507
508         return err;
509 }
510
/* File operations for per-channel device nodes (nvhost-<name>). */
static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.write = nvhost_channelwrite,
	.unlocked_ioctl = nvhost_channelctl
};
518
/*
 * release() handler for the ctrl node: drop whatever this fd still
 * holds - the host power reference (mod_locks[0]) and any hardware
 * module mutexes - then free the context.
 */
static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_ctrl_userctx *priv = filp->private_data;
	int i;

	trace_nvhost_ctrlrelease(priv->dev->dev->name);

	filp->private_data = NULL;
	/* Index 0 is the host power reference, not a hardware mutex. */
	if (priv->mod_locks[0])
		nvhost_module_idle(priv->dev->dev);
	for (i = 1; i < priv->dev->nb_mlocks; i++)
		if (priv->mod_locks[i])
			nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
	kfree(priv->mod_locks);
	kfree(priv);
	return 0;
}
536
537 static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
538 {
539         struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
540         struct nvhost_ctrl_userctx *priv;
541         u32 *mod_locks;
542
543         trace_nvhost_ctrlopen(host->dev->name);
544
545         priv = kzalloc(sizeof(*priv), GFP_KERNEL);
546         mod_locks = kzalloc(sizeof(u32)*host->nb_mlocks, GFP_KERNEL);
547
548         if (!(priv && mod_locks)) {
549                 kfree(priv);
550                 kfree(mod_locks);
551                 return -ENOMEM;
552         }
553
554         priv->dev = host;
555         priv->mod_locks = mod_locks;
556         filp->private_data = priv;
557         return 0;
558 }
559
560 static int nvhost_ioctl_ctrl_syncpt_read(
561         struct nvhost_ctrl_userctx *ctx,
562         struct nvhost_ctrl_syncpt_read_args *args)
563 {
564         if (args->id >= ctx->dev->syncpt.nb_pts)
565                 return -EINVAL;
566         trace_nvhost_ioctl_ctrl_syncpt_read(args->id);
567         args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
568         return 0;
569 }
570
571 static int nvhost_ioctl_ctrl_syncpt_incr(
572         struct nvhost_ctrl_userctx *ctx,
573         struct nvhost_ctrl_syncpt_incr_args *args)
574 {
575         if (args->id >= ctx->dev->syncpt.nb_pts)
576                 return -EINVAL;
577         trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
578         nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
579         return 0;
580 }
581
582 static int nvhost_ioctl_ctrl_syncpt_waitex(
583         struct nvhost_ctrl_userctx *ctx,
584         struct nvhost_ctrl_syncpt_waitex_args *args)
585 {
586         u32 timeout;
587         if (args->id >= ctx->dev->syncpt.nb_pts)
588                 return -EINVAL;
589         if (args->timeout == NVHOST_NO_TIMEOUT)
590                 timeout = MAX_SCHEDULE_TIMEOUT;
591         else
592                 timeout = (u32)msecs_to_jiffies(args->timeout);
593
594         trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
595           args->timeout);
596         return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
597                                         args->thresh, timeout, &args->value);
598 }
599
/*
 * MODULE_MUTEX handler: take or release one module lock for this fd.
 * Index 0 is special - it keeps the host module powered (busy/idle)
 * rather than taking a hardware mutex.  ctx->mod_locks[] records which
 * locks this fd holds so nvhost_ctrlrelease() can drop them.
 * Requests that would not change state are silently ignored.
 */
static int nvhost_ioctl_ctrl_module_mutex(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_mutex_args *args)
{
	int err = 0;
	if (args->id >= ctx->dev->nb_mlocks ||
	    args->lock > 1)
		return -EINVAL;

	trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
	if (args->lock && !ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_busy(ctx->dev->dev);
		else
			err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
		/* Record ownership only if the lock was actually taken. */
		if (!err)
			ctx->mod_locks[args->id] = 1;
	} else if (!args->lock && ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_idle(ctx->dev->dev);
		else
			nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
		ctx->mod_locks[args->id] = 0;
	}
	return err;
}
626
627 static int nvhost_ioctl_ctrl_module_regrdwr(
628         struct nvhost_ctrl_userctx *ctx,
629         struct nvhost_ctrl_module_regrdwr_args *args)
630 {
631         u32 num_offsets = args->num_offsets;
632         u32 *offsets = args->offsets;
633         void *values = args->values;
634         u32 vals[64];
635
636         trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
637                         args->num_offsets, args->write);
638         if (!(args->id < ctx->dev->nb_modules) ||
639             (num_offsets == 0))
640                 return -EINVAL;
641
642         while (num_offsets--) {
643                 u32 remaining = args->block_size;
644                 u32 offs;
645                 if (get_user(offs, offsets))
646                         return -EFAULT;
647                 offsets++;
648                 while (remaining) {
649                         u32 batch = min(remaining, 64*sizeof(u32));
650                         if (args->write) {
651                                 if (copy_from_user(vals, values, batch))
652                                         return -EFAULT;
653                                 nvhost_write_module_regs(&ctx->dev->cpuaccess,
654                                                         args->id, offs, batch, vals);
655                         } else {
656                                 nvhost_read_module_regs(&ctx->dev->cpuaccess,
657                                                         args->id, offs, batch, vals);
658                                 if (copy_to_user(values, vals, batch))
659                                         return -EFAULT;
660                         }
661                         remaining -= batch;
662                         offs += batch;
663                         values += batch;
664                 }
665         }
666
667         return 0;
668 }
669
/* GET_VERSION handler: report the highest submit-header version supported. */
static int nvhost_ioctl_ctrl_get_version(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_get_param_args *args)
{
	args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
	return 0;
}
677
678 static long nvhost_ctrlctl(struct file *filp,
679         unsigned int cmd, unsigned long arg)
680 {
681         struct nvhost_ctrl_userctx *priv = filp->private_data;
682         u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
683         int err = 0;
684
685         if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
686                 (_IOC_NR(cmd) == 0) ||
687                 (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
688                 return -EFAULT;
689
690         BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
691
692         if (_IOC_DIR(cmd) & _IOC_WRITE) {
693                 if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
694                         return -EFAULT;
695         }
696
697         switch (cmd) {
698         case NVHOST_IOCTL_CTRL_SYNCPT_READ:
699                 err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
700                 break;
701         case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
702                 err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
703                 break;
704         case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
705                 err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
706                 break;
707         case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
708                 err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
709                 break;
710         case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
711                 err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
712                 break;
713         case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
714                 err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
715                 break;
716         case NVHOST_IOCTL_CTRL_GET_VERSION:
717                 err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
718                 break;
719         default:
720                 err = -ENOTTY;
721                 break;
722         }
723
724         if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
725                 err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
726
727         return err;
728 }
729
/* File operations for the nvhost-ctrl device node. */
static const struct file_operations nvhost_ctrlops = {
	.owner = THIS_MODULE,
	.release = nvhost_ctrlrelease,
	.open = nvhost_ctrlopen,
	.unlocked_ioctl = nvhost_ctrlctl
};
736
737 static void power_on_host(struct nvhost_device *dev)
738 {
739         struct nvhost_master *host = dev->host;
740         nvhost_intr_start(&host->intr, clk_get_rate(dev->clk[0]));
741         nvhost_syncpt_reset(&host->syncpt);
742 }
743
744 static int power_off_host(struct nvhost_device *dev)
745 {
746         struct nvhost_master *host = dev->host;
747         nvhost_syncpt_save(&host->syncpt);
748         nvhost_intr_stop(&host->intr);
749         return 0;
750 }
751
752 static int __devinit nvhost_user_init(struct nvhost_master *host)
753 {
754         int i, err, devno;
755
756         host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
757         if (IS_ERR(host->nvhost_class)) {
758                 err = PTR_ERR(host->nvhost_class);
759                 dev_err(&host->pdev->dev, "failed to create class\n");
760                 goto fail;
761         }
762
763         err = alloc_chrdev_region(&devno, nvhost_minor,
764                                 host->nb_channels + 1, IFACE_NAME);
765         nvhost_major = MAJOR(devno);
766         if (err < 0) {
767                 dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
768                 goto fail;
769         }
770
771         for (i = 0; i < host->nb_channels; i++) {
772                 struct nvhost_channel *ch = &host->channels[i];
773
774                 cdev_init(&ch->cdev, &nvhost_channelops);
775                 ch->cdev.owner = THIS_MODULE;
776
777                 devno = MKDEV(nvhost_major, nvhost_minor + i);
778                 err = cdev_add(&ch->cdev, devno, 1);
779                 if (err < 0) {
780                         dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
781                         goto fail;
782                 }
783                 ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
784                                 IFACE_NAME "-%s", ch->dev->name);
785                 if (IS_ERR(ch->node)) {
786                         err = PTR_ERR(ch->node);
787                         dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
788                         goto fail;
789                 }
790         }
791
792         cdev_init(&host->cdev, &nvhost_ctrlops);
793         host->cdev.owner = THIS_MODULE;
794         devno = MKDEV(nvhost_major, nvhost_minor + host->nb_channels);
795         err = cdev_add(&host->cdev, devno, 1);
796         if (err < 0)
797                 goto fail;
798         host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
799                         IFACE_NAME "-ctrl");
800         if (IS_ERR(host->ctrl)) {
801                 err = PTR_ERR(host->ctrl);
802                 dev_err(&host->pdev->dev, "failed to create ctrl device\n");
803                 goto fail;
804         }
805
806         return 0;
807 fail:
808         return err;
809 }
810
811 static void nvhost_remove_chip_support(struct nvhost_master *host)
812 {
813
814         kfree(host->channels);
815         host->channels = 0;
816
817         kfree(host->syncpt.min_val);
818         host->syncpt.min_val = 0;
819
820         kfree(host->syncpt.max_val);
821         host->syncpt.max_val = 0;
822
823         kfree(host->syncpt.base_val);
824         host->syncpt.base_val = 0;
825
826         kfree(host->intr.syncpt);
827         host->intr.syncpt = 0;
828
829         kfree(host->cpuaccess.regs);
830         host->cpuaccess.regs = 0;
831
832         kfree(host->cpuaccess.reg_mem);
833         host->cpuaccess.reg_mem = 0;
834
835         kfree(host->cpuaccess.lock_counts);
836         host->cpuaccess.lock_counts = 0;
837 }
838
839 static int __devinit nvhost_init_chip_support(struct nvhost_master *host)
840 {
841         int err;
842         switch (tegra_get_chipid()) {
843         case TEGRA_CHIPID_TEGRA2:
844                 err = nvhost_init_t20_support(host);
845                 break;
846
847         case TEGRA_CHIPID_TEGRA3:
848                 err = nvhost_init_t30_support(host);
849                 break;
850         default:
851                 return -ENODEV;
852         }
853
854         if (err)
855                 return err;
856
857         /* allocate items sized in chip specific support init */
858         host->channels = kzalloc(sizeof(struct nvhost_channel) *
859                                  host->nb_channels, GFP_KERNEL);
860
861         host->syncpt.min_val = kzalloc(sizeof(atomic_t) *
862                                        host->syncpt.nb_pts, GFP_KERNEL);
863
864         host->syncpt.max_val = kzalloc(sizeof(atomic_t) *
865                                        host->syncpt.nb_pts, GFP_KERNEL);
866
867         host->syncpt.base_val = kzalloc(sizeof(u32) *
868                                         host->syncpt.nb_bases, GFP_KERNEL);
869
870         host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
871                                     host->syncpt.nb_pts, GFP_KERNEL);
872
873         host->cpuaccess.reg_mem = kzalloc(sizeof(struct resource *) *
874                                        host->nb_modules, GFP_KERNEL);
875
876         host->cpuaccess.regs = kzalloc(sizeof(void __iomem *) *
877                                        host->nb_modules, GFP_KERNEL);
878
879         host->cpuaccess.lock_counts = kzalloc(sizeof(atomic_t) *
880                                        host->nb_mlocks, GFP_KERNEL);
881
882         if (!(host->channels && host->syncpt.min_val &&
883               host->syncpt.max_val && host->syncpt.base_val &&
884               host->intr.syncpt && host->cpuaccess.reg_mem &&
885               host->cpuaccess.regs && host->cpuaccess.lock_counts)) {
886                 /* frees happen in the support removal phase */
887                 return -ENOMEM;
888         }
889
890         return 0;
891 }
892
/*
 * The host1x bus-master device itself, registered in nvhost_probe().
 * power_on_host/power_off_host hook the module power framework;
 * NVHOST_MODULE_NO_POWERGATE_IDS marks it as never power-gated.
 * Not static: presumably referenced from other files — TODO confirm.
 */
struct nvhost_device hostdev = {
	.name = "host1x",
	.finalize_poweron = power_on_host,
	.prepare_poweroff = power_off_host,
	.clocks = {{"host1x", UINT_MAX}, {} },
	NVHOST_MODULE_NO_POWERGATE_IDS,
};
900
901 static int __devinit nvhost_probe(struct platform_device *pdev)
902 {
903         struct nvhost_master *host;
904         struct resource *regs, *intr0, *intr1;
905         int i, err;
906
907         regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
908         intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
909         intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
910
911         if (!regs || !intr0 || !intr1) {
912                 dev_err(&pdev->dev, "missing required platform resources\n");
913                 return -ENXIO;
914         }
915
916         host = kzalloc(sizeof(*host), GFP_KERNEL);
917         if (!host)
918                 return -ENOMEM;
919
920         host->pdev = pdev;
921
922         host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
923         if (!host->nvmap) {
924                 dev_err(&pdev->dev, "unable to create nvmap client\n");
925                 err = -EIO;
926                 goto fail;
927         }
928
929         host->reg_mem = request_mem_region(regs->start,
930                                         resource_size(regs), pdev->name);
931         if (!host->reg_mem) {
932                 dev_err(&pdev->dev, "failed to get host register memory\n");
933                 err = -ENXIO;
934                 goto fail;
935         }
936         host->aperture = ioremap(regs->start, resource_size(regs));
937         if (!host->aperture) {
938                 dev_err(&pdev->dev, "failed to remap host registers\n");
939                 err = -ENXIO;
940                 goto fail;
941         }
942
943         err = nvhost_init_chip_support(host);
944         if (err) {
945                 dev_err(&pdev->dev, "failed to init chip support\n");
946                 goto fail;
947         }
948
949         /*  Register host1x device as bus master */
950         nvhost_device_register(&hostdev);
951         host->dev = &hostdev;
952         nvhost_bus_add_host(host);
953
954         for (i = 0; i < host->nb_channels; i++) {
955                 struct nvhost_channel *ch = &host->channels[i];
956                 BUG_ON(!host_channel_op(host).init);
957                 err = host_channel_op(host).init(ch, host, i);
958                 if (err < 0) {
959                         dev_err(&pdev->dev, "failed to init channel %d\n", i);
960                         goto fail;
961                 }
962                 ch->dev->channel = ch;
963         }
964
965         err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
966         if (err)
967                 goto fail;
968
969         err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
970         if (err)
971                 goto fail;
972
973         err = nvhost_user_init(host);
974         if (err)
975                 goto fail;
976
977         err = nvhost_module_init(&hostdev);
978         if (err)
979                 goto fail;
980
981         for (i = 0; i < host->nb_channels; i++) {
982                 struct nvhost_channel *ch = &host->channels[i];
983                 nvhost_module_preinit(ch->dev);
984         }
985
986         platform_set_drvdata(pdev, host);
987
988         clk_enable(host->dev->clk[0]);
989         nvhost_syncpt_reset(&host->syncpt);
990         clk_disable(host->dev->clk[0]);
991
992         nvhost_debug_init(host);
993
994         dev_info(&pdev->dev, "initialized\n");
995         return 0;
996
997 fail:
998         nvhost_remove_chip_support(host);
999         if (host->nvmap)
1000                 nvmap_client_put(host->nvmap);
1001         kfree(host);
1002         return err;
1003 }
1004
/*
 * Driver removal: frees only the chip-support allocations.
 *
 * NOTE(review): this does not iounmap the aperture, release the mem
 * region, unregister the user device nodes, or kfree(host) — teardown
 * looks incomplete relative to probe; confirm whether removal is ever
 * expected at runtime for this built-in driver.
 */
static int __exit nvhost_remove(struct platform_device *pdev)
{
	struct nvhost_master *host = platform_get_drvdata(pdev);
	nvhost_remove_chip_support(host);
	return 0;
}
1011
1012 static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
1013 {
1014         struct nvhost_master *host = platform_get_drvdata(pdev);
1015         int i, ret;
1016         dev_info(&pdev->dev, "suspending\n");
1017
1018         for (i = 0; i < host->nb_channels; i++) {
1019                 ret = nvhost_channel_suspend(&host->channels[i]);
1020                 if (ret)
1021                         return ret;
1022         }
1023
1024         ret = nvhost_module_suspend(host->dev, true);
1025         dev_info(&pdev->dev, "suspend status: %d\n", ret);
1026         return ret;
1027 }
1028
/*
 * System resume hook.  Nothing to restore here: modules power back up
 * lazily on first use, so this only logs the transition.
 */
static int nvhost_resume(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "resuming\n");
	return 0;
}
1034
/*
 * Platform driver glue.  No .probe here: nvhost_mod_init() binds via
 * platform_driver_probe(), which passes nvhost_probe directly and lets
 * the probe routine stay __devinit/discardable.
 */
static struct platform_driver nvhost_driver = {
	.remove = __exit_p(nvhost_remove),
	.suspend = nvhost_suspend,
	.resume = nvhost_resume,
	.driver = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME
	}
};
1044
/*
 * Module entry point: snapshot the GPU register-set count (exposed as a
 * read-only module parameter below), then register-and-probe the driver
 * in one step.
 */
static int __init nvhost_mod_init(void)
{
	register_sets = tegra_gpu_register_sets();
	return platform_driver_probe(&nvhost_driver, nvhost_probe);
}
1050
/* Module exit point: unregister the platform driver. */
static void __exit nvhost_mod_exit(void)
{
	platform_driver_unregister(&nvhost_driver);
}
1055
module_init(nvhost_mod_init);
module_exit(nvhost_mod_exit);

/* Read-only (0444) parameter: NULL setter, stock uint getter, so
 * userspace can query but never change the probed register-set count. */
module_param_call(register_sets, NULL, param_get_uint, &register_sets, 0444);
MODULE_PARM_DESC(register_sets, "Number of register sets");

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Graphics host driver for Tegra products");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform-nvhost");