video: tegra: host: use runtime pm for clock management
/*
 * drivers/video/tegra/host/dev.c
 *
 * Tegra Graphics Host Driver Entrypoint
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include "dev.h"
#include "bus_client.h"

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nvhost.h>
#include <linux/pm_runtime.h>
#include <linux/io.h>

#include <linux/nvhost.h>
#include <linux/nvhost_ioctl.h>
#include <mach/nvmap.h>
#include <mach/gpufuse.h>
#include <mach/hardware.h>

#include "debug.h"
#include "nvhost_job.h"
#include "t20/t20.h"
#include "t30/t30.h"

#define DRIVER_NAME "tegra_grhost"
#define IFACE_NAME "nvhost"
#define TRACE_MAX_LENGTH 128U

static int nvhost_major = NVHOST_MAJOR;
static int nvhost_minor;
static unsigned int register_sets;

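/*
 * Per-open state for a channel device node: the channel itself, an
 * optional hardware context, the submit header currently being parsed,
 * and the job under construction for the next flush.
 */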
struct nvhost_channel_userctx {
	struct nvhost_channel *ch;
	struct nvhost_hwctx *hwctx;
	struct nvhost_submit_hdr_ext hdr;
	int num_relocshifts;
	struct nvhost_job *job;
	struct nvmap_client *nvmap;
	u32 timeout;
	u32 priority;
	int clientid;
};

struct nvhost_ctrl_userctx {
	struct nvhost_master *dev;
	u32 *mod_locks;
};

/*
 * Write cmdbuf to ftrace output. Checks if cmdbuf contents should be output
 * and mmaps the cmdbuf contents if required.
 */
static void trace_write_cmdbufs(struct nvhost_job *job)
{
#if defined(CONFIG_TEGRA_NVMAP)
	struct nvmap_handle_ref handle;
	int i = 0;

	for (i = 0; i < job->num_gathers; i++) {
		struct nvhost_channel_gather *gather = &job->gathers[i];
		/* reset per gather so an unmapped buffer from a previous
		 * iteration is never reused */
		void *mem = NULL;

		if (nvhost_debug_trace_cmdbuf) {
			handle.handle = nvmap_id_to_handle(gather->mem_id);
			mem = nvmap_mmap(&handle);
			if (IS_ERR_OR_NULL(mem))
				mem = NULL;
		}

		if (mem) {
			u32 i;
			/*
			 * Write in batches of 128 as there seems to be a limit
			 * of how much you can output to ftrace at once.
			 */
			for (i = 0; i < gather->words; i += TRACE_MAX_LENGTH) {
				trace_nvhost_channel_write_cmdbuf_data(
					job->ch->dev->name,
					gather->mem_id,
					min(gather->words - i,
					    TRACE_MAX_LENGTH),
					gather->offset + i * sizeof(u32),
					mem);
			}
			nvmap_munmap(&handle, mem);
		}
	}
#endif
}

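/*
 * Release a channel file descriptor: drop the module client, the
 * channel reference, any hardware context and pending job, and the
 * nvmap client.
 */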
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;

	trace_nvhost_channel_release(priv->ch->dev->name);

	filp->private_data = NULL;

	nvhost_module_remove_client(priv->ch->dev, priv);
	nvhost_putchannel(priv->ch, priv->hwctx);

	if (priv->hwctx)
		priv->ch->ctxhandler.put(priv->hwctx);

	if (priv->job)
		nvhost_job_put(priv->job);

	nvmap_client_put(priv->nvmap);
	kfree(priv);
	return 0;
}

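/*
 * Open a channel device node: take a channel reference, allocate the
 * per-fd context and, if the channel has a context handler, a hardware
 * context, then preallocate a job for the first submit.
 */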
static int nvhost_channelopen(struct inode *inode, struct file *filp)
{
	struct nvhost_channel_userctx *priv;
	struct nvhost_channel *ch;

	ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
	ch = nvhost_getchannel(ch);
	if (!ch)
		return -ENOMEM;
	trace_nvhost_channel_open(ch->dev->name);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		nvhost_putchannel(ch, NULL);
		return -ENOMEM;
	}
	filp->private_data = priv;
	priv->ch = ch;
	nvhost_module_add_client(ch->dev, priv);

	if (ch->ctxhandler.alloc) {
		priv->hwctx = ch->ctxhandler.alloc(ch);
		if (!priv->hwctx)
			goto fail;
	}
	priv->priority = NVHOST_PRIORITY_MEDIUM;
	priv->clientid = atomic_add_return(1,
			&nvhost_get_host(ch->dev)->clientid);

	priv->job = nvhost_job_alloc(ch, priv->hwctx, &priv->hdr,
			NULL, priv->priority, priv->clientid);
	if (!priv->job)
		goto fail;

	return 0;
fail:
	nvhost_channelrelease(inode, filp);
	return -ENOMEM;
}

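/*
 * Validate a freshly received submit header and (re)allocate the job
 * to match it. Relocation shifts are expected only for V2 and later
 * submit headers.
 */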
static int set_submit(struct nvhost_channel_userctx *ctx)
{
	struct device *device = &ctx->ch->dev->dev;

	/* submit should have at least 1 cmdbuf */
	if (!ctx->hdr.num_cmdbufs)
		return -EIO;

	if (!ctx->nvmap) {
		dev_err(device, "no nvmap context set\n");
		return -EFAULT;
	}

	ctx->job = nvhost_job_realloc(ctx->job,
			&ctx->hdr,
			ctx->nvmap,
			ctx->priority,
			ctx->clientid);
	if (!ctx->job)
		return -ENOMEM;
	ctx->job->timeout = ctx->timeout;

	if (ctx->hdr.submit_version >= NVHOST_SUBMIT_VERSION_V2)
		ctx->num_relocshifts = ctx->hdr.num_relocs;

	return 0;
}

static void reset_submit(struct nvhost_channel_userctx *ctx)
{
	ctx->hdr.num_cmdbufs = 0;
	ctx->hdr.num_relocs = 0;
	ctx->num_relocshifts = 0;
	ctx->hdr.num_waitchks = 0;
}

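/*
 * write() implementation for channel nodes. Userspace streams a submit
 * as a plain nvhost_submit_hdr followed by the announced number of
 * cmdbufs, relocations, wait checks and (for V2+ submits set up via
 * the SUBMIT_EXT ioctl) relocation shifts. Each struct must arrive
 * whole: a trailing partial struct is not consumed, and data that does
 * not match the header counts fails with -EFAULT.
 */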
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	size_t remaining = count;
	int err = 0;
	struct nvhost_job *job = priv->job;
	struct nvhost_submit_hdr_ext *hdr = &priv->hdr;
	const char *chname = priv->ch->dev->name;

	while (remaining) {
		size_t consumed;
		if (!hdr->num_relocs &&
		    !priv->num_relocshifts &&
		    !hdr->num_cmdbufs &&
		    !hdr->num_waitchks) {
			consumed = sizeof(struct nvhost_submit_hdr);
			if (remaining < consumed)
				break;
			if (copy_from_user(hdr, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			hdr->submit_version = NVHOST_SUBMIT_VERSION_V0;
			err = set_submit(priv);
			if (err)
				break;
			trace_nvhost_channel_write_submit(chname,
			  count, hdr->num_cmdbufs, hdr->num_relocs,
			  hdr->syncpt_id, hdr->syncpt_incrs);
		} else if (hdr->num_cmdbufs) {
			struct nvhost_cmdbuf cmdbuf;
			consumed = sizeof(cmdbuf);
			if (remaining < consumed)
				break;
			if (copy_from_user(&cmdbuf, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			trace_nvhost_channel_write_cmdbuf(chname,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			nvhost_job_add_gather(job,
				cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
			hdr->num_cmdbufs--;
		} else if (hdr->num_relocs) {
			struct nvmap_pinarray_elem *elem =
						&job->pinarray[job->num_pins];
			consumed = sizeof(struct nvhost_reloc);
			if (remaining < consumed)
				break;
			if (copy_from_user(elem, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			elem->patch_mem =
				nvmap_convert_handle_u2k(elem->patch_mem);
			elem->pin_mem =
				nvmap_convert_handle_u2k(elem->pin_mem);
			trace_nvhost_channel_write_reloc(chname);
			job->num_pins++;
			hdr->num_relocs--;
		} else if (hdr->num_waitchks) {
			struct nvhost_waitchk *waitchk =
					&job->waitchk[job->num_waitchk];
			consumed = sizeof(struct nvhost_waitchk);
			if (remaining < consumed)
				break;
			if (copy_from_user(waitchk, buf, consumed)) {
				err = -EFAULT;
				break;
			}
			waitchk->mem = nvmap_convert_handle_u2k(waitchk->mem);
			trace_nvhost_channel_write_waitchks(
			  chname, 1,
			  hdr->waitchk_mask);
			job->num_waitchk++;
			hdr->num_waitchks--;
		} else if (priv->num_relocshifts) {
			int next_shift =
				job->num_pins - priv->num_relocshifts;
			consumed = sizeof(struct nvhost_reloc_shift);
			if (remaining < consumed)
				break;
			if (copy_from_user(
					&job->pinarray[next_shift].reloc_shift,
					buf, consumed)) {
				err = -EFAULT;
				break;
			}
			priv->num_relocshifts--;
		} else {
			err = -EFAULT;
			break;
		}
		remaining -= consumed;
		buf += consumed;
	}

	if (err < 0) {
		dev_err(&priv->ch->dev->dev, "channel write error\n");
		reset_submit(priv);
		return err;
	}

	return count - remaining;
}

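/*
 * Flush the job assembled by nvhost_channelwrite(): pin all referenced
 * buffers and submit the gathers to the channel. The syncpoint value
 * that marks completion is returned through args->value; null_kickoff
 * marks the job as a debugging null submit.
 */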
static int nvhost_ioctl_channel_flush(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_get_param_args *args,
	int null_kickoff)
{
	struct device *device = &ctx->ch->dev->dev;
	int err;

	trace_nvhost_ioctl_channel_flush(ctx->ch->dev->name);

	if (!ctx->job ||
	    ctx->hdr.num_relocs ||
	    ctx->hdr.num_cmdbufs ||
	    ctx->hdr.num_waitchks) {
		reset_submit(ctx);
		dev_err(device, "channel submit out of sync\n");
		return -EFAULT;
	}

	err = nvhost_job_pin(ctx->job);
	if (err) {
		dev_warn(device, "nvhost_job_pin failed: %d\n", err);
		return err;
	}

	if (nvhost_debug_null_kickoff_pid == current->tgid)
		null_kickoff = 1;
	ctx->job->null_kickoff = null_kickoff;

	if ((nvhost_debug_force_timeout_pid == current->tgid) &&
	    (nvhost_debug_force_timeout_channel == ctx->ch->chid)) {
		ctx->timeout = nvhost_debug_force_timeout_val;
	}

	trace_write_cmdbufs(ctx->job);

	/* context switch if needed, and submit user's gathers to the channel */
	err = nvhost_channel_submit(ctx->job);
	args->value = ctx->job->syncpt_end;
	if (err)
		nvhost_job_unpin(ctx->job);

	return err;
}

static int nvhost_ioctl_channel_read_3d_reg(
	struct nvhost_channel_userctx *ctx,
	struct nvhost_read_3d_reg_args *args)
{
	BUG_ON(!channel_op(ctx->ch).read3dreg);
	return channel_op(ctx->ch).read3dreg(ctx->ch, ctx->hwctx,
			args->offset, &args->value);
}

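/*
 * ioctl dispatcher for channel nodes. The argument is staged in a
 * stack buffer: copied in for _IOC_WRITE commands and copied back out
 * for _IOC_READ commands once the handler succeeds.
 */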
static long nvhost_channelctl(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	struct nvhost_channel_userctx *priv = filp->private_data;
	u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
	int err = 0;

	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
		return -EFAULT;

	BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	switch (cmd) {
	case NVHOST_IOCTL_CHANNEL_FLUSH:
		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
		break;
	case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
		err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
		break;
	case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
	{
		struct nvhost_submit_hdr_ext *hdr;

		if (priv->hdr.num_relocs ||
		    priv->num_relocshifts ||
		    priv->hdr.num_cmdbufs ||
		    priv->hdr.num_waitchks) {
			reset_submit(priv);
			dev_err(&priv->ch->dev->dev,
				"channel submit out of sync\n");
			err = -EIO;
			break;
		}

		hdr = (struct nvhost_submit_hdr_ext *)buf;
		if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
			dev_err(&priv->ch->dev->dev,
				"submit version %d > max supported %d\n",
				hdr->submit_version,
				NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
			err = -EINVAL;
			break;
		}
		memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
		err = set_submit(priv);
		trace_nvhost_ioctl_channel_submit(priv->ch->dev->name,
			priv->hdr.submit_version,
			priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
			priv->hdr.num_waitchks,
			priv->hdr.syncpt_id, priv->hdr.syncpt_incrs);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
		/* the host syncpt ID is used by the RM and must never be
		 * given out */
		BUG_ON(priv->ch->dev->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
		((struct nvhost_get_param_args *)buf)->value =
			priv->ch->dev->syncpts;
		break;
	case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
		((struct nvhost_get_param_args *)buf)->value =
			priv->ch->dev->waitbases;
		break;
	case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
		((struct nvhost_get_param_args *)buf)->value =
			priv->ch->dev->modulemutexes;
		break;
	case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
	{
		int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
		struct nvmap_client *new_client = nvmap_client_get_file(fd);

		if (IS_ERR(new_client)) {
			err = PTR_ERR(new_client);
			break;
		}

		if (priv->nvmap)
			nvmap_client_put(priv->nvmap);

		priv->nvmap = new_client;
		break;
	}
	case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
		err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_CLK_RATE:
	{
		unsigned long rate;
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;

		err = nvhost_module_get_rate(priv->ch->dev, &rate, 0);
		if (err == 0)
			arg->rate = rate;
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_CLK_RATE:
	{
		struct nvhost_clk_rate_args *arg =
				(struct nvhost_clk_rate_args *)buf;
		unsigned long rate = (unsigned long)arg->rate;

		err = nvhost_module_set_rate(priv->ch->dev, priv, rate, 0);
		break;
	}
	case NVHOST_IOCTL_CHANNEL_SET_TIMEOUT:
		priv->timeout =
			(u32)((struct nvhost_set_timeout_args *)buf)->timeout;
		dev_dbg(&priv->ch->dev->dev,
			"%s: setting buffer timeout (%d ms) for userctx 0x%p\n",
			__func__, priv->timeout, priv);
		break;
	case NVHOST_IOCTL_CHANNEL_GET_TIMEDOUT:
		/* hwctx is optional; without one the channel cannot have
		 * timed out */
		((struct nvhost_get_param_args *)buf)->value =
				priv->hwctx ? priv->hwctx->has_timedout : 0;
		break;
	case NVHOST_IOCTL_CHANNEL_SET_PRIORITY:
		priv->priority =
			(u32)((struct nvhost_set_priority_args *)buf)->priority;
		break;
	default:
		err = -ENOTTY;
		break;
	}

	/* copy_to_user() returns the number of uncopied bytes, not an errno */
	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg, buf,
				_IOC_SIZE(cmd)) ? -EFAULT : 0;

	return err;
}

static const struct file_operations nvhost_channelops = {
	.owner = THIS_MODULE,
	.release = nvhost_channelrelease,
	.open = nvhost_channelopen,
	.write = nvhost_channelwrite,
	.unlocked_ioctl = nvhost_channelctl
};

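/*
 * The ctrl node exposes host1x-wide services (syncpoints, module
 * mutexes, register access). Releasing it drops any module locks the
 * process still holds, including the host-busy reference behind lock 0.
 */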
static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
{
	struct nvhost_ctrl_userctx *priv = filp->private_data;
	int i;

	trace_nvhost_ctrlrelease(priv->dev->dev->name);

	filp->private_data = NULL;
	if (priv->mod_locks[0])
		nvhost_module_idle(priv->dev->dev);
	for (i = 1; i < priv->dev->syncpt.nb_mlocks; i++)
		if (priv->mod_locks[i])
			nvhost_mutex_unlock(&priv->dev->syncpt, i);
	kfree(priv->mod_locks);
	kfree(priv);
	return 0;
}

static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
{
	struct nvhost_master *host =
		container_of(inode->i_cdev, struct nvhost_master, cdev);
	struct nvhost_ctrl_userctx *priv;
	u32 *mod_locks;

	trace_nvhost_ctrlopen(host->dev->name);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	mod_locks = kzalloc(sizeof(u32) * host->syncpt.nb_mlocks, GFP_KERNEL);

	if (!(priv && mod_locks)) {
		kfree(priv);
		kfree(mod_locks);
		return -ENOMEM;
	}

	priv->dev = host;
	priv->mod_locks = mod_locks;
	filp->private_data = priv;
	return 0;
}

static int nvhost_ioctl_ctrl_syncpt_read(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_syncpt_read_args *args)
{
	if (args->id >= ctx->dev->syncpt.nb_pts)
		return -EINVAL;
	trace_nvhost_ioctl_ctrl_syncpt_read(args->id);
	args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
	return 0;
}

static int nvhost_ioctl_ctrl_syncpt_incr(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_syncpt_incr_args *args)
{
	if (args->id >= ctx->dev->syncpt.nb_pts)
		return -EINVAL;
	trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
	nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
	return 0;
}

static int nvhost_ioctl_ctrl_syncpt_waitex(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_syncpt_waitex_args *args)
{
	u32 timeout;
	if (args->id >= ctx->dev->syncpt.nb_pts)
		return -EINVAL;
	if (args->timeout == NVHOST_NO_TIMEOUT)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = (u32)msecs_to_jiffies(args->timeout);

	trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
	  args->timeout);
	return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
					args->thresh, timeout, &args->value);
}

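/*
 * Lock or unlock a module mutex on behalf of userspace. Lock 0 is
 * special: it takes a busy reference on the host1x module instead of a
 * hardware mutex, keeping its clock on until the lock is dropped.
 */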
static int nvhost_ioctl_ctrl_module_mutex(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_mutex_args *args)
{
	int err = 0;
	if (args->id >= ctx->dev->syncpt.nb_mlocks ||
	    args->lock > 1)
		return -EINVAL;

	trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
	if (args->lock && !ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_busy(ctx->dev->dev);
		else
			err = nvhost_mutex_try_lock(&ctx->dev->syncpt,
					args->id);
		if (!err)
			ctx->mod_locks[args->id] = 1;
	} else if (!args->lock && ctx->mod_locks[args->id]) {
		if (args->id == 0)
			nvhost_module_idle(ctx->dev->dev);
		else
			nvhost_mutex_unlock(&ctx->dev->syncpt, args->id);
		ctx->mod_locks[args->id] = 0;
	}
	return err;
}

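/* Look up a client device on the host1x bus by its module ID. */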
static struct nvhost_device *get_ndev_by_moduleid(struct nvhost_master *host,
		u32 id)
{
	int i;

	for (i = 0; i < host->nb_channels; i++) {
		struct nvhost_device *ndev = host->channels[i].dev;
		if (id == ndev->moduleid)
			return ndev;
	}
	return NULL;
}

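/*
 * Read or write a module's registers on behalf of userspace. Each
 * entry in args->offsets names a block of args->block_size bytes;
 * values are staged through a 64-word stack buffer, so user copies and
 * register accesses proceed in batches of at most 64 words.
 */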
static int nvhost_ioctl_ctrl_module_regrdwr(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_ctrl_module_regrdwr_args *args)
{
	u32 num_offsets = args->num_offsets;
	u32 *offsets = args->offsets;
	u32 *values = args->values;
	u32 vals[64];
	struct nvhost_device *ndev;

	trace_nvhost_ioctl_ctrl_module_regrdwr(args->id,
			args->num_offsets, args->write);
	/* Check that there is something to read and that the block size
	 * is u32 aligned */
	if (num_offsets == 0 || args->block_size & 3)
		return -EINVAL;

	ndev = get_ndev_by_moduleid(ctx->dev, args->id);
	if (!ndev)
		return -EINVAL;

	while (num_offsets--) {
		int remaining = args->block_size >> 2;
		u32 offs;
		if (get_user(offs, offsets))
			return -EFAULT;
		offsets++;
		while (remaining) {
			int batch = min(remaining, 64);
			if (args->write) {
				if (copy_from_user(vals, values,
							batch * sizeof(u32)))
					return -EFAULT;
				nvhost_write_module_regs(ndev,
						offs, batch, vals);
			} else {
				nvhost_read_module_regs(ndev,
						offs, batch, vals);
				if (copy_to_user(values, vals,
							batch * sizeof(u32)))
					return -EFAULT;
			}
			remaining -= batch;
			offs += batch;
			values += batch;
		}
	}

	return 0;
}

static int nvhost_ioctl_ctrl_get_version(
	struct nvhost_ctrl_userctx *ctx,
	struct nvhost_get_param_args *args)
{
	args->value = NVHOST_SUBMIT_VERSION_MAX_SUPPORTED;
	return 0;
}

static long nvhost_ctrlctl(struct file *filp,
	unsigned int cmd, unsigned long arg)
{
	struct nvhost_ctrl_userctx *priv = filp->private_data;
	u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
	int err = 0;

	if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
		(_IOC_NR(cmd) == 0) ||
		(_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
		return -EFAULT;

	BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);

	if (_IOC_DIR(cmd) & _IOC_WRITE) {
		if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	switch (cmd) {
	case NVHOST_IOCTL_CTRL_SYNCPT_READ:
		err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
		err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
		err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
		err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
		err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
		err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
		break;
	case NVHOST_IOCTL_CTRL_GET_VERSION:
		err = nvhost_ioctl_ctrl_get_version(priv, (void *)buf);
		break;
	default:
		err = -ENOTTY;
		break;
	}

	/* copy_to_user() returns the number of uncopied bytes, not an errno */
	if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
		err = copy_to_user((void __user *)arg, buf,
				_IOC_SIZE(cmd)) ? -EFAULT : 0;

	return err;
}

static const struct file_operations nvhost_ctrlops = {
	.owner = THIS_MODULE,
	.release = nvhost_ctrlrelease,
	.open = nvhost_ctrlopen,
	.unlocked_ioctl = nvhost_ctrlctl
};

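/*
 * Power callbacks for the host1x module itself: restart interrupt
 * handling and restore syncpoint state after power-on, and save
 * syncpoint state before power-off.
 */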
static void power_on_host(struct nvhost_device *dev)
{
	struct nvhost_master *host = nvhost_get_drvdata(dev);
	nvhost_intr_start(&host->intr, clk_get_rate(dev->clk[0]));
	nvhost_syncpt_reset(&host->syncpt);
}

static int power_off_host(struct nvhost_device *dev)
{
	struct nvhost_master *host = nvhost_get_drvdata(dev);
	nvhost_syncpt_save(&host->syncpt);
	nvhost_intr_stop(&host->intr);
	return 0;
}

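/*
 * Create the userspace interface: one character device node per
 * channel plus the "nvhost-ctrl" node, all in a dedicated device
 * class. Minor numbers come from one contiguous chrdev region.
 */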
static int __devinit nvhost_user_init(struct nvhost_master *host)
{
	int i, err;
	dev_t devno;

	host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
	if (IS_ERR(host->nvhost_class)) {
		err = PTR_ERR(host->nvhost_class);
		dev_err(&host->pdev->dev, "failed to create class\n");
		goto fail;
	}

	err = alloc_chrdev_region(&devno, nvhost_minor,
				host->nb_channels + 1, IFACE_NAME);
	if (err < 0) {
		dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
		goto fail;
	}
	nvhost_major = MAJOR(devno);

	for (i = 0; i < host->nb_channels; i++) {
		struct nvhost_channel *ch = &host->channels[i];

		cdev_init(&ch->cdev, &nvhost_channelops);
		ch->cdev.owner = THIS_MODULE;

		devno = MKDEV(nvhost_major, nvhost_minor + i);
		err = cdev_add(&ch->cdev, devno, 1);
		if (err < 0) {
			dev_err(&host->pdev->dev,
				"failed to add chan %i cdev\n", i);
			goto fail;
		}
		ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
				IFACE_NAME "-%s", ch->dev->name);
		if (IS_ERR(ch->node)) {
			err = PTR_ERR(ch->node);
			dev_err(&host->pdev->dev,
				"failed to create chan %i device\n", i);
			goto fail;
		}
	}

	cdev_init(&host->cdev, &nvhost_ctrlops);
	host->cdev.owner = THIS_MODULE;
	devno = MKDEV(nvhost_major, nvhost_minor + host->nb_channels);
	err = cdev_add(&host->cdev, devno, 1);
	if (err < 0)
		goto fail;
	host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
			IFACE_NAME "-ctrl");
	if (IS_ERR(host->ctrl)) {
		err = PTR_ERR(host->ctrl);
		dev_err(&host->pdev->dev, "failed to create ctrl device\n");
		goto fail;
	}

	return 0;
fail:
	return err;
}

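/* Free everything allocated by nvhost_init_chip_support(). */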
static void nvhost_remove_chip_support(struct nvhost_master *host)
{
	kfree(host->channels);
	host->channels = NULL;

	kfree(host->syncpt.min_val);
	host->syncpt.min_val = NULL;

	kfree(host->syncpt.max_val);
	host->syncpt.max_val = NULL;

	kfree(host->syncpt.base_val);
	host->syncpt.base_val = NULL;

	kfree(host->intr.syncpt);
	host->intr.syncpt = NULL;

	kfree(host->syncpt.lock_counts);
	host->syncpt.lock_counts = NULL;
}

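/*
 * Select the chip-specific support functions, which also size the
 * per-chip tables (channels, syncpoints, wait bases, module locks),
 * then allocate those tables. On allocation failure the caller runs
 * nvhost_remove_chip_support() to free whatever did get allocated.
 */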
static int __devinit nvhost_init_chip_support(struct nvhost_master *host)
{
	int err;
	switch (tegra_get_chipid()) {
	case TEGRA_CHIPID_TEGRA2:
		err = nvhost_init_t20_support(host);
		break;

	case TEGRA_CHIPID_TEGRA3:
		err = nvhost_init_t30_support(host);
		break;
	default:
		return -ENODEV;
	}

	if (err)
		return err;

	/* allocate items sized in chip specific support init */
	host->channels = kzalloc(sizeof(struct nvhost_channel) *
				 host->nb_channels, GFP_KERNEL);

	host->syncpt.min_val = kzalloc(sizeof(atomic_t) *
				       host->syncpt.nb_pts, GFP_KERNEL);

	host->syncpt.max_val = kzalloc(sizeof(atomic_t) *
				       host->syncpt.nb_pts, GFP_KERNEL);

	host->syncpt.base_val = kzalloc(sizeof(u32) *
					host->syncpt.nb_bases, GFP_KERNEL);

	host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
				    host->syncpt.nb_pts, GFP_KERNEL);

	host->syncpt.lock_counts = kzalloc(sizeof(atomic_t) *
				       host->syncpt.nb_mlocks, GFP_KERNEL);

	if (!(host->channels && host->syncpt.min_val &&
	      host->syncpt.max_val && host->syncpt.base_val &&
	      host->intr.syncpt && host->syncpt.lock_counts)) {
		/* frees happen in the support removal phase */
		return -ENOMEM;
	}

	return 0;
}

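/*
 * The host1x module is registered as a device on the nvhost bus so it
 * takes part in the same runtime PM clock management as its clients;
 * it declares no powergate IDs and a single "host1x" clock.
 */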
struct nvhost_device hostdev = {
	.name = "host1x",
	.id = -1,
	.finalize_poweron = power_on_host,
	.prepare_poweroff = power_off_host,
	.clocks = {{"host1x", UINT_MAX}, {} },
	NVHOST_MODULE_NO_POWERGATE_IDS,
};

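/*
 * Probe host1x: map registers, set up chip support and channels,
 * initialize interrupts and the user interface, then enable runtime PM
 * for host1x and every client module so clocks are managed on demand.
 */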
static int __devinit nvhost_probe(struct platform_device *pdev)
{
	struct nvhost_master *host;
	struct resource *regs, *intr0, *intr1;
	int i, err;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);

	if (!regs || !intr0 || !intr1) {
		dev_err(&pdev->dev, "missing required platform resources\n");
		return -ENXIO;
	}

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	host->pdev = pdev;

	host->nvmap = nvmap_create_client(nvmap_dev, "nvhost");
	if (!host->nvmap) {
		dev_err(&pdev->dev, "unable to create nvmap client\n");
		err = -EIO;
		goto fail;
	}

	host->reg_mem = request_mem_region(regs->start,
					resource_size(regs), pdev->name);
	if (!host->reg_mem) {
		dev_err(&pdev->dev, "failed to get host register memory\n");
		err = -ENXIO;
		goto fail;
	}
	host->aperture = ioremap(regs->start, resource_size(regs));
	if (!host->aperture) {
		dev_err(&pdev->dev, "failed to remap host registers\n");
		err = -ENXIO;
		goto fail;
	}

	err = nvhost_init_chip_support(host);
	if (err) {
		dev_err(&pdev->dev, "failed to init chip support\n");
		goto fail;
	}

	/* register the host1x device as the bus master */
	nvhost_device_register(&hostdev);
	host->dev = &hostdev;
	nvhost_bus_add_host(host);

	/* hand the host1x pointer to the device via drvdata */
	nvhost_set_drvdata(&hostdev, host);

	BUG_ON(!host_channel_op(host).init);
	for (i = 0; i < host->nb_channels; i++) {
		struct nvhost_channel *ch = &host->channels[i];
		err = nvhost_channel_init(ch, host, i);
		if (err)
			goto fail;
	}

	err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
	if (err)
		goto fail;

	err = nvhost_user_init(host);
	if (err)
		goto fail;

	pm_runtime_enable(&hostdev.dev);
	err = nvhost_module_init(&hostdev);
	if (err)
		goto fail;

	for (i = 0; i < host->nb_channels; i++) {
		struct nvhost_channel *ch = &host->channels[i];
		pm_runtime_enable(&ch->dev->dev);
		nvhost_module_init(ch->dev);
	}

	platform_set_drvdata(pdev, host);

	for (i = 0; i < host->dev->num_clks; i++)
		clk_enable(host->dev->clk[i]);
	nvhost_syncpt_reset(&host->syncpt);
	for (i = 0; i < host->dev->num_clks; i++)
		clk_disable(host->dev->clk[i]);

	nvhost_debug_init(host);

	dev_info(&pdev->dev, "initialized\n");
	return 0;

fail:
	nvhost_remove_chip_support(host);
	if (host->nvmap)
		nvmap_client_put(host->nvmap);
	kfree(host);
	return err;
}

static int __exit nvhost_remove(struct platform_device *pdev)
{
	struct nvhost_master *host = platform_get_drvdata(pdev);
	nvhost_remove_chip_support(host);
	return 0;
}

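/*
 * System sleep: suspend every channel, then the host1x module itself.
 * Resume briefly enables the clocks to rerun the power-on sequence,
 * then returns clock management to runtime PM for host1x and all
 * client modules.
 */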
static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct nvhost_master *host = platform_get_drvdata(pdev);
	int i, ret;
	dev_info(&pdev->dev, "suspending\n");

	for (i = 0; i < host->nb_channels; i++) {
		ret = nvhost_channel_suspend(&host->channels[i]);
		if (ret)
			return ret;
	}

	ret = nvhost_module_suspend(host->dev, true);
	dev_info(&pdev->dev, "suspend status: %d\n", ret);
	return ret;
}

static int nvhost_resume(struct platform_device *pdev)
{
	int i;
	struct nvhost_master *host = platform_get_drvdata(pdev);

	dev_info(&pdev->dev, "resuming\n");

	for (i = 0; i < host->dev->num_clks; i++)
		clk_enable(host->dev->clk[i]);
	if (host->dev->finalize_poweron)
		host->dev->finalize_poweron(host->dev);
	for (i = 0; i < host->dev->num_clks; i++)
		clk_disable(host->dev->clk[i]);

	/* enable runtime pm for host1x */
	nvhost_module_resume(host->dev);

	/* enable runtime pm for clients */
	for (i = 0; i < host->nb_channels; i++)
		nvhost_module_resume(host->channels[i].dev);

	return 0;
}

static struct platform_driver nvhost_driver = {
	.remove = __exit_p(nvhost_remove),
	.suspend = nvhost_suspend,
	.resume = nvhost_resume,
	.driver = {
		.owner = THIS_MODULE,
		.name = DRIVER_NAME
	}
};

static int __init nvhost_mod_init(void)
{
	register_sets = tegra_gpu_register_sets();
	return platform_driver_probe(&nvhost_driver, nvhost_probe);
}

static void __exit nvhost_mod_exit(void)
{
	platform_driver_unregister(&nvhost_driver);
}

module_init(nvhost_mod_init);
module_exit(nvhost_mod_exit);

module_param_call(register_sets, NULL, param_get_uint, &register_sets, 0444);
MODULE_PARM_DESC(register_sets, "Number of register sets");

MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Graphics host driver for Tegra products");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("platform-nvhost");