*
* Tegra Graphics Host Driver Entrypoint
*
- * Copyright (c) 2010, NVIDIA Corporation.
+ * Copyright (c) 2010-2011, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/uaccess.h>
#include <linux/file.h>
#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/nvhost.h>
#include <asm/io.h>
#include <mach/nvhost.h>
#include <mach/nvmap.h>
+#include <mach/gpufuse.h>
#define DRIVER_NAME "tegra_grhost"
#define IFACE_NAME "nvhost"
static int nvhost_major = NVHOST_MAJOR;
-static int nvhost_minor = NVHOST_CHANNEL_BASE;
+static int nvhost_minor;
+static unsigned int register_sets;
+/*
+ * Per-file-descriptor state for an open channel node.  A submit is
+ * streamed in via write(): first a header, then cmdbufs, relocs and
+ * wait checks, staged here until the flush/kickoff ioctl.
+ */
struct nvhost_channel_userctx {
struct nvhost_channel *ch;
struct nvhost_hwctx *hwctx;
- u32 syncpt_id;
- u32 syncpt_incrs;
- u32 cmdbufs_pending;
- u32 relocs_pending;
- u32 null_kickoff;
+ struct nvhost_submit_hdr_ext hdr; /* header of the submit in progress */
struct nvmap_handle_ref *gather_mem;
u32 *gathers;
u32 *cur_gather;
struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
struct nvmap_client *nvmap;
+ struct nvhost_waitchk waitchks[NVHOST_MAX_WAIT_CHECKS]; /* staged wait checks */
+ struct nvhost_waitchk *cur_waitchk; /* next free slot in waitchks[] */
};
+/*
+ * Per-fd state for the control node.  mod_locks is now allocated at
+ * open() time because the number of mlocks is chip-specific.
+ */
struct nvhost_ctrl_userctx {
struct nvhost_master *dev;
- u32 mod_locks[NV_HOST1X_NB_MLOCKS];
+ u32 *mod_locks; /* [nb_mlocks] held-lock flags, kzalloc'd in open */
};
static int nvhost_channelrelease(struct inode *inode, struct file *filp)
{
struct nvhost_channel_userctx *priv = filp->private_data;
+ trace_nvhost_channel_release(priv->ch->desc->name);
+
filp->private_data = NULL;
nvhost_putchannel(priv->ch, priv->hwctx);
struct nvhost_channel_userctx *priv;
struct nvhost_channel *ch;
+
ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
ch = nvhost_getchannel(ch);
if (!ch)
return -ENOMEM;
+ trace_nvhost_channel_open(ch->desc->name);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ctx->cur_gather = cur_gather + 2;
}
+/*
+ * Validate the submit header stashed in ctx->hdr and rewind the
+ * per-submit cursors so write() can start accepting the payload.
+ * Returns 0 on success, or -EIO if the header requests more
+ * cmdbufs/relocs/waitchks than the static staging arrays can hold.
+ */
+static int set_submit(struct nvhost_channel_userctx *ctx)
+{
+ /* submit should have at least 1 cmdbuf */
+ if (!ctx->hdr.num_cmdbufs)
+ return -EIO;
+
+ /* check submit doesn't exceed static structs */
+ if ((ctx->hdr.num_cmdbufs + ctx->hdr.num_relocs) > NVHOST_MAX_HANDLES) {
+ dev_err(&ctx->ch->dev->pdev->dev,
+ "channel submit exceeded max handles (%d > %d)\n",
+ ctx->hdr.num_cmdbufs + ctx->hdr.num_relocs,
+ NVHOST_MAX_HANDLES);
+ return -EIO;
+ }
+ if (ctx->hdr.num_waitchks > NVHOST_MAX_WAIT_CHECKS) {
+ dev_err(&ctx->ch->dev->pdev->dev,
+ "channel submit exceeded max waitchks (%d > %d)\n",
+ ctx->hdr.num_waitchks,
+ NVHOST_MAX_WAIT_CHECKS);
+ return -EIO;
+ }
+
+ /* rewind the gather/waitchk cursors for the new submit */
+ ctx->cur_gather = ctx->gathers;
+ ctx->cur_waitchk = ctx->waitchks;
+ ctx->pinarray_size = 0;
+
+ return 0;
+}
+
+/*
+ * Discard any partially-streamed submit so the next write() is
+ * interpreted as a fresh submit header.
+ */
static void reset_submit(struct nvhost_channel_userctx *ctx)
{
- ctx->cmdbufs_pending = 0;
- ctx->relocs_pending = 0;
+ ctx->hdr.num_cmdbufs = 0;
+ ctx->hdr.num_relocs = 0;
+ ctx->hdr.num_waitchks = 0;
}
static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
while (remaining) {
size_t consumed;
- if (!priv->relocs_pending && !priv->cmdbufs_pending) {
+ if (!priv->hdr.num_relocs &&
+ !priv->hdr.num_cmdbufs &&
+ !priv->hdr.num_waitchks) {
consumed = sizeof(struct nvhost_submit_hdr);
if (remaining < consumed)
break;
- if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
+ if (copy_from_user(&priv->hdr, buf, consumed)) {
err = -EFAULT;
break;
}
- if (!priv->cmdbufs_pending) {
- err = -EFAULT;
+ priv->hdr.submit_version = NVHOST_SUBMIT_VERSION_V0;
+ err = set_submit(priv);
+ if (err)
break;
- }
- priv->cur_gather = priv->gathers;
- priv->pinarray_size = 0;
- } else if (priv->cmdbufs_pending) {
+ trace_nvhost_channel_write_submit(priv->ch->desc->name,
+ count, priv->hdr.num_cmdbufs, priv->hdr.num_relocs);
+ } else if (priv->hdr.num_cmdbufs) {
struct nvhost_cmdbuf cmdbuf;
consumed = sizeof(cmdbuf);
if (remaining < consumed)
err = -EFAULT;
break;
}
+ trace_nvhost_channel_write_cmdbuf(priv->ch->desc->name,
+ cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
add_gather(priv,
cmdbuf.mem, cmdbuf.words, cmdbuf.offset);
- priv->cmdbufs_pending--;
- } else if (priv->relocs_pending) {
+ priv->hdr.num_cmdbufs--;
+ } else if (priv->hdr.num_relocs) {
int numrelocs = remaining / sizeof(struct nvhost_reloc);
if (!numrelocs)
break;
- numrelocs = min_t(int, numrelocs, priv->relocs_pending);
+ numrelocs = min_t(int, numrelocs, priv->hdr.num_relocs);
consumed = numrelocs * sizeof(struct nvhost_reloc);
if (copy_from_user(&priv->pinarray[priv->pinarray_size],
buf, consumed)) {
err = -EFAULT;
break;
}
+ trace_nvhost_channel_write_relocs(priv->ch->desc->name,
+ numrelocs);
priv->pinarray_size += numrelocs;
- priv->relocs_pending -= numrelocs;
+ priv->hdr.num_relocs -= numrelocs;
+ } else if (priv->hdr.num_waitchks) {
+ int numwaitchks =
+ (remaining / sizeof(struct nvhost_waitchk));
+ if (!numwaitchks)
+ break;
+ numwaitchks = min_t(int,
+ numwaitchks, priv->hdr.num_waitchks);
+ consumed = numwaitchks * sizeof(struct nvhost_waitchk);
+ if (copy_from_user(priv->cur_waitchk, buf, consumed)) {
+ err = -EFAULT;
+ break;
+ }
+ trace_nvhost_channel_write_waitchks(
+ priv->ch->desc->name, numwaitchks,
+ priv->hdr.waitchk_mask);
+ priv->cur_waitchk += numwaitchks;
+ priv->hdr.num_waitchks -= numwaitchks;
} else {
err = -EFAULT;
break;
static int nvhost_ioctl_channel_flush(
struct nvhost_channel_userctx *ctx,
- struct nvhost_get_param_args *args)
+ struct nvhost_get_param_args *args,
+ int null_kickoff)
{
struct device *device = &ctx->ch->dev->pdev->dev;
int num_unpin;
int err;
- if (ctx->relocs_pending || ctx->cmdbufs_pending) {
+ trace_nvhost_ioctl_channel_flush(ctx->ch->desc->name);
+
+ if (ctx->hdr.num_relocs ||
+ ctx->hdr.num_cmdbufs ||
+ ctx->hdr.num_waitchks) {
reset_submit(ctx);
dev_err(device, "channel submit out of sync\n");
return -EFAULT;
return num_unpin;
}
+ if (nvhost_debug_null_kickoff_pid == current->tgid)
+ null_kickoff = 1;
+
/* context switch if needed, and submit user's gathers to the channel */
- err = nvhost_channel_submit(ctx->ch, ctx->hwctx, ctx->nvmap,
+ BUG_ON(!channel_op(ctx->ch).submit);
+ err = channel_op(ctx->ch).submit(ctx->ch, ctx->hwctx, ctx->nvmap,
ctx->gathers, ctx->cur_gather,
+ ctx->waitchks, ctx->cur_waitchk,
+ ctx->hdr.waitchk_mask,
ctx->unpinarray, num_unpin,
- ctx->syncpt_id, ctx->syncpt_incrs,
+ ctx->hdr.syncpt_id, ctx->hdr.syncpt_incrs,
&args->value,
- ctx->null_kickoff != 0);
+ null_kickoff);
if (err)
nvmap_unpin_handles(ctx->nvmap, ctx->unpinarray, num_unpin);
switch (cmd) {
case NVHOST_IOCTL_CHANNEL_FLUSH:
- err = nvhost_ioctl_channel_flush(priv, (void *)buf);
+ err = nvhost_ioctl_channel_flush(priv, (void *)buf, 0);
break;
+ case NVHOST_IOCTL_CHANNEL_NULL_KICKOFF:
+ err = nvhost_ioctl_channel_flush(priv, (void *)buf, 1);
+ break;
+ case NVHOST_IOCTL_CHANNEL_SUBMIT_EXT:
+ {
+ struct nvhost_submit_hdr_ext *hdr;
+
+ if (priv->hdr.num_relocs ||
+ priv->hdr.num_cmdbufs ||
+ priv->hdr.num_waitchks) {
+ reset_submit(priv);
+ dev_err(&priv->ch->dev->pdev->dev,
+ "channel submit out of sync\n");
+ err = -EIO;
+ break;
+ }
+
+ hdr = (struct nvhost_submit_hdr_ext *)buf;
+ if (hdr->submit_version > NVHOST_SUBMIT_VERSION_MAX_SUPPORTED) {
+ dev_err(&priv->ch->dev->pdev->dev,
+ "submit version %d > max supported %d\n",
+ hdr->submit_version,
+ NVHOST_SUBMIT_VERSION_MAX_SUPPORTED);
+ err = -EINVAL;
+ break;
+ }
+ memcpy(&priv->hdr, hdr, sizeof(struct nvhost_submit_hdr_ext));
+ err = set_submit(priv);
+ trace_nvhost_ioctl_channel_submit(priv->ch->desc->name,
+ priv->hdr.submit_version,
+ priv->hdr.num_cmdbufs, priv->hdr.num_relocs,
+ priv->hdr.num_waitchks);
+ break;
+ }
case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
+ /* host syncpt ID is used by the RM (and never be given out) */
+ BUG_ON(priv->ch->desc->syncpts & (1 << NVSYNCPT_GRAPHICS_HOST));
((struct nvhost_get_param_args *)buf)->value =
priv->ch->desc->syncpts;
break;
struct nvhost_ctrl_userctx *priv = filp->private_data;
int i;
+ trace_nvhost_ctrlrelease(priv->dev->mod.name);
+
filp->private_data = NULL;
if (priv->mod_locks[0])
nvhost_module_idle(&priv->dev->mod);
- for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
+ for (i = 1; i < priv->dev->nb_mlocks; i++)
if (priv->mod_locks[i])
nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
+ kfree(priv->mod_locks);
kfree(priv);
return 0;
}
{
struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
struct nvhost_ctrl_userctx *priv;
+ u32 *mod_locks;
+
+ trace_nvhost_ctrlopen(host->mod.name);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
+ mod_locks = kzalloc(sizeof(u32)*host->nb_mlocks, GFP_KERNEL);
+
+ if (!(priv && mod_locks)) {
+ kfree(priv);
+ kfree(mod_locks);
return -ENOMEM;
+ }
priv->dev = host;
+ priv->mod_locks = mod_locks;
filp->private_data = priv;
return 0;
}
struct nvhost_ctrl_userctx *ctx,
struct nvhost_ctrl_syncpt_read_args *args)
{
- if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ if (args->id >= ctx->dev->syncpt.nb_pts)
return -EINVAL;
+ trace_nvhost_ioctl_ctrl_syncpt_read(args->id);
args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
return 0;
}
struct nvhost_ctrl_userctx *ctx,
struct nvhost_ctrl_syncpt_incr_args *args)
{
- if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ if (args->id >= ctx->dev->syncpt.nb_pts)
return -EINVAL;
+ trace_nvhost_ioctl_ctrl_syncpt_incr(args->id);
nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
return 0;
}
+/*
+ * Block until syncpoint args->id reaches args->thresh or the timeout
+ * expires.  The "ex" variant additionally returns the syncpoint value
+ * observed at wakeup through args->value.
+ */
-static int nvhost_ioctl_ctrl_syncpt_wait(
+static int nvhost_ioctl_ctrl_syncpt_waitex(
struct nvhost_ctrl_userctx *ctx,
- struct nvhost_ctrl_syncpt_wait_args *args)
+ struct nvhost_ctrl_syncpt_waitex_args *args)
{
u32 timeout;
- if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
+ if (args->id >= ctx->dev->syncpt.nb_pts)
return -EINVAL;
if (args->timeout == NVHOST_NO_TIMEOUT)
timeout = MAX_SCHEDULE_TIMEOUT;
else
timeout = (u32)msecs_to_jiffies(args->timeout);
+ trace_nvhost_ioctl_ctrl_syncpt_wait(args->id, args->thresh,
+ args->timeout);
return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
- args->thresh, timeout);
+ args->thresh, timeout, &args->value);
}
static int nvhost_ioctl_ctrl_module_mutex(
struct nvhost_ctrl_module_mutex_args *args)
{
int err = 0;
- if (args->id >= NV_HOST1X_NB_MLOCKS ||
+ if (args->id >= ctx->dev->nb_mlocks ||
args->lock > 1)
return -EINVAL;
+ trace_nvhost_ioctl_ctrl_module_mutex(args->lock, args->id);
if (args->lock && !ctx->mod_locks[args->id]) {
if (args->id == 0)
nvhost_module_busy(&ctx->dev->mod);
void *values = args->values;
u32 vals[64];
- if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
+ if (!(args->id < ctx->dev->nb_modules) ||
(num_offsets == 0))
return -EINVAL;
err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
break;
case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
- err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
+ err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
break;
case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
break;
+ case NVHOST_IOCTL_CTRL_SYNCPT_WAITEX:
+ err = nvhost_ioctl_ctrl_syncpt_waitex(priv, (void *)buf);
+ break;
default:
err = -ENOTTY;
break;
*/
} else if (action == NVHOST_POWER_ACTION_OFF) {
int i;
- for (i = 0; i < NVHOST_NUMCHANNELS; i++)
+ for (i = 0; i < dev->nb_channels; i++)
nvhost_channel_suspend(&dev->channels[i]);
nvhost_syncpt_save(&dev->syncpt);
nvhost_intr_stop(&dev->intr);
if (nvhost_major) {
devno = MKDEV(nvhost_major, nvhost_minor);
- err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+ err = register_chrdev_region(devno, host->nb_channels + 1,
+ IFACE_NAME);
} else {
err = alloc_chrdev_region(&devno, nvhost_minor,
- NVHOST_NUMCHANNELS + 1, IFACE_NAME);
+ host->nb_channels + 1, IFACE_NAME);
nvhost_major = MAJOR(devno);
}
if (err < 0) {
goto fail;
}
- for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ for (i = 0; i < host->nb_channels; i++) {
struct nvhost_channel *ch = &host->channels[i];
- if (!strcmp(ch->desc->name, "display") &&
- !nvhost_access_module_regs(&host->cpuaccess,
- NVHOST_MODULE_DISPLAY_A))
- continue;
-
cdev_init(&ch->cdev, &nvhost_channelops);
ch->cdev.owner = THIS_MODULE;
cdev_init(&host->cdev, &nvhost_ctrlops);
host->cdev.owner = THIS_MODULE;
- devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
+ devno = MKDEV(nvhost_major, nvhost_minor + host->nb_channels);
err = cdev_add(&host->cdev, devno, 1);
if (err < 0)
goto fail;
return err;
}
+/*
+ * Free the per-chip arrays allocated by nvhost_init_chip_support().
+ * Safe to call on a partially-initialized host: kfree(NULL) is a
+ * no-op, and every pointer is reset to NULL afterwards so a repeated
+ * call (e.g. probe-failure path followed by remove) is harmless.
+ */
+static void nvhost_remove_chip_support(struct nvhost_master *host)
+{
+ kfree(host->channels);
+ host->channels = NULL;
+
+ kfree(host->syncpt.min_val);
+ host->syncpt.min_val = NULL;
+
+ kfree(host->syncpt.max_val);
+ host->syncpt.max_val = NULL;
+
+ kfree(host->syncpt.base_val);
+ host->syncpt.base_val = NULL;
+
+ kfree(host->intr.syncpt);
+ host->intr.syncpt = NULL;
+
+ kfree(host->cpuaccess.regs);
+ host->cpuaccess.regs = NULL;
+
+ kfree(host->cpuaccess.reg_mem);
+ host->cpuaccess.reg_mem = NULL;
+
+ kfree(host->cpuaccess.lock_counts);
+ host->cpuaccess.lock_counts = NULL;
+}
+
+/*
+ * Detect the Tegra chip generation, install the matching per-chip
+ * function tables, then allocate every array whose size was decided
+ * by that chip-specific init (channel list, syncpt bookkeeping, intr
+ * and cpuaccess tables).
+ *
+ * Returns 0 on success or a negative errno.  On allocation failure
+ * nothing is freed here; nvhost_remove_chip_support() releases the
+ * partially-allocated arrays.
+ */
+static int __devinit nvhost_init_chip_support(struct nvhost_master *host)
+{
+ int err;
+ err = tegra_get_chip_info(&host->chip_info);
+ if (err)
+ return err;
+
+ switch (host->chip_info.arch) {
+ case TEGRA_SOC_CHIP_ARCH_T20:
+ err = nvhost_init_t20_support(host);
+ break;
+
+ case TEGRA_SOC_CHIP_ARCH_T30:
+ err = nvhost_init_t30_support(host);
+ break;
+ default:
+ /* unknown architecture: no support table to install */
+ return -ENODEV;
+ }
+
+ if (err)
+ return err;
+
+ /* allocate items sized in chip specific support init */
+ host->channels = kzalloc(sizeof(struct nvhost_channel) *
+ host->nb_channels, GFP_KERNEL);
+
+ host->syncpt.min_val = kzalloc(sizeof(atomic_t) *
+ host->syncpt.nb_pts, GFP_KERNEL);
+
+ host->syncpt.max_val = kzalloc(sizeof(atomic_t) *
+ host->syncpt.nb_pts, GFP_KERNEL);
+
+ host->syncpt.base_val = kzalloc(sizeof(u32) *
+ host->syncpt.nb_bases, GFP_KERNEL);
+
+ host->intr.syncpt = kzalloc(sizeof(struct nvhost_intr_syncpt) *
+ host->syncpt.nb_pts, GFP_KERNEL);
+
+ host->cpuaccess.reg_mem = kzalloc(sizeof(struct resource *) *
+ host->nb_modules, GFP_KERNEL);
+
+ host->cpuaccess.regs = kzalloc(sizeof(void __iomem *) *
+ host->nb_modules, GFP_KERNEL);
+
+ host->cpuaccess.lock_counts = kzalloc(sizeof(atomic_t) *
+ host->nb_mlocks, GFP_KERNEL);
+
+ if (!(host->channels && host->syncpt.min_val &&
+ host->syncpt.max_val && host->syncpt.base_val &&
+ host->intr.syncpt && host->cpuaccess.reg_mem &&
+ host->cpuaccess.regs && host->cpuaccess.lock_counts)) {
+ /* frees happen in the support removal phase */
+ return -ENOMEM;
+ }
+
+ return 0;
+}
static int __devinit nvhost_probe(struct platform_device *pdev)
{
struct nvhost_master *host;
err = -ENXIO;
goto fail;
}
- host->sync_aperture = host->aperture +
- (NV_HOST1X_CHANNEL0_BASE +
- HOST1X_CHANNEL_SYNC_REG_BASE);
- for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
+ err = nvhost_init_chip_support(host);
+ if (err) {
+ dev_err(&pdev->dev, "failed to init chip support\n");
+ goto fail;
+ }
+
+ for (i = 0; i < host->nb_channels; i++) {
struct nvhost_channel *ch = &host->channels[i];
- err = nvhost_channel_init(ch, host, i);
+ BUG_ON(!host_channel_op(host).init);
+ err = host_channel_op(host).init(ch, host, i);
if (err < 0) {
dev_err(&pdev->dev, "failed to init channel %d\n", i);
goto fail;
}
}
+
err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
- if (err) goto fail;
+ if (err)
+ goto fail;
+
err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
- if (err) goto fail;
+ if (err)
+ goto fail;
+
err = nvhost_user_init(host);
- if (err) goto fail;
+ if (err)
+ goto fail;
+
err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
- if (err) goto fail;
+ if (err)
+ goto fail;
+
platform_set_drvdata(pdev, host);
return 0;
fail:
+ nvhost_remove_chip_support(host);
if (host->nvmap)
nvmap_client_put(host->nvmap);
/* TODO: [ahatala 2010-05-04] */
+/* Platform-device teardown: release the chip-support allocations. */
static int __exit nvhost_remove(struct platform_device *pdev)
{
+ struct nvhost_master *host = platform_get_drvdata(pdev);
+ nvhost_remove_chip_support(host);
+ /* TODO(review): host itself is not freed here - confirm its
+ * intended lifetime before adding kfree(host) */
return 0;
}
static int __init nvhost_mod_init(void)
{
+ /* snapshot the GPU register-set count for the read-only module param */
+ register_sets = tegra_gpu_register_sets();
return platform_driver_probe(&nvhost_driver, nvhost_probe);
}
module_init(nvhost_mod_init);
module_exit(nvhost_mod_exit);
+/* read-only (0444) module parameter exposing the detected register-set
+ * count; NULL setter means the value cannot be changed from userspace */
+module_param_call(register_sets, NULL, param_get_uint, &register_sets, 0444);
+MODULE_PARM_DESC(register_sets, "Number of register sets");
+
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("Graphics host driver for Tegra products");
MODULE_VERSION("1.0");