video: tegra: host: Tegra12 updates to host
Mark Stadler [Wed, 1 Aug 2012 20:40:26 +0000 (13:40 -0700)]
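Add a dedicated nvhost bus type (bus.c), an address-space ("as") device
interface exposed through per-channel nvhost-as-&lt;dev&gt; nodes, a block
allocator, and Tegra12x build and chip-support hooks (t124, gk20a and
vic03 targets), plus new channel ioctls for object contexts, gpfifo
allocation and submission, buffer map/unmap, wait, zcull and zbc.
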
Change-Id: I341c55571b8f0f60b7a4bfae374c8c4c771fd27b
Signed-off-by: Mark Stadler <mastadler@nvidia.com>

23 files changed:
drivers/video/tegra/host/Makefile
drivers/video/tegra/host/bus.c [new file with mode: 0644]
drivers/video/tegra/host/bus_client.c
drivers/video/tegra/host/bus_client.h
drivers/video/tegra/host/chip_support.c
drivers/video/tegra/host/chip_support.h
drivers/video/tegra/host/class_ids.h
drivers/video/tegra/host/debug.c
drivers/video/tegra/host/dev.c
drivers/video/tegra/host/dev.h
drivers/video/tegra/host/nvhost_acm.c
drivers/video/tegra/host/nvhost_allocator.c [new file with mode: 0644]
drivers/video/tegra/host/nvhost_allocator.h [new file with mode: 0644]
drivers/video/tegra/host/nvhost_as.c [new file with mode: 0644]
drivers/video/tegra/host/nvhost_as.h [new file with mode: 0644]
drivers/video/tegra/host/nvhost_channel.h
drivers/video/tegra/host/nvhost_hwctx.h
drivers/video/tegra/host/nvhost_memmgr.c
drivers/video/tegra/host/nvhost_syncpt.h
drivers/video/tegra/host/nvmap.c
include/linux/nvhost.h
include/linux/nvhost_as_ioctl.h [new file with mode: 0644]
include/linux/nvhost_ioctl.h

diff --git a/drivers/video/tegra/host/Makefile b/drivers/video/tegra/host/Makefile
index f5b79dc..2ec7a96 100644
@@ -1,8 +1,29 @@
 GCOV_PROFILE := y
 EXTRA_CFLAGS += -Idrivers/video/tegra/host -Idrivers/devfreq
 
+#
+# Arrange for code sharing among configurations.
+#
+config_present = $(if $(findstring y,$(1)),y,)
+
+all_configs = $(call config_present,$(CONFIG_ARCH_TEGRA_2x_SOC) \
+ $(CONFIG_ARCH_TEGRA_3x_SOC) $(CONFIG_ARCH_TEGRA_11x_SOC) \
+ $(CONFIG_ARCH_TEGRA_12x_SOC) $(CONFIG_ARCH_TEGRA_14x_SOC))
+
+t3x_or_higher_config = $(call config_present,$(CONFIG_ARCH_TEGRA_3x_SOC)\
+  $(t11x_or_higher_config))
+
+t11x_or_higher_config = $(call config_present,$(CONFIG_ARCH_TEGRA_11x_SOC)\
+  $(t14x_or_higher_config))
+
+t14x_or_higher_config = $(call config_present,$(CONFIG_ARCH_TEGRA_14x_SOC)\
+  $(t12x_or_higher_config))
+
+t12x_or_higher_config = $(call config_present,$(CONFIG_ARCH_TEGRA_12x_SOC))
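+#
+# Illustration: with CONFIG_ARCH_TEGRA_3x_SOC=y and no newer SoC enabled,
+# t3x_or_higher_config expands (lazily, through the chain above) to
+# $(findstring y,y) = "y", so "obj-$(t3x_or_higher_config) += t30/" below
+# becomes "obj-y += t30/".  With no matching config it expands to the
+# empty string and the objects land in the never-built "obj-" list.
+#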
+
 nvhost-objs = \
        nvhost_acm.o \
+       nvhost_as.o \
        nvhost_syncpt.o \
        nvhost_cdma.o \
        nvhost_intr.o \
@@ -16,21 +37,23 @@ nvhost-objs = \
        nvhost_scale.o \
        user_hwctx.o
 
-obj-$(CONFIG_TEGRA_GRHOST) += mpe/
-obj-$(CONFIG_TEGRA_GRHOST) += gr3d/
-obj-$(CONFIG_TEGRA_GRHOST) += host1x/
-obj-$(CONFIG_TEGRA_GRHOST) += t20/
-obj-$(CONFIG_TEGRA_GRHOST) += t30/
-obj-$(CONFIG_TEGRA_GRHOST) += gr2d/
-obj-$(CONFIG_TEGRA_GRHOST) += isp/
-ifneq ($(CONFIG_VIDEO_TEGRA),y)
-obj-$(CONFIG_TEGRA_GRHOST) += vi/
-endif
-obj-$(CONFIG_TEGRA_GRHOST) += t114/
-obj-$(CONFIG_TEGRA_GRHOST) += t148/
-obj-$(CONFIG_TEGRA_GRHOST) += msenc/
-obj-$(CONFIG_TEGRA_GRHOST) += tsec/
-obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
+obj-$(all_configs) += nvhost.o
+obj-$(all_configs) += host1x/
+obj-$(all_configs) += t20/
+obj-$(all_configs) += mpe/
+obj-$(all_configs) += gr3d/
+obj-$(t3x_or_higher_config) += t30/
+obj-$(t11x_or_higher_config) += t114/
+obj-$(t14x_or_higher_config) += t148/
+obj-$(t11x_or_higher_config) += msenc/
+obj-$(t11x_or_higher_config) += tsec/
+obj-$(t11x_or_higher_config) += gr2d/
+obj-$(t11x_or_higher_config) += isp/
+obj-$(t11x_or_higher_config) += vi/
+obj-$(t12x_or_higher_config) += t124/
+obj-$(t12x_or_higher_config) += gk20a/
+obj-$(t12x_or_higher_config) += vic03/
+obj-$(t12x_or_higher_config) += nvhost_allocator.o
 
 obj-$(CONFIG_TEGRA_GRHOST_USE_NVMAP) += nvmap.o
 obj-$(CONFIG_TEGRA_GRHOST_USE_DMABUF) += dmabuf.o
diff --git a/drivers/video/tegra/host/bus.c b/drivers/video/tegra/host/bus.c
new file mode 100644
index 0000000..d75cdc8
--- /dev/null
@@ -0,0 +1,643 @@
+/*
+ * drivers/video/tegra/host/bus.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Erik Gilling <konkers@google.com>
+ *
+ * Copyright (C) 2010-2012 NVIDIA Corporation
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/export.h>
+#include <linux/nvhost.h>
+#include <linux/io.h>
+
+#include "bus.h"
+#include "dev.h"
+
+struct nvhost_bus *nvhost_bus_inst;
+struct nvhost_master *nvhost;
+
+struct resource *nvhost_get_resource(struct nvhost_device *dev,
+                                      unsigned int type, unsigned int num)
+{
+       int i;
+
+       for (i = 0; i < dev->num_resources; i++) {
+               struct resource *r = &dev->resource[i];
+
+               if (type == resource_type(r) && num-- == 0)
+                       return r;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource);
+
+int nvhost_get_irq(struct nvhost_device *dev, unsigned int num)
+{
+       struct resource *r = nvhost_get_resource(dev, IORESOURCE_IRQ, num);
+
+       return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq);
+
+struct resource *nvhost_get_resource_byname(struct nvhost_device *dev,
+                                             unsigned int type,
+                                             const char *name)
+{
+       int i;
+
+       for (i = 0; i < dev->num_resources; i++) {
+               struct resource *r = &dev->resource[i];
+
+               if (type == resource_type(r) && !strcmp(r->name, name))
+                       return r;
+       }
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_resource_byname);
+
+int nvhost_get_irq_byname(struct nvhost_device *dev, const char *name)
+{
+       struct resource *r = nvhost_get_resource_byname(dev, IORESOURCE_IRQ,
+                                                         name);
+
+       return r ? r->start : -ENXIO;
+}
+EXPORT_SYMBOL_GPL(nvhost_get_irq_byname);
+
+static struct nvhost_device_id *nvhost_bus_match_id(struct nvhost_device *dev,
+       struct nvhost_device_id *id_table)
+{
+       while (id_table->name[0]) {
+               if (strcmp(dev->name, id_table->name) == 0
+                               && dev->version == id_table->version)
+                       return id_table;
+               id_table++;
+       }
+       return NULL;
+}
+
+static int nvhost_bus_match(struct device *_dev, struct device_driver *drv)
+{
+       struct nvhost_device *dev = to_nvhost_device(_dev);
+       struct nvhost_driver *ndrv = to_nvhost_driver(drv);
+
+       /* check if driver support multiple devices through id_table */
+       if (ndrv->id_table)
+               return nvhost_bus_match_id(dev, ndrv->id_table) != NULL;
+       else /* driver does not support id_table */
+               return !strcmp(dev->name, drv->name);
+}
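+
+/*
+ * Illustration (hypothetical table, not part of this patch): a driver
+ * that serves several hardware revisions can publish
+ *
+ *     static struct nvhost_device_id gr3d_ids[] = {
+ *             { .name = "gr3d", .version = 2 },
+ *             { .name = "gr3d", .version = 3 },
+ *             { },
+ *     };
+ *
+ * and match any device whose (name, version) pair appears in the table,
+ * while a driver without an id_table matches on the driver name alone.
+ */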
+
+static int nvhost_drv_probe(struct device *_dev)
+{
+       struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+       struct nvhost_device *dev = to_nvhost_device(_dev);
+
+       if (drv && drv->probe) {
+               if (drv->id_table)
+                       return drv->probe(dev,
+                               nvhost_bus_match_id(dev, drv->id_table));
+               else
+                       return drv->probe(dev, NULL);
+       } else {
+               return -ENODEV;
+       }
+}
+
+static int nvhost_drv_remove(struct device *_dev)
+{
+       struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+       struct nvhost_device *dev = to_nvhost_device(_dev);
+
+       return drv->remove(dev);
+}
+
+static void nvhost_drv_shutdown(struct device *_dev)
+{
+       struct nvhost_driver *drv = to_nvhost_driver(_dev->driver);
+       struct nvhost_device *dev = to_nvhost_device(_dev);
+
+       drv->shutdown(dev);
+}
+
+int nvhost_driver_register(struct nvhost_driver *drv)
+{
+       drv->driver.bus = &nvhost_bus_inst->nvhost_bus_type;
+       if (drv->probe)
+               drv->driver.probe = nvhost_drv_probe;
+       if (drv->remove)
+               drv->driver.remove = nvhost_drv_remove;
+       if (drv->shutdown)
+               drv->driver.shutdown = nvhost_drv_shutdown;
+
+       return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL(nvhost_driver_register);
+
+void nvhost_driver_unregister(struct nvhost_driver *drv)
+{
+       driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(nvhost_driver_unregister);
+
+int nvhost_add_devices(struct nvhost_device **devs, int num)
+{
+       int i, ret = 0;
+
+       for (i = 0; i < num; i++) {
+               ret = nvhost_device_register(devs[i]);
+               if (ret) {
+                       while (--i >= 0)
+                               nvhost_device_unregister(devs[i]);
+                       break;
+               }
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nvhost_add_devices);
+
+int nvhost_device_register(struct nvhost_device *dev)
+{
+       int i, ret = 0;
+
+       if (!dev)
+               return -EINVAL;
+
+       device_initialize(&dev->dev);
+
+       /*  If the dev does not have a parent, assign host1x as parent */
+       if (!dev->dev.parent && nvhost && nvhost->dev != dev)
+               dev->dev.parent = &nvhost->dev->dev;
+
+       dev->dev.bus = &nvhost_bus_inst->nvhost_bus_type;
+
+       if (dev->id != -1)
+               dev_set_name(&dev->dev, "%s.%d", dev->name, dev->id);
+       else
+               dev_set_name(&dev->dev, "%s", dev->name);
+
+       for (i = 0; i < dev->num_resources; i++) {
+               struct resource *p, *r = &dev->resource[i];
+
+               if (r->name == NULL)
+                       r->name = dev_name(&dev->dev);
+
+               p = r->parent;
+               if (!p) {
+                       if (resource_type(r) == IORESOURCE_MEM)
+                               p = &iomem_resource;
+                       else if (resource_type(r) == IORESOURCE_IO)
+                               p = &ioport_resource;
+               }
+
+               if (p && insert_resource(p, r)) {
+                       pr_err("%s: failed to claim resource %d\n",
+                              dev_name(&dev->dev), i);
+                       ret = -EBUSY;
+                       goto failed;
+               }
+       }
+
+       ret = device_add(&dev->dev);
+       if (ret == 0)
+               return ret;
+
+failed:
+       while (--i >= 0) {
+               struct resource *r = &dev->resource[i];
+               unsigned long type = resource_type(r);
+
+               if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+                       release_resource(r);
+       }
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nvhost_device_register);
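+
+/*
+ * Registration sketch (hypothetical device; field names as used above):
+ *
+ *     static struct nvhost_device my_dev = {
+ *             .name = "mydev",
+ *             .id = -1,
+ *             .resource = my_resources,
+ *             .num_resources = ARRAY_SIZE(my_resources),
+ *     };
+ *     err = nvhost_device_register(&my_dev);
+ *
+ * An id of -1 drops the ".%d" instance suffix from the device name.
+ * Each IORESOURCE_MEM/IO entry is claimed at registration and released
+ * again on failure or by nvhost_device_unregister().
+ */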
+
+void nvhost_device_unregister(struct nvhost_device *dev)
+{
+       int i;
+       if (dev) {
+               device_del(&dev->dev);
+
+               for (i = 0; i < dev->num_resources; i++) {
+                       struct resource *r = &dev->resource[i];
+                       unsigned long type = resource_type(r);
+
+                       if (type == IORESOURCE_MEM || type == IORESOURCE_IO)
+                               release_resource(r);
+               }
+
+               put_device(&dev->dev);
+       }
+}
+EXPORT_SYMBOL_GPL(nvhost_device_unregister);
+
+void nvhost_device_writel(struct nvhost_device *dev, u32 r, u32 v)
+{
+       writel(v, dev->aperture[0] + r);
+}
+EXPORT_SYMBOL_GPL(nvhost_device_writel);
+
+u32 nvhost_device_readl(struct nvhost_device *dev, u32 r)
+{
+       return readl(dev->aperture[0] + r);
+}
+EXPORT_SYMBOL_GPL(nvhost_device_readl);
+
+#ifdef CONFIG_PM_SLEEP
+
+static int nvhost_legacy_suspend(struct device *dev, pm_message_t mesg)
+{
+       struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+       struct nvhost_device *pdev = to_nvhost_device(dev);
+       int ret = 0;
+
+       if (dev->driver && pdrv->suspend)
+               ret = pdrv->suspend(pdev, mesg);
+
+       return ret;
+}
+
+static int nvhost_legacy_resume(struct device *dev)
+{
+       struct nvhost_driver *pdrv = to_nvhost_driver(dev->driver);
+       struct nvhost_device *pdev = to_nvhost_device(dev);
+       int ret = 0;
+
+       if (dev->driver && pdrv->resume)
+               ret = pdrv->resume(pdev);
+
+       return ret;
+}
+
+static int nvhost_pm_prepare(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (drv && drv->pm && drv->pm->prepare)
+               ret = drv->pm->prepare(dev);
+
+       return ret;
+}
+
+static void nvhost_pm_complete(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+
+       if (drv && drv->pm && drv->pm->complete)
+               drv->pm->complete(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define nvhost_pm_prepare              NULL
+#define nvhost_pm_complete             NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_SUSPEND
+
+int __weak nvhost_pm_suspend(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->suspend)
+                       ret = drv->pm->suspend(dev);
+       } else {
+               ret = nvhost_legacy_suspend(dev, PMSG_SUSPEND);
+       }
+
+       return ret;
+}
+
+int __weak nvhost_pm_suspend_noirq(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->suspend_noirq)
+                       ret = drv->pm->suspend_noirq(dev);
+       }
+
+       return ret;
+}
+
+int __weak nvhost_pm_resume(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->resume)
+                       ret = drv->pm->resume(dev);
+       } else {
+               ret = nvhost_legacy_resume(dev);
+       }
+
+       return ret;
+}
+
+int __weak nvhost_pm_resume_noirq(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->resume_noirq)
+                       ret = drv->pm->resume_noirq(dev);
+       }
+
+       return ret;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define nvhost_pm_suspend              NULL
+#define nvhost_pm_resume               NULL
+#define nvhost_pm_suspend_noirq        NULL
+#define nvhost_pm_resume_noirq         NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int nvhost_pm_freeze(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->freeze)
+                       ret = drv->pm->freeze(dev);
+       } else {
+               ret = nvhost_legacy_suspend(dev, PMSG_FREEZE);
+       }
+
+       return ret;
+}
+
+static int nvhost_pm_freeze_noirq(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->freeze_noirq)
+                       ret = drv->pm->freeze_noirq(dev);
+       }
+
+       return ret;
+}
+
+static int nvhost_pm_thaw(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->thaw)
+                       ret = drv->pm->thaw(dev);
+       } else {
+               ret = nvhost_legacy_resume(dev);
+       }
+
+       return ret;
+}
+
+static int nvhost_pm_thaw_noirq(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->thaw_noirq)
+                       ret = drv->pm->thaw_noirq(dev);
+       }
+
+       return ret;
+}
+
+static int nvhost_pm_poweroff(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->poweroff)
+                       ret = drv->pm->poweroff(dev);
+       } else {
+               ret = nvhost_legacy_suspend(dev, PMSG_HIBERNATE);
+       }
+
+       return ret;
+}
+
+static int nvhost_pm_poweroff_noirq(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->poweroff_noirq)
+                       ret = drv->pm->poweroff_noirq(dev);
+       }
+
+       return ret;
+}
+
+static int nvhost_pm_restore(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->restore)
+                       ret = drv->pm->restore(dev);
+       } else {
+               ret = nvhost_legacy_resume(dev);
+       }
+
+       return ret;
+}
+
+static int nvhost_pm_restore_noirq(struct device *dev)
+{
+       struct device_driver *drv = dev->driver;
+       int ret = 0;
+
+       if (!drv)
+               return 0;
+
+       if (drv->pm) {
+               if (drv->pm->restore_noirq)
+                       ret = drv->pm->restore_noirq(dev);
+       }
+
+       return ret;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define nvhost_pm_freeze               NULL
+#define nvhost_pm_thaw                 NULL
+#define nvhost_pm_poweroff             NULL
+#define nvhost_pm_restore              NULL
+#define nvhost_pm_freeze_noirq         NULL
+#define nvhost_pm_thaw_noirq           NULL
+#define nvhost_pm_poweroff_noirq       NULL
+#define nvhost_pm_restore_noirq        NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+#ifdef CONFIG_PM_RUNTIME
+
+int __weak nvhost_pm_runtime_suspend(struct device *dev)
+{
+       return pm_generic_runtime_suspend(dev);
+}
+
+int __weak nvhost_pm_runtime_resume(struct device *dev)
+{
+       return pm_generic_runtime_resume(dev);
+}
+
+int __weak nvhost_pm_runtime_idle(struct device *dev)
+{
+       return pm_generic_runtime_idle(dev);
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+#define nvhost_pm_runtime_suspend NULL
+#define nvhost_pm_runtime_resume NULL
+#define nvhost_pm_runtime_idle NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+static const struct dev_pm_ops nvhost_dev_pm_ops = {
+       .prepare = nvhost_pm_prepare,
+       .complete = nvhost_pm_complete,
+       .suspend = nvhost_pm_suspend,
+       .resume = nvhost_pm_resume,
+       .freeze = nvhost_pm_freeze,
+       .thaw = nvhost_pm_thaw,
+       .poweroff = nvhost_pm_poweroff,
+       .restore = nvhost_pm_restore,
+       .suspend_noirq = nvhost_pm_suspend_noirq,
+       .resume_noirq = nvhost_pm_resume_noirq,
+       .freeze_noirq = nvhost_pm_freeze_noirq,
+       .thaw_noirq = nvhost_pm_thaw_noirq,
+       .poweroff_noirq = nvhost_pm_poweroff_noirq,
+       .restore_noirq = nvhost_pm_restore_noirq,
+       .runtime_suspend = nvhost_pm_runtime_suspend,
+       .runtime_resume = nvhost_pm_runtime_resume,
+       .runtime_idle = nvhost_pm_runtime_idle,
+};
+
+static int set_parent(struct device *dev, void *data)
+{
+       struct nvhost_device *ndev = to_nvhost_device(dev);
+       struct nvhost_master *host = data;
+       if (!dev->parent && ndev != host->dev)
+               dev->parent = &host->dev->dev;
+       return 0;
+}
+
+int nvhost_bus_add_host(struct nvhost_master *host)
+{
+       nvhost = host;
+
+       /*  Assign host1x as parent to all devices in nvhost bus */
+       bus_for_each_dev(&nvhost_bus_inst->nvhost_bus_type, NULL, host, set_parent);
+
+       return 0;
+}
+
+struct nvhost_bus *nvhost_bus_get(void)
+{
+       return nvhost_bus_inst;
+}
+
+int nvhost_bus_init(void)
+{
+       int err;
+       struct nvhost_chip_support *chip_ops;
+
+       pr_info("host1x bus init\n");
+
+       nvhost_bus_inst = kzalloc(sizeof(*nvhost_bus_inst), GFP_KERNEL);
+       if (nvhost_bus_inst == NULL) {
+               pr_err("%s: Cannot allocate nvhost_bus\n", __func__);
+               return -ENOMEM;
+       }
+
+       chip_ops = kzalloc(sizeof(*chip_ops), GFP_KERNEL);
+       if (chip_ops == NULL) {
+               pr_err("%s: Cannot allocate nvhost_chip_support\n", __func__);
+               kfree(nvhost_bus_inst);
+               nvhost_bus_inst = NULL;
+               return -ENOMEM;
+       }
+
+       nvhost_bus_inst->nvhost_bus_type.name = "nvhost";
+       nvhost_bus_inst->nvhost_bus_type.match = nvhost_bus_match;
+       nvhost_bus_inst->nvhost_bus_type.pm = &nvhost_dev_pm_ops;
+       nvhost_bus_inst->nvhost_chip_ops = chip_ops;
+
+       err = bus_register(&nvhost_bus_inst->nvhost_bus_type);
+
+       return err;
+}
+postcore_initcall(nvhost_bus_init);
diff --git a/drivers/video/tegra/host/bus_client.c b/drivers/video/tegra/host/bus_client.c
index 48339b7..c35be4f 100644
@@ -44,6 +44,8 @@
 #include "debug.h"
 #include "bus_client.h"
 #include "dev.h"
+#include "class_ids.h"
+#include "nvhost_as.h"
 #include "nvhost_memmgr.h"
 #include "chip_support.h"
 #include "nvhost_acm.h"
@@ -177,7 +179,9 @@ static int nvhost_channelopen(struct inode *inode, struct file *filp)
                goto fail;
 
        if (ch->ctxhandler && ch->ctxhandler->alloc) {
+               nvhost_module_busy(ch->dev);
                priv->hwctx = ch->ctxhandler->alloc(ch->ctxhandler, ch);
+               nvhost_module_idle(ch->dev);
                if (!priv->hwctx)
                        goto fail;
        }
@@ -419,6 +423,152 @@ fail:
        return err;
 }
 
+static int nvhost_ioctl_channel_alloc_obj_ctx(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_alloc_obj_ctx_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_op().alloc_obj);
+       ret = channel_op().alloc_obj(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_free_obj_ctx(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_free_obj_ctx_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_op().free_obj);
+       ret = channel_op().free_obj(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_alloc_gpfifo(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_alloc_gpfifo_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_op().alloc_gpfifo);
+       ret = channel_op().alloc_gpfifo(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_submit_gpfifo(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_submit_gpfifo_args *args)
+{
+       struct nvhost_gpfifo *gpfifo;
+       u32 size = args->num_entries * sizeof(struct nvhost_gpfifo);
+       int ret = 0;
+
+       gpfifo = kzalloc(size, GFP_KERNEL);
+       if (!gpfifo)
+               return -ENOMEM;
+
+       if (copy_from_user(gpfifo, (void __user *)args->gpfifo, size)) {
+               ret = -EFAULT;
+               goto clean_up;
+       }
+
+       BUG_ON(!channel_op().submit_gpfifo);
+       ret = channel_op().submit_gpfifo(ctx->hwctx, gpfifo,
+                       args->num_entries, &args->fence, args->flags);
+clean_up:
+       kfree(gpfifo);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_map_buffer(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_map_buffer_args *map_buffer_args)
+{
+       int ret = 0;
+
+       BUG_ON(!channel_op().map_buffer);
+       ret = channel_op().map_buffer(ctx->hwctx, map_buffer_args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_unmap_buffer(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_unmap_buffer_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_op().unmap_buffer);
+       ret = channel_op().unmap_buffer(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_wait(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_wait_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_op().wait);
+       ret = channel_op().wait(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_zcull_get_size(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_zcull_get_size_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_zcull_op().get_size);
+       ret = channel_zcull_op().get_size(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_zcull_bind(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_zcull_bind_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_zcull_op().bind);
+       ret = channel_zcull_op().bind(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_zcull_get_info(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_zcull_get_info_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_zcull_op().get_info);
+       ret = channel_zcull_op().get_info(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_zbc_set_table(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_zbc_set_table_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_zbc_op().set_table);
+       ret = channel_zbc_op().set_table(ctx->hwctx, args);
+       return ret;
+}
+
+static int nvhost_ioctl_channel_zbc_query_table(
+       struct nvhost_channel_userctx *ctx,
+       struct nvhost_zbc_query_table_args *args)
+{
+       int ret;
+
+       BUG_ON(!channel_zbc_op().query_table);
+       ret = channel_zbc_op().query_table(ctx->hwctx, args);
+       return ret;
+}
+
 static int nvhost_ioctl_channel_submit(struct nvhost_channel_userctx *ctx,
                struct nvhost_submit_args *args)
 {
@@ -838,6 +988,7 @@ static long nvhost_channelctl(struct file *filp,
        unsigned int cmd, unsigned long arg)
 {
        struct nvhost_channel_userctx *priv = filp->private_data;
+       struct device *dev = &priv->ch->dev->dev;
        u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
        int err = 0;
 
@@ -963,7 +1114,6 @@ static long nvhost_channelctl(struct file *filp,
                        err = PTR_ERR(new_client);
                        break;
                }
-
                if (priv->memmgr)
                        nvhost_memmgr_put_mgr(priv->memmgr);
 
@@ -974,6 +1124,42 @@ static long nvhost_channelctl(struct file *filp,
 
                break;
        }
+       case NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX:
+               err = nvhost_ioctl_channel_alloc_obj_ctx(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX:
+               err = nvhost_ioctl_channel_free_obj_ctx(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO:
+               err = nvhost_ioctl_channel_alloc_gpfifo(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO:
+               err = nvhost_ioctl_channel_submit_gpfifo(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_MAP_BUFFER:
+               err = nvhost_ioctl_channel_map_buffer(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_UNMAP_BUFFER:
+               err = nvhost_ioctl_channel_unmap_buffer(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_WAIT:
+               err = nvhost_ioctl_channel_wait(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_ZCULL_GET_SIZE:
+               err = nvhost_ioctl_channel_zcull_get_size(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_ZCULL_BIND:
+               err = nvhost_ioctl_channel_zcull_bind(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_ZCULL_GET_INFO:
+               err = nvhost_ioctl_channel_zcull_get_info(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_ZBC_SET_TABLE:
+               err = nvhost_ioctl_channel_zbc_set_table(priv, (void *)buf);
+               break;
+       case NVHOST_IOCTL_CHANNEL_ZBC_QUERY_TABLE:
+               err = nvhost_ioctl_channel_zbc_query_table(priv, (void *)buf);
+               break;
        case NVHOST_IOCTL_CHANNEL_READ_3D_REG:
                err = nvhost_ioctl_channel_read_3d_reg(priv, (void *)buf);
                break;
@@ -1029,6 +1215,7 @@ static long nvhost_channelctl(struct file *filp,
                err = nvhost_ioctl_channel_set_ctxswitch(priv, (void *)buf);
                break;
        default:
+               nvhost_err(dev, "unrecognized ioctl cmd: 0x%x", cmd);
                err = -ENOTTY;
                break;
        }
@@ -1047,13 +1234,88 @@ static const struct file_operations nvhost_channelops = {
        .unlocked_ioctl = nvhost_channelctl
 };
 
+struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd)
+{
+       struct nvhost_channel_userctx *userctx;
+       struct file *f = fget(fd);
+       if (!f)
+               return NULL;
+
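+       /* only fds whose f_op identifies an nvhost channel file qualify */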
+       if (f->f_op != &nvhost_channelops) {
+               fput(f);
+               return NULL;
+       }
+
+       userctx = (struct nvhost_channel_userctx *)f->private_data;
+       fput(f);
+       return userctx->hwctx;
+}
+
+static const struct file_operations nvhost_asops = {
+       .owner = THIS_MODULE,
+       .release = nvhost_as_dev_release,
+       .open = nvhost_as_dev_open,
+       .unlocked_ioctl = nvhost_as_dev_ctl,
+};
+
+static struct {
+       int class_id;
+       const char *dev_name;
+} class_id_dev_name_map[] = {
+       /*      { NV_HOST1X_CLASS_ID, ""}, */
+       { NV_VIDEO_ENCODE_MPEG_CLASS_ID, "mpe" },
+       { NV_VIDEO_ENCODE_MSENC_CLASS_ID, "msenc" },
+       { NV_GRAPHICS_3D_CLASS_ID, "gr3d" },
+       { NV_GRAPHICS_GPU_CLASS_ID, "gr3d"},  /* TBD: move to "gpu" */
+       { NV_GRAPHICS_VIC_CLASS_ID, "vic"},
+       { NV_TSEC_CLASS_ID, "tsec" },
+};
+
+static struct {
+       int module_id;
+       const char *dev_name;
+} module_id_dev_name_map[] = {
+       { NVHOST_MODULE_VI, "vi"},
+       { NVHOST_MODULE_ISP, "isp"},
+       { NVHOST_MODULE_MPE, "mpe"},
+       { NVHOST_MODULE_MSENC, "msenc"},
+       { NVHOST_MODULE_TSEC, "tsec"},
+       { NVHOST_MODULE_GPU, "gpu"},
+       { NVHOST_MODULE_VIC, "vic"},
+};
+
+static const char *get_device_name_for_dev(struct nvhost_device *dev)
+{
+       int i;
+       /* first choice is to use the class id if specified */
+       for (i = 0; i < ARRAY_SIZE(class_id_dev_name_map); i++)
+               if (dev->class == class_id_dev_name_map[i].class_id)
+                       return class_id_dev_name_map[i].dev_name;
+
+       /* second choice is module name if specified */
+       for (i = 0; i < ARRAY_SIZE(module_id_dev_name_map); i++)
+               if (dev->moduleid == module_id_dev_name_map[i].module_id)
+                       return module_id_dev_name_map[i].dev_name;
+
+       /* last choice is to just use the given dev name */
+       return dev->name;
+}
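+
+/*
+ * Example (assuming IFACE_NAME is "nvhost"): a device whose class is
+ * NV_GRAPHICS_GPU_CLASS_ID maps to "gr3d" above, so its node is created
+ * below as /dev/nvhost-gr3d; a VI module with no class match falls back
+ * to the module-id table and becomes /dev/nvhost-vi.
+ */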
+
 int nvhost_client_user_init(struct platform_device *dev)
 {
        int err, devno;
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
 
        struct nvhost_channel *ch = pdata->channel;
-       err = alloc_chrdev_region(&devno, 0, 1, IFACE_NAME);
+       struct nvhost_master *host = nvhost_get_host(dev);
+       const char *use_dev_name;
+
+       BUG_ON(!ch);
+       BUG_ON(!host);
+       /* reserve two minor numbers: one for <dev> and one for as-<dev> */
+       err = alloc_chrdev_region(&devno, 0, 2, IFACE_NAME);
        if (err < 0) {
                dev_err(&dev->dev, "failed to allocate devno\n");
                goto fail;
@@ -1068,14 +1330,43 @@ int nvhost_client_user_init(struct platform_device *dev)
                        "failed to add chan %i cdev\n", pdata->index);
                goto fail;
        }
+       use_dev_name = get_device_name_for_dev(dev);
+
        ch->node = device_create(nvhost_get_host(dev)->nvhost_class,
-                       NULL, devno, NULL,
-                       IFACE_NAME "-%s", dev_name(&dev->dev));
+                                NULL, devno, NULL,
+                                (dev->id == 0) ?
+                                IFACE_NAME "-%s" :
+                                IFACE_NAME "-%s.%d",
+                                use_dev_name, dev->id);
+
        if (IS_ERR(ch->node)) {
                err = PTR_ERR(ch->node);
                dev_err(&dev->dev,
-                       "failed to create %s channel device\n",
-                       dev_name(&dev->dev));
+                       "failed to create %s channel device for %s\n",
+                       use_dev_name, dev->name);
+               goto fail;
+       }
+
+       /* do the same as above for the address space driver */
+       cdev_init(&ch->as_cdev, &nvhost_asops);
+       ch->as_cdev.owner = THIS_MODULE;
+
+       ++devno; /* the second minor is for as-<dev> */
+       err = cdev_add(&ch->as_cdev, devno, 1);
+       if (err < 0) {
+               dev_err(&dev->dev,
+                       "failed to add chan %i as_cdev\n", pdata->index);
+               goto fail;
+       }
+       ch->as_node = device_create(host->nvhost_class, NULL, devno, NULL,
+                                   (dev->id == 0) ?
+                                   IFACE_NAME "-as-%s" :
+                                   IFACE_NAME "-as-%s.%d",
+                                   use_dev_name, dev->id);
+       if (IS_ERR(ch->as_node)) {
+               err = PTR_ERR(ch->as_node);
+               dev_err(&dev->dev,
+                       "failed to create chan aspace %i device\n", pdata->index);
                goto fail;
        }
 
@@ -1182,30 +1473,47 @@ int nvhost_client_device_resume(struct device *dev)
 
 int nvhost_client_device_get_resources(struct platform_device *dev)
 {
-       int i;
-       void __iomem *regs = NULL;
+       struct resource *r[NVHOST_MODULE_MAX_IORESOURCE_MEM];
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
+       int n;
+
+       if (dev->num_resources > NVHOST_MODULE_MAX_IORESOURCE_MEM) {
+               dev_err(&dev->dev, "too many io mem resources: %d, max is %d\n",
+                       dev->num_resources, NVHOST_MODULE_MAX_IORESOURCE_MEM);
+               return -ENOMEM;
+       }
 
-       for (i = 0; i < dev->num_resources; i++) {
-               struct resource *r = NULL;
+       for (n = 0; n < NVHOST_MODULE_MAX_IORESOURCE_MEM; n++)
+               r[n] = NULL;
 
-               r = platform_get_resource(dev, IORESOURCE_MEM, i);
-               /* We've run out of mem resources */
-               if (!r)
-                       break;
+       for (n = 0; n < dev->num_resources; n++) {
+               r[n] = platform_get_resource(dev, IORESOURCE_MEM, n);
+               if (!r[n])
+                       goto fail;
 
-               regs = devm_request_and_ioremap(&dev->dev, r);
-               if (!regs)
+               pdata->reg_mem[n] = request_mem_region(r[n]->start,
+                                                    resource_size(r[n]),
+                                                    dev_name(&dev->dev));
+               if (!pdata->reg_mem[n])
                        goto fail;
 
-               pdata->aperture[i] = regs;
+               pdata->aperture[n] = ioremap(r[n]->start, resource_size(r[n]));
+               if (!pdata->aperture[n])
+                       goto fail;
        }
 
        return 0;
 
 fail:
+       for (n = 0; n < dev->num_resources; n++) {
+               if (r[n]) {
+                       if (pdata->aperture[n])
+                               iounmap(pdata->aperture[n]);
+                       if (pdata->reg_mem[n])
+                               release_mem_region(r[n]->start, resource_size(r[n]));
+               }
+       }
        dev_err(&dev->dev, "failed to get register memory\n");
-
        return -ENXIO;
 }
 EXPORT_SYMBOL(nvhost_client_device_get_resources);
diff --git a/drivers/video/tegra/host/bus_client.h b/drivers/video/tegra/host/bus_client.h
index e0222b1..a9cb54f 100644
@@ -48,4 +48,6 @@ nvhost_client_request_firmware(struct platform_device *dev,
 
 int nvhost_client_device_get_resources(struct platform_device *dev);
 
+struct nvhost_hwctx *nvhost_channel_get_file_hwctx(int fd);
+
 #endif
diff --git a/drivers/video/tegra/host/chip_support.c b/drivers/video/tegra/host/chip_support.c
index 4e82642..cf338cc 100644
@@ -27,6 +27,7 @@
 #include "t20/t20.h"
 #include "t30/t30.h"
 #include "t114/t114.h"
+#include "t124/t124.h"
 #include "t148/t148.h"
 
 #include <mach/hardware.h>
@@ -67,6 +68,11 @@ int nvhost_init_chip_support(struct nvhost_master *host)
                err = nvhost_init_t114_support(host, nvhost_chip_ops);
                break;
 
+       case TEGRA_CHIPID_TEGRA12:
+               nvhost_chip_ops->soc_name = "tegra12x";
+               err = nvhost_init_t124_support(host, nvhost_chip_ops);
+               break;
+
        case TEGRA_CHIPID_TEGRA14:
                nvhost_chip_ops->soc_name = "tegra14x";
                err = nvhost_init_t148_support(host, nvhost_chip_ops);
diff --git a/drivers/video/tegra/host/chip_support.h b/drivers/video/tegra/host/chip_support.h
index ed381e0..537cd05 100644
@@ -20,6 +20,8 @@
 #ifndef _NVHOST_CHIP_SUPPORT_H_
 #define _NVHOST_CHIP_SUPPORT_H_
 
+#include <linux/nvhost_ioctl.h>
+
 #include <linux/types.h>
 
 struct output;
@@ -33,6 +35,7 @@ struct nvhost_hwctx;
 struct nvhost_cdma;
 struct nvhost_job;
 struct push_buffer;
+struct nvhost_as;
 struct nvhost_syncpt;
 struct dentry;
 struct nvhost_job;
@@ -43,6 +46,22 @@ struct mem_mgr;
 struct platform_device;
 struct host1x_actmon;
 
+struct nvhost_zcull_ops {
+       int (*get_size)(struct nvhost_hwctx *,
+                   struct nvhost_zcull_get_size_args *args);
+       int (*bind)(struct nvhost_hwctx *,
+                   struct nvhost_zcull_bind_args *args);
+       int (*get_info)(struct nvhost_hwctx *,
+                   struct nvhost_zcull_get_info_args *args);
+};
+
+struct nvhost_zbc_ops {
+       int (*set_table)(struct nvhost_hwctx *,
+                   struct nvhost_zbc_set_table_args *args);
+       int (*query_table)(struct nvhost_hwctx *,
+                   struct nvhost_zbc_query_table_args *args);
+};
+
 struct nvhost_channel_ops {
        const char * soc_name;
        int (*init)(struct nvhost_channel *,
@@ -51,7 +70,26 @@ struct nvhost_channel_ops {
        int (*submit)(struct nvhost_job *job);
        int (*save_context)(struct nvhost_channel *channel);
        int (*drain_read_fifo)(struct nvhost_channel *ch,
-               u32 *ptr, unsigned int count, unsigned int *pending);
+               u32 *ptr, unsigned int count, unsigned int *pending);
+       int (*alloc_obj)(struct nvhost_hwctx *,
+                       struct nvhost_alloc_obj_ctx_args *args);
+       int (*free_obj)(struct nvhost_hwctx *,
+                       struct nvhost_free_obj_ctx_args *args);
+       int (*alloc_gpfifo)(struct nvhost_hwctx *,
+                       struct nvhost_alloc_gpfifo_args *args);
+       int (*submit_gpfifo)(struct nvhost_hwctx *,
+                       struct nvhost_gpfifo *gpfifo,
+                       u32 num_entries,
+                       struct nvhost_fence *fence,
+                       u32 flags);
+       int (*map_buffer)(struct nvhost_hwctx *,
+                         struct nvhost_map_buffer_args *args);
+       int (*unmap_buffer)(struct nvhost_hwctx *,
+                           struct nvhost_unmap_buffer_args *args);
+       int (*wait)(struct nvhost_hwctx *,
+                   struct nvhost_wait_args *args);
+       struct nvhost_zcull_ops zcull;
+       struct nvhost_zbc_ops zbc;
 };
 
 struct nvhost_cdma_ops {
@@ -138,6 +176,10 @@ struct nvhost_dev_ops {
        void (*free_nvhost_channel)(struct nvhost_channel *ch);
 };
 
+struct nvhost_as_ops {
+       int (*init)(struct nvhost_master *host, struct nvhost_as *as);
+};
+
 struct nvhost_actmon_ops {
        int (*init)(struct host1x_actmon *actmon);
        void (*deinit)(struct host1x_actmon *actmon);
@@ -172,10 +214,92 @@ struct nvhost_chip_support {
        struct nvhost_syncpt_ops syncpt;
        struct nvhost_intr_ops intr;
        struct nvhost_dev_ops nvhost_dev;
+       struct nvhost_as_ops as;
        struct nvhost_actmon_ops actmon;
        struct nvhost_tickctrl_ops tickctrl;
+       void (*remove_support)(struct nvhost_chip_support *op);
+       void *priv;
 };
 
+/*
+ * These accumulate because each newer SoC shares code with the older
+ * ones: 3x reuses some of the 2x code, and 12x likewise builds on its
+ * predecessors.
+ */
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
+#define TEGRA_2X_OR_HIGHER_CONFIG
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_3x_SOC
+#define TEGRA_3X_OR_HIGHER_CONFIG
+#define TEGRA_2X_OR_HIGHER_CONFIG
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_11x_SOC
+#define TEGRA_11X_OR_HIGHER_CONFIG
+#define TEGRA_3X_OR_HIGHER_CONFIG
+#define TEGRA_2X_OR_HIGHER_CONFIG
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_14x_SOC
+#define TEGRA_14X_OR_HIGHER_CONFIG
+#define TEGRA_11X_OR_HIGHER_CONFIG
+#define TEGRA_3X_OR_HIGHER_CONFIG
+#define TEGRA_2X_OR_HIGHER_CONFIG
+#endif
+
+#ifdef CONFIG_ARCH_TEGRA_12x_SOC
+#define TEGRA_12X_OR_HIGHER_CONFIG
+#define TEGRA_14X_OR_HIGHER_CONFIG
+#define TEGRA_11X_OR_HIGHER_CONFIG
+#define TEGRA_3X_OR_HIGHER_CONFIG
+#define TEGRA_2X_OR_HIGHER_CONFIG
+#endif
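+
+/*
+ * Example: building with only CONFIG_ARCH_TEGRA_11x_SOC=y defines the
+ * 11X, 3X and 2X markers, so nvhost_init_t20/t30/t114_support() come
+ * from their real implementations, while nvhost_init_t148_support()
+ * and nvhost_init_t124_support() resolve to the -ENODEV stubs below.
+ */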
+
+#ifndef TEGRA_2X_OR_HIGHER_CONFIG
+static inline int nvhost_init_t20_support(struct nvhost_master *host,
+                                         struct nvhost_chip_support *op)
+{
+       return -ENODEV;
+}
+#endif
+
+#ifndef TEGRA_3X_OR_HIGHER_CONFIG
+static inline int nvhost_init_t30_support(struct nvhost_master *host,
+                                         struct nvhost_chip_support *op)
+{
+       return -ENODEV;
+}
+#endif
+
+#ifndef TEGRA_11X_OR_HIGHER_CONFIG
+static inline int nvhost_init_t114_support(struct nvhost_master *host,
+                                          struct nvhost_chip_support *op)
+{
+       return -ENODEV;
+}
+#endif
+
+#ifndef TEGRA_14X_OR_HIGHER_CONFIG
+static inline int nvhost_init_t148_support(struct nvhost_master *host,
+                                          struct nvhost_chip_support *op)
+{
+       return -ENODEV;
+}
+#endif
+
+#ifndef TEGRA_12X_OR_HIGHER_CONFIG
+static inline int nvhost_init_t124_support(struct nvhost_master *host,
+                                          struct nvhost_chip_support *op)
+{
+       return -ENODEV;
+}
+#endif
+
 struct nvhost_chip_support *nvhost_get_chip_ops(void);
 
 #define host_device_op()       (nvhost_get_chip_ops()->nvhost_dev)
@@ -185,9 +309,12 @@ struct nvhost_chip_support *nvhost_get_chip_ops(void);
 #define intr_op()              (nvhost_get_chip_ops()->intr)
 #define cdma_op()              (nvhost_get_chip_ops()->cdma)
 #define cdma_pb_op()           (nvhost_get_chip_ops()->push_buffer)
+#define channel_zcull_op()     (nvhost_get_chip_ops()->channel.zcull)
+#define channel_zbc_op()       (nvhost_get_chip_ops()->channel.zbc)
+
 #define actmon_op()            (nvhost_get_chip_ops()->actmon)
 #define tickctrl_op()          (nvhost_get_chip_ops()->tickctrl)
 
-int nvhost_init_chip_support(struct nvhost_master *host);
+int nvhost_init_chip_support(struct nvhost_master *);
 
 #endif /* _NVHOST_CHIP_SUPPORT_H_ */
diff --git a/drivers/video/tegra/host/class_ids.h b/drivers/video/tegra/host/class_ids.h
index dff2620..19bc3a8 100644
@@ -27,6 +27,8 @@ enum {
        NV_VIDEO_ENCODE_MPEG_CLASS_ID   = 0x20,
        NV_VIDEO_ENCODE_MSENC_CLASS_ID  = 0x21,
        NV_GRAPHICS_3D_CLASS_ID         = 0x60,
+       NV_GRAPHICS_GPU_CLASS_ID        = 0x61,
+       NV_GRAPHICS_VIC_CLASS_ID        = 0x5D,
        NV_TSEC_CLASS_ID                = 0xE0,
 };
 
diff --git a/drivers/video/tegra/host/debug.c b/drivers/video/tegra/host/debug.c
index 4a40752..ad63db0 100644
@@ -241,11 +241,6 @@ void nvhost_debug_init(struct nvhost_master *master)
                        &nvhost_debug_force_timeout_dump);
        nvhost_debug_force_timeout_dump = 0;
 }
-#else
-void nvhost_debug_init(struct nvhost_master *master)
-{
-}
-#endif
 
 void nvhost_debug_dump(struct nvhost_master *master)
 {
@@ -254,3 +249,4 @@ void nvhost_debug_dump(struct nvhost_master *master)
        };
        show_all(master, &o);
 }
+#endif
diff --git a/drivers/video/tegra/host/dev.c b/drivers/video/tegra/host/dev.c
index bcc156a..d1d762f 100644
 
 #include <mach/gpufuse.h>
 
+#include "dev.h"
+
+#if defined(NVHOST_DEBUG)
+u32 nvhost_dbg_mask = NVHOST_DBG_MASK; /* can also be changed at run time */
+#endif
+
 /* host1x device list is used in 2 places:
  * 1. In ioctl(NVHOST_IOCTL_CTRL_MODULE_REGRDWR) of host1x device
  * 2. debug-fs dump of host1x and client device
diff --git a/drivers/video/tegra/host/dev.h b/drivers/video/tegra/host/dev.h
index 12dfda5..606b497 100644
@@ -30,4 +30,86 @@ void nvhost_device_list_for_all(void *data,
 struct platform_device *nvhost_device_list_match_by_id(u32 id);
 void nvhost_device_list_remove(struct platform_device *pdev);
 
+
+/* debug info */
+/*#define NVHOST_DEBUG*/
+#define NVHOST_DBG_MASK (dbg_info|dbg_fn)
+
+enum nvhost_dbg_categories {
+       dbg_info    = BIT(0),  /* lightly verbose info */
+       dbg_err     = BIT(1),  /* verbosity around errors*/
+       dbg_fn      = BIT(2),  /* fn name tracing */
+       dbg_reg     = BIT(3),  /* register accesses, very verbose */
+       dbg_pte     = BIT(4),  /* gmmu ptes */
+       dbg_intr    = BIT(5),  /* interrupts */
+       dbg_pmu     = BIT(6),  /* gk20a pmu */
+       dbg_mem     = BIT(31), /* memory accesses, very verbose */
+};
+
+#if defined(NVHOST_DEBUG)
+extern u32 nvhost_dbg_mask;
+#define nvhost_dbg(dbg_mask, format, arg...)                           \
+do {                                                                   \
+       if ((dbg_mask) & nvhost_dbg_mask)                               \
+               printk(KERN_DEBUG "nvhost %s: " format "\n", __func__, ##arg);\
+} while (0)
+
+#else /* NVHOST_DEBUG */
+#define nvhost_dbg(dbg_mask, format, arg...)                           \
+do {                                                                   \
+       if (0)                                                          \
+               printk(KERN_DEBUG "nvhost %s: " format "\n", __func__, ##arg);\
+} while (0)
+
+#endif
+
+/* convenience: shorter err/fn/dbg_info wrappers */
+#define nvhost_err(d, fmt, arg...) \
+       dev_err(d, "%s: " fmt "\n", __func__, ##arg)
+
+#define nvhost_warn(d, fmt, arg...) \
+       dev_warn(d, "%s: " fmt "\n", __func__, ##arg)
+
+#define nvhost_dbg_fn(fmt, arg...) \
+       nvhost_dbg(dbg_fn, fmt, ##arg)
+
+#define nvhost_dbg_info(fmt, arg...) \
+       nvhost_dbg(dbg_info, fmt, ##arg)
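+
+/*
+ * Typical use: define NVHOST_DEBUG above, then e.g.
+ *
+ *     nvhost_dbg(dbg_intr | dbg_info, "syncpt %d", id);
+ *
+ * prints only when one of the requested bits is set in nvhost_dbg_mask.
+ */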
+
+/* mem access with dbg_mem logging */
+static inline u8 mem_rd08(void *ptr, int b)
+{
+       u8 _b = ((const u8 *)ptr)[b];
+       nvhost_dbg(dbg_mem, " %p = 0x%x", ptr+sizeof(u8)*b, _b);
+       return _b;
+}
+static inline u16 mem_rd16(void *ptr, int s)
+{
+       u16 _s = ((const u16 *)ptr)[s];
+       nvhost_dbg(dbg_mem, " %p = 0x%x", ptr+sizeof(u16)*s, _s);
+       return _s;
+}
+static inline u32 mem_rd32(void *ptr, int w)
+{
+       u32 _w = ((const u32 *)ptr)[w];
+       nvhost_dbg(dbg_mem, " %p = 0x%x", ptr + sizeof(u32)*w, _w);
+       return _w;
+}
+static inline void mem_wr08(void *ptr, int b, u8 data)
+{
+       nvhost_dbg(dbg_mem, " %p = 0x%x", ptr+sizeof(u8)*b, data);
+       ((u8 *)ptr)[b] = data;
+}
+static inline void mem_wr16(void *ptr, int s, u16 data)
+{
+       nvhost_dbg(dbg_mem, " %p = 0x%x", ptr+sizeof(u16)*s, data);
+       ((u16 *)ptr)[s] = data;
+}
+static inline void mem_wr32(void *ptr, int w, u32 data)
+{
+       nvhost_dbg(dbg_mem, " %p = 0x%x", ptr+sizeof(u32)*w, data);
+       ((u32 *)ptr)[w] = data;
+}
+
 #endif
diff --git a/drivers/video/tegra/host/nvhost_acm.c b/drivers/video/tegra/host/nvhost_acm.c
index 41e7772..0243ea8 100644
@@ -54,6 +54,7 @@ struct nvhost_module_client {
 
 static void do_powergate_locked(int id)
 {
+       nvhost_dbg_fn("%d", id);
        if (id != -1 && tegra_powergate_is_powered(id))
                tegra_powergate_partition(id);
 }
@@ -242,6 +243,8 @@ int nvhost_module_set_rate(struct platform_device *dev, void *priv,
        int ret = 0;
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
 
+       nvhost_dbg_fn("%s", dev->name);
+
        mutex_lock(&client_list_lock);
        list_for_each_entry(m, &pdata->client_list, node) {
                if (m->priv == priv) {
@@ -275,6 +278,8 @@ int nvhost_module_add_client(struct platform_device *dev, void *priv)
        struct nvhost_module_client *client;
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
 
+       nvhost_dbg_fn("%s num_clks=%d priv=%p", dev->name, pdata->num_clks, priv);
+
        client = kzalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return -ENOMEM;
@@ -296,6 +301,8 @@ void nvhost_module_remove_client(struct platform_device *dev, void *priv)
        int found = 0;
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
 
+       nvhost_dbg_fn("%s priv=%p", dev->name, priv);
+
        mutex_lock(&client_list_lock);
        list_for_each_entry(m, &pdata->client_list, node) {
                if (priv == m->priv) {
@@ -426,28 +433,35 @@ int nvhost_module_init(struct platform_device *dev)
        struct nvhost_device_data *pdata = platform_get_drvdata(dev);
 
        /* initialize clocks to known state (=enabled) */
+       pdata->num_clks = 0;
        INIT_LIST_HEAD(&pdata->client_list);
        while (pdata->clocks[i].name && i < NVHOST_MODULE_MAX_CLOCKS) {
                char devname[MAX_DEVID_LENGTH];
                long rate = pdata->clocks[i].default_rate;
                struct clk *c;
 
-               snprintf(devname, MAX_DEVID_LENGTH, "tegra_%s",
-                       dev_name(&dev->dev));
+               snprintf(devname, MAX_DEVID_LENGTH,
+                        (dev->id <= 0) ? "tegra_%s" : "tegra_%s.%d",
+                        dev->name, dev->id);
                c = clk_get_sys(devname, pdata->clocks[i].name);
                if (IS_ERR(c)) {
                        dev_err(&dev->dev, "Cannot get clock %s\n",
                                        pdata->clocks[i].name);
+                       /* arguably we should fail init here instead... */
+                       i++;
                        continue;
                }
-
+               nvhost_dbg_fn("%s->clk[%d] -> %s:%s:%p",
+                             dev->name, pdata->num_clks,
+                             devname, pdata->clocks[i].name,
+                             c);
                rate = clk_round_rate(c, rate);
                clk_prepare_enable(c);
                clk_set_rate(c, rate);
-               pdata->clk[i] = c;
+               pdata->clk[pdata->num_clks++] = c;
                i++;
        }
-       pdata->num_clks = i;
 
        /* reset the module */
        mutex_lock(&pdata->lock);
diff --git a/drivers/video/tegra/host/nvhost_allocator.c b/drivers/video/tegra/host/nvhost_allocator.c
new file mode 100644
index 0000000..78551fe
--- /dev/null
@@ -0,0 +1,1206 @@
+/*
+ * drivers/video/tegra/host/nvhost_allocator.c
+ *
+ * nvhost allocator
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include "nvhost_allocator.h"
+
+static inline void link_block_list(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               struct nvhost_alloc_block *prev,
+               struct rb_node *rb_parent);
+static inline void link_block_rb(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               struct rb_node **rb_link,
+               struct rb_node *rb_parent);
+static void link_block(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               struct nvhost_alloc_block *prev, struct rb_node **rb_link,
+               struct rb_node *rb_parent);
+static void insert_block(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block);
+
+static void unlink_block(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               struct nvhost_alloc_block *prev);
+static struct nvhost_alloc_block *unlink_blocks(
+               struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               struct nvhost_alloc_block *prev, u32 end);
+
+static struct nvhost_alloc_block *find_block(
+               struct nvhost_allocator *allocator, u32 addr);
+static struct nvhost_alloc_block *find_block_prev(
+               struct nvhost_allocator *allocator, u32 addr,
+               struct nvhost_alloc_block **pprev);
+static struct nvhost_alloc_block *find_block_prepare(
+               struct nvhost_allocator *allocator, u32 addr,
+               struct nvhost_alloc_block **pprev, struct rb_node ***rb_link,
+               struct rb_node **rb_parent);
+
+static u32 check_free_space(u32 addr, u32 limit, u32 len, u32 align);
+static void update_free_addr_cache(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               u32 addr, u32 len, bool free);
+static int find_free_area(struct nvhost_allocator *allocator,
+               u32 *addr, u32 len);
+static int find_free_area_nc(struct nvhost_allocator *allocator,
+               u32 *addr, u32 *len);
+
+static void adjust_block(struct nvhost_alloc_block *block,
+               u32 start, u32 end,
+               struct nvhost_alloc_block *insert);
+static struct nvhost_alloc_block *merge_block(
+               struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block, u32 addr, u32 end);
+static int split_block(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               u32 addr, int new_below);
+
+static int block_alloc_single_locked(struct nvhost_allocator *allocator,
+               u32 *addr, u32 len);
+static int block_alloc_list_locked(struct nvhost_allocator *allocator,
+               u32 *addr, u32 len,
+               struct nvhost_alloc_block **pblock);
+static int block_free_locked(struct nvhost_allocator *allocator,
+               u32 addr, u32 len);
+static void block_free_list_locked(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *list);
+
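+/*
+ * Blocks are indexed two ways at once: a doubly linked list ordered by
+ * address (allocator->block_first, ->prev/->next) gives cheap access to
+ * neighbours while merging and splitting, and an rb tree
+ * (allocator->rb_root) gives O(log n) lookup by address; block_recent
+ * caches the most recently found block as a fast path for find_block().
+ */
+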
+/* link a block into allocator block list */
+static inline void link_block_list(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               struct nvhost_alloc_block *prev,
+               struct rb_node *rb_parent)
+{
+       struct nvhost_alloc_block *next;
+
+       block->prev = prev;
+       if (prev) {
+               next = prev->next;
+               prev->next = block;
+       } else {
+               allocator->block_first = block;
+               if (rb_parent)
+                       next = rb_entry(rb_parent,
+                                       struct nvhost_alloc_block, rb);
+               else
+                       next = NULL;
+       }
+       block->next = next;
+       if (next)
+               next->prev = block;
+}
+
+/* link a block into allocator rb tree */
+static inline void link_block_rb(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block, struct rb_node **rb_link,
+               struct rb_node *rb_parent)
+{
+       rb_link_node(&block->rb, rb_parent, rb_link);
+       rb_insert_color(&block->rb, &allocator->rb_root);
+}
+
+/* add a block to allocator with known location */
+static void link_block(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               struct nvhost_alloc_block *prev, struct rb_node **rb_link,
+               struct rb_node *rb_parent)
+{
+       struct nvhost_alloc_block *next;
+
+       link_block_list(allocator, block, prev, rb_parent);
+       link_block_rb(allocator, block, rb_link, rb_parent);
+       allocator->block_count++;
+
+       next = block->next;
+       allocator_dbg(allocator, "link new block %d:%d between block %d:%d "
+               "and block %d:%d",
+               block->start, block->end,
+               prev ? prev->start : -1, prev ? prev->end : -1,
+               next ? next->start : -1, next ? next->end : -1);
+}
+
+/* add a block to allocator */
+static void insert_block(struct nvhost_allocator *allocator,
+                       struct nvhost_alloc_block *block)
+{
+       struct nvhost_alloc_block *prev;
+       struct rb_node **rb_link, *rb_parent;
+
+       find_block_prepare(allocator, block->start,
+                       &prev, &rb_link, &rb_parent);
+       link_block(allocator, block, prev, rb_link, rb_parent);
+}
+
+/* remove a block from allocator */
+static void unlink_block(struct nvhost_allocator *allocator,
+                       struct nvhost_alloc_block *block,
+                       struct nvhost_alloc_block *prev)
+{
+       struct nvhost_alloc_block *next = block->next;
+
+       allocator_dbg(allocator, "unlink block %d:%d between block %d:%d "
+               "and block %d:%d",
+               block->start, block->end,
+               prev ? prev->start : -1, prev ? prev->end : -1,
+               next ? next->start : -1, next ? next->end : -1);
+
+       BUG_ON(block->start < allocator->base);
+       BUG_ON(block->end > allocator->limit);
+
+       if (prev)
+               prev->next = next;
+       else
+               allocator->block_first = next;
+
+       if (next)
+               next->prev = prev;
+       rb_erase(&block->rb, &allocator->rb_root);
+       if (allocator->block_recent == block)
+               allocator->block_recent = prev;
+
+       allocator->block_count--;
+}
+
+/* remove a list of blocks from the allocator. the list can contain both
+   regular blocks and non-contiguous blocks. skip all non-contiguous
+   blocks, move the regular blocks onto a separate list, return its head */
+static struct nvhost_alloc_block *
+unlink_blocks(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block,
+               struct nvhost_alloc_block *prev,
+               u32 end)
+{
+       struct nvhost_alloc_block **insertion_point;
+       struct nvhost_alloc_block *last_unfreed_block = prev;
+       struct nvhost_alloc_block *last_freed_block = NULL;
+       struct nvhost_alloc_block *first_freed_block = NULL;
+
+       insertion_point = (prev ? &prev->next : &allocator->block_first);
+       *insertion_point = NULL;
+
+       do {
+               if (!block->nc_block) {
+                       allocator_dbg(allocator, "unlink block %d:%d",
+                               block->start, block->end);
+                       if (last_freed_block)
+                               last_freed_block->next = block;
+                       block->prev = last_freed_block;
+                       rb_erase(&block->rb, &allocator->rb_root);
+                       last_freed_block = block;
+                       allocator->block_count--;
+                       if (!first_freed_block)
+                               first_freed_block = block;
+               } else {
+                       allocator_dbg(allocator, "skip nc block %d:%d",
+                               block->start, block->end);
+                       if (!*insertion_point)
+                               *insertion_point = block;
+                       if (last_unfreed_block)
+                               last_unfreed_block->next = block;
+                       block->prev = last_unfreed_block;
+                       last_unfreed_block = block;
+               }
+               block = block->next;
+       } while (block && block->start < end);
+
+       if (!*insertion_point)
+               *insertion_point = block;
+
+       if (block)
+               block->prev = last_unfreed_block;
+       if (last_unfreed_block)
+               last_unfreed_block->next = block;
+       if (last_freed_block)
+               last_freed_block->next = NULL;
+
+       allocator->block_recent = NULL;
+
+       return first_freed_block;
+}
+
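+/*
+ * illustrative example for unlink_blocks() above (not driver code): given
+ * the list P -> A -> N -> B -> Q, where A and B are regular blocks in
+ * [addr, end), N is a non-contiguous block in that range and Q starts
+ * beyond end, the allocator list becomes P -> N -> Q and the returned
+ * freed list is A -> B.
+ */
+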
+/* Look up the first block which satisfies addr < block->end,
+   NULL if none */
+static struct nvhost_alloc_block *
+find_block(struct nvhost_allocator *allocator, u32 addr)
+{
+       struct nvhost_alloc_block *block = allocator->block_recent;
+
+       if (!(block && block->end > addr && block->start <= addr)) {
+               struct rb_node *rb_node;
+
+               rb_node = allocator->rb_root.rb_node;
+               block = NULL;
+
+               while (rb_node) {
+                       struct nvhost_alloc_block *block_tmp;
+
+                       block_tmp = rb_entry(rb_node,
+                                       struct nvhost_alloc_block, rb);
+
+                       if (block_tmp->end > addr) {
+                               block = block_tmp;
+                               if (block_tmp->start <= addr)
+                                       break;
+                               rb_node = rb_node->rb_left;
+                       } else
+                               rb_node = rb_node->rb_right;
+                       if (block)
+                               allocator->block_recent = block;
+               }
+       }
+       return block;
+}
+
+/* Same as find_block, but also return a pointer to the previous block */
+static struct nvhost_alloc_block *
+find_block_prev(struct nvhost_allocator *allocator, u32 addr,
+               struct nvhost_alloc_block **pprev)
+{
+       struct nvhost_alloc_block *block = NULL, *prev = NULL;
+       struct rb_node *rb_node;
+       if (!allocator)
+               goto out;
+
+       block = allocator->block_first;
+
+       rb_node = allocator->rb_root.rb_node;
+
+       while (rb_node) {
+               struct nvhost_alloc_block *block_tmp;
+               block_tmp = rb_entry(rb_node, struct nvhost_alloc_block, rb);
+
+               if (addr < block_tmp->end)
+                       rb_node = rb_node->rb_left;
+               else {
+                       prev = block_tmp;
+                       if (!prev->next || addr < prev->next->end)
+                               break;
+                       rb_node = rb_node->rb_right;
+               }
+       }
+
+out:
+       *pprev = prev;
+       return prev ? prev->next : block;
+}
+
+/* Same as find_block, but also return a pointer to the previous block
+   and return rb_node to prepare for rbtree insertion */
+static struct nvhost_alloc_block *
+find_block_prepare(struct nvhost_allocator *allocator, u32 addr,
+               struct nvhost_alloc_block **pprev, struct rb_node ***rb_link,
+               struct rb_node **rb_parent)
+{
+       struct nvhost_alloc_block *block;
+       struct rb_node **__rb_link, *__rb_parent, *rb_prev;
+
+       __rb_link = &allocator->rb_root.rb_node;
+       rb_prev = __rb_parent = NULL;
+       block = NULL;
+
+       while (*__rb_link) {
+               struct nvhost_alloc_block *block_tmp;
+
+               __rb_parent = *__rb_link;
+               block_tmp = rb_entry(__rb_parent,
+                               struct nvhost_alloc_block, rb);
+
+               if (block_tmp->end > addr) {
+                       block = block_tmp;
+                       if (block_tmp->start <= addr)
+                               break;
+                       __rb_link = &__rb_parent->rb_left;
+               } else {
+                       rb_prev = __rb_parent;
+                       __rb_link = &__rb_parent->rb_right;
+               }
+       }
+
+       *pprev = NULL;
+       if (rb_prev)
+               *pprev = rb_entry(rb_prev, struct nvhost_alloc_block, rb);
+       *rb_link = __rb_link;
+       *rb_parent = __rb_parent;
+       return block;
+}
+
+/* return available space */
+static u32 check_free_space(u32 addr, u32 limit, u32 len, u32 align)
+{
+       if (addr >= limit)
+               return 0;
+       if (addr + len <= limit)
+               return len;
+       return (limit - addr) & ~(align - 1);
+}
+
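+/*
+ * worked example for check_free_space() above (illustrative): with
+ * addr = 100, limit = 220, len = 200 and align = 16, addr + len overshoots
+ * the limit, so the usable space is (220 - 100) & ~15 = 112. with
+ * limit = 400 the request fits and len (200) is returned unchanged.
+ */
+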
+/* update first_free_addr/last_free_addr based on a new free addr;
+   called when blocks are freed or allocated */
+static void update_free_addr_cache(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *next,
+               u32 addr, u32 len, bool free)
+{
+       /* update from block free */
+       if (free) {
+               if (allocator->first_free_addr > addr)
+                       allocator->first_free_addr = addr;
+       } else { /* update from block alloc */
+               if (allocator->last_free_addr < addr + len)
+                       allocator->last_free_addr = addr + len;
+               if (allocator->first_free_addr == addr) {
+                       if (!next || next->start > addr + len)
+                               allocator->first_free_addr = addr + len;
+                       else
+                               allocator->first_free_addr = next->end;
+               }
+       }
+
+       if (allocator->first_free_addr > allocator->last_free_addr)
+               allocator->first_free_addr = allocator->last_free_addr;
+}
+
+/* find a free address range for a fixed len */
+static int find_free_area(struct nvhost_allocator *allocator,
+                       u32 *addr, u32 len)
+{
+       struct nvhost_alloc_block *block;
+       u32 start_addr;
+
+       /* fixed addr allocation */
+       if (*addr) {
+               block = find_block(allocator, *addr);
+               if (allocator->limit - len >= *addr &&
+                   (!block || *addr + len <= block->start)) {
+                       update_free_addr_cache(allocator, block,
+                                       *addr, len, false);
+                       return 0;
+               } else
+                       return -ENOMEM;
+       }
+
+       /* cached_hole_size has max free space up to last_free_addr */
+       if (len > allocator->cached_hole_size)
+               start_addr = *addr = allocator->last_free_addr;
+       else {
+               start_addr = *addr = allocator->base;
+               allocator->cached_hole_size = 0;
+       }
+
+       allocator_dbg(allocator, "start search addr : %d", start_addr);
+
+full_search:
+       for (block = find_block(allocator, *addr); ; block = block->next) {
+               if (allocator->limit - len < *addr) {
+                       /* start a new search in case we missed any hole */
+                       if (start_addr != allocator->base) {
+                               start_addr = *addr = allocator->base;
+                               allocator->cached_hole_size = 0;
+                               allocator_dbg(allocator, "start a new search from base");
+                               goto full_search;
+                       }
+                       return -ENOMEM;
+               }
+               if (!block || *addr + len <= block->start) {
+                       update_free_addr_cache(allocator, block,
+                                       *addr, len, false);
+                       allocator_dbg(allocator, "free space from %d, len %d",
+                               *addr, len);
+                       allocator_dbg(allocator, "next free addr: %d",
+                               allocator->last_free_addr);
+                       return 0;
+               }
+               if (*addr + allocator->cached_hole_size < block->start)
+                       allocator->cached_hole_size = block->start - *addr;
+               *addr = block->end;
+       }
+}
+
+/* find a free address range as long as possible up to *len, subject to
+   the alignment requirement */
+static int find_free_area_nc(struct nvhost_allocator *allocator,
+                       u32 *addr, u32 *len)
+{
+       struct nvhost_alloc_block *block;
+       u32 start_addr;
+       u32 avail_len;
+
+       /* fixed addr allocation */
+       if (*addr) {
+               block = find_block(allocator, *addr);
+               if (allocator->limit - *len >= *addr) {
+                       if (!block)
+                               return 0;
+
+                       avail_len = check_free_space(*addr, block->start,
+                                               *len, allocator->align);
+                       if (avail_len != 0) {
+                               update_free_addr_cache(allocator, block,
+                                       *addr, avail_len, false);
+                               allocator_dbg(allocator,
+                                       "free space between %d, %d, len %d",
+                                       *addr, block->start, avail_len);
+                               allocator_dbg(allocator, "next free addr: %d",
+                                       allocator->last_free_addr);
+                               *len = avail_len;
+                               return 0;
+                       } else
+                               return -ENOMEM;
+               } else
+                       return -ENOMEM;
+       }
+
+       start_addr = *addr = allocator->first_free_addr;
+
+       allocator_dbg(allocator, "start search addr : %d", start_addr);
+
+       for (block = find_block(allocator, *addr); ; block = block->next) {
+               if (allocator->limit - *len < *addr)
+                       return -ENOMEM;
+               if (!block) {
+                       update_free_addr_cache(allocator, block,
+                                       *addr, *len, false);
+                       allocator_dbg(allocator, "free space from %d, len %d",
+                               *addr, *len);
+                       allocator_dbg(allocator, "next free addr: %d",
+                               allocator->first_free_addr);
+                       return 0;
+               }
+
+               avail_len = check_free_space(*addr, block->start,
+                                       *len, allocator->align);
+               if (avail_len != 0) {
+                       update_free_addr_cache(allocator, block,
+                                       *addr, avail_len, false);
+                       allocator_dbg(allocator, "free space between %d, %d, len %d",
+                               *addr, block->start, avail_len);
+                       allocator_dbg(allocator, "next free addr: %d",
+                               allocator->first_free_addr);
+                       *len = avail_len;
+                       return 0;
+               }
+               if (*addr + allocator->cached_hole_size < block->start)
+                       allocator->cached_hole_size = block->start - *addr;
+               *addr = block->end;
+       }
+}
+
+/* expand/shrink a block to a new start and new end.
+   for a shrink, split_block() provides the block to insert */
+static void adjust_block(struct nvhost_alloc_block *block,
+               u32 start, u32 end, struct nvhost_alloc_block *insert)
+{
+       struct nvhost_allocator *allocator = block->allocator;
+
+       allocator_dbg(allocator, "curr block %d:%d, new start %d, new end %d",
+               block->start, block->end, start, end);
+
+       /* expand */
+       if (!insert) {
+               if (start == block->end) {
+                       struct nvhost_alloc_block *next = block->next;
+
+                       if (next && end == next->start) {
+                               /* ....AAAA.... */
+                               /* PPPP....NNNN */
+                               /* PPPPPPPPPPPP */
+                               unlink_block(allocator, next, block);
+                               block->end = next->end;
+                               kmem_cache_free(allocator->block_cache, next);
+                       } else {
+                               /* ....AAAA.... */
+                               /* PPPP........ */
+                               /* PPPPPPPP.... */
+                               block->end = end;
+                       }
+               }
+
+               if (end == block->start) {
+                       /* ....AAAA.... */
+                       /* ........NNNN */
+                       /* PP..NNNNNNNN        ....NNNNNNNN */
+                       block->start = start;
+               }
+       } else { /* shrink */
+               /* BBBBBBBB -> BBBBIIII  OR  BBBBBBBB -> IIIIBBBB */
+               block->start = start;
+               block->end = end;
+               insert_block(allocator, insert);
+       }
+}
+
+/* given a range [addr, end], merge it with blocks before or after or both
+   if they can be combined into a contiguous block */
+static struct nvhost_alloc_block *
+merge_block(struct nvhost_allocator *allocator,
+       struct nvhost_alloc_block *prev, u32 addr, u32 end)
+{
+       struct nvhost_alloc_block *next;
+
+       if (prev)
+               next = prev->next;
+       else
+               next = allocator->block_first;
+
+       allocator_dbg(allocator, "curr block %d:%d", addr, end);
+       if (prev)
+               allocator_dbg(allocator, "prev block %d:%d", prev->start, prev->end);
+       if (next)
+               allocator_dbg(allocator, "next block %d:%d", next->start, next->end);
+
+       /* don't merge with non-contiguous allocation block */
+       if (prev && prev->end == addr && !prev->nc_block) {
+               adjust_block(prev, addr, end, NULL);
+               return prev;
+       }
+
+       /* don't merge with non-contiguous allocation block */
+       if (next && end == next->start && !next->nc_block) {
+               adjust_block(next, addr, end, NULL);
+               return next;
+       }
+
+       return NULL;
+}
+
+/* split a block based on addr. addr must be within (start, end).
+   if new_below == 1, link new block before adjusted current block */
+static int split_block(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block, u32 addr, int new_below)
+{
+       struct nvhost_alloc_block *new_block;
+
+       allocator_dbg(allocator, "start %d, split %d, end %d, new_below %d",
+               block->start, addr, block->end, new_below);
+
+       BUG_ON(!(addr > block->start && addr < block->end));
+
+       new_block = kmem_cache_alloc(allocator->block_cache, GFP_KERNEL);
+       if (!new_block)
+               return -ENOMEM;
+
+       *new_block = *block;
+
+       if (new_below)
+               new_block->end = addr;
+       else
+               new_block->start = addr;
+
+       if (new_below)
+               adjust_block(block, addr, block->end, new_block);
+       else
+               adjust_block(block, block->start, addr, new_block);
+
+       return 0;
+}
+
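+/*
+ * worked example for split_block() above (illustrative): splitting the
+ * block [0, 10) at addr = 4 with new_below == 1 inserts a new block
+ * [0, 4) and shrinks the original to [4, 10); with new_below == 0 the
+ * new block is [4, 10) and the original shrinks to [0, 4).
+ */
+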
+/* free a list of blocks */
+static void free_blocks(struct nvhost_allocator *allocator,
+                       struct nvhost_alloc_block *block)
+{
+       struct nvhost_alloc_block *next;
+
+       while (block) {
+               /* grab next before the block is freed */
+               next = block->next;
+               kmem_cache_free(allocator->block_cache, block);
+               block = next;
+       }
+}
+
+/* called with rw_sema acquired */
+static int block_alloc_single_locked(struct nvhost_allocator *allocator,
+                               u32 *addr_req, u32 len)
+{
+       struct nvhost_alloc_block *block, *prev;
+       struct rb_node **rb_link, *rb_parent;
+       u32 addr = *addr_req;
+       int err;
+
+       *addr_req = ~0;
+
+       err = find_free_area(allocator, &addr, len);
+       if (err)
+               return err;
+
+       find_block_prepare(allocator, addr, &prev, &rb_link, &rb_parent);
+
+       /* merge requested free space with existing block(s)
+          if they can be combined into one contiguous block */
+       block = merge_block(allocator, prev, addr, addr + len);
+       if (block) {
+               *addr_req = addr;
+               return 0;
+       }
+
+       /* create a new block if cannot merge */
+       block = kmem_cache_zalloc(allocator->block_cache, GFP_KERNEL);
+       if (!block)
+               return -ENOMEM;
+
+       block->allocator = allocator;
+       block->start = addr;
+       block->end = addr + len;
+
+       link_block(allocator, block, prev, rb_link, rb_parent);
+
+       *addr_req = addr;
+
+       return 0;
+}
+
+static int block_alloc_list_locked(struct nvhost_allocator *allocator,
+       u32 *addr_req, u32 nc_len, struct nvhost_alloc_block **pblock)
+{
+       struct nvhost_alloc_block *block;
+       struct nvhost_alloc_block *nc_head = NULL, *nc_prev = NULL;
+       u32 addr = *addr_req, len = nc_len;
+       int err = 0;
+
+       *addr_req = ~0;
+
+       while (nc_len > 0) {
+               err = find_free_area_nc(allocator, &addr, &len);
+               if (err) {
+                       allocator_dbg(allocator, "not enough free space");
+                       goto clean_up;
+               }
+
+               /* never merge non-contiguous allocation block,
+                  just create a new block */
+               block = kmem_cache_zalloc(allocator->block_cache,
+                                       GFP_KERNEL);
+               if (!block) {
+                       err = -ENOMEM;
+                       goto clean_up;
+               }
+
+               block->allocator = allocator;
+               block->start = addr;
+               block->end = addr + len;
+
+               insert_block(allocator, block);
+
+               block->nc_prev = nc_prev;
+               if (nc_prev)
+                       nc_prev->nc_next = block;
+               nc_prev = block;
+               block->nc_block = true;
+
+               if (!nc_head)
+                       nc_head = block;
+
+               if (*addr_req == ~0)
+                       *addr_req = addr;
+
+               addr = 0;
+               nc_len -= len;
+               len = nc_len;
+               allocator_dbg(allocator, "remaining length %d", nc_len);
+       }
+
+clean_up:
+       if (err) {
+               while (nc_head) {
+                       unlink_block(allocator, nc_head, nc_head->prev);
+                       nc_prev = nc_head;
+                       nc_head = nc_head->nc_next;
+                       kmem_cache_free(allocator->block_cache, nc_prev);
+               }
+               *pblock = NULL;
+               *addr_req = ~0;
+       } else {
+               *pblock = nc_head;
+       }
+
+       return err;
+}
+
+/* called with rw_sema acquired */
+static int block_free_locked(struct nvhost_allocator *allocator,
+                       u32 addr, u32 len)
+{
+       struct nvhost_alloc_block *block, *prev, *last;
+       u32 end;
+       int err;
+
+       /* no block has block->end > addr, already free */
+       block = find_block_prev(allocator, addr, &prev);
+       if (!block)
+               return 0;
+
+       allocator_dbg(allocator, "first block in free range %d:%d",
+               block->start, block->end);
+
+       end = addr + len;
+       /* not in any block, already free */
+       if (block->start >= end)
+               return 0;
+
+       /* don't touch nc_block in range free */
+       if (addr > block->start && !block->nc_block) {
+               err = split_block(allocator, block, addr, 0);
+               if (err)
+                       return err;
+               prev = block;
+       }
+
+       last = find_block(allocator, end);
+       if (last && end > last->start && !last->nc_block) {
+
+               allocator_dbg(allocator, "last block in free range %d:%d",
+                       last->start, last->end);
+
+               err = split_block(allocator, last, end, 1);
+               if (err)
+                       return err;
+       }
+
+       block = prev ? prev->next : allocator->block_first;
+
+       allocator_dbg(allocator, "first block for free %d:%d",
+               block->start, block->end);
+
+       /* remove blocks between [addr, addr + len) from rb tree
+          and put them in a list */
+       block = unlink_blocks(allocator, block, prev, end);
+       free_blocks(allocator, block);
+
+       update_free_addr_cache(allocator, NULL, addr, len, true);
+
+       return 0;
+}
+
+/* called with rw_sema acquired */
+static void block_free_list_locked(struct nvhost_allocator *allocator,
+                       struct nvhost_alloc_block *list)
+{
+       struct nvhost_alloc_block *block;
+       u32 len;
+
+       update_free_addr_cache(allocator, NULL,
+                       list->start, list->end - list->start, true);
+
+       while (list) {
+               block = list;
+               unlink_block(allocator, block, block->prev);
+
+               len = block->end - block->start;
+               if (allocator->cached_hole_size < len)
+                       allocator->cached_hole_size = len;
+
+               list = block->nc_next;
+               kmem_cache_free(allocator->block_cache, block);
+       }
+}
+
+/* init allocator struct */
+int nvhost_allocator_init(struct nvhost_allocator *allocator,
+               const char *name, u32 start, u32 len, u32 align)
+{
+       memset(allocator, 0, sizeof(struct nvhost_allocator));
+
+       strncpy(allocator->name, name, sizeof(allocator->name) - 1);
+
+       allocator->block_cache =
+               kmem_cache_create(allocator->name,
+                       sizeof(struct nvhost_alloc_block), 0,
+                       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
+       if (!allocator->block_cache)
+               return -ENOMEM;
+
+       allocator->rb_root = RB_ROOT;
+
+       allocator->base = start;
+       allocator->limit = start + len;
+       allocator->align = align;
+
+       allocator_dbg(allocator, "base %d, limit %d, align %d",
+               allocator->base, allocator->limit, allocator->align);
+
+       allocator->first_free_addr = allocator->last_free_addr = start;
+       allocator->cached_hole_size = len;
+
+       init_rwsem(&allocator->rw_sema);
+
+       allocator->alloc = nvhost_block_alloc;
+       allocator->alloc_nc = nvhost_block_alloc_nc;
+       allocator->free = nvhost_block_free;
+       allocator->free_nc = nvhost_block_free_nc;
+
+       return 0;
+}
+
+/* destroy allocator, free all remaining blocks if any */
+void nvhost_allocator_destroy(struct nvhost_allocator *allocator)
+{
+       struct nvhost_alloc_block *block, *next;
+       u32 free_count = 0;
+
+       down_write(&allocator->rw_sema);
+
+       for (block = allocator->block_first; block; ) {
+               allocator_dbg(allocator, "free remaining block %d:%d",
+                       block->start, block->end);
+               next = block->next;
+               kmem_cache_free(allocator->block_cache, block);
+               free_count++;
+               block = next;
+       }
+
+       up_write(&allocator->rw_sema);
+
+       /* block_count must match the real number of blocks */
+       BUG_ON(free_count != allocator->block_count);
+
+       kmem_cache_destroy(allocator->block_cache);
+
+       memset(allocator, 0, sizeof(struct nvhost_allocator));
+}
+
+/*
+ * pass *addr != 0 to request a fixed-address allocation; pass *addr == 0
+ * to let the allocator pick a range, in which case the chosen base
+ * address is returned to the caller in *addr.
+ *
+ * contiguous allocation: allocates one block of contiguous addresses.
+ */
+int nvhost_block_alloc(struct nvhost_allocator *allocator, u32 *addr, u32 len)
+{
+       int ret;
+#if defined(ALLOCATOR_DEBUG)
+       struct nvhost_alloc_block *block;
+       bool should_fail = false;
+#endif
+
+       allocator_dbg(allocator, "[in] addr %d, len %d", *addr, len);
+
+       if (*addr + len > allocator->limit || /* check addr range */
+           *addr & (allocator->align - 1) || /* check addr alignment */
+            len == 0)                        /* check len */
+               return -EINVAL;
+
+       len = ALIGN(len, allocator->align);
+       if (!len)
+               return -ENOMEM;
+
+       down_write(&allocator->rw_sema);
+
+#if defined(ALLOCATOR_DEBUG)
+       if (*addr) {
+               for (block = allocator->block_first;
+                    block; block = block->next) {
+                       if (block->end > *addr && block->start < *addr + len) {
+                               should_fail = true;
+                               break;
+                       }
+               }
+       }
+#endif
+
+       ret = block_alloc_single_locked(allocator, addr, len);
+
+#if defined(ALLOCATOR_DEBUG)
+       if (!ret) {
+               bool allocated = false;
+               BUG_ON(should_fail);
+               BUG_ON(*addr < allocator->base);
+               BUG_ON(*addr + len > allocator->limit);
+               for (block = allocator->block_first;
+                    block; block = block->next) {
+                       if (!block->nc_block &&
+                           block->start <= *addr &&
+                           block->end >= *addr + len) {
+                               allocated = true;
+                               break;
+                       }
+               }
+               BUG_ON(!allocated);
+       }
+#endif
+
+       up_write(&allocator->rw_sema);
+
+       allocator_dbg(allocator, "[out] addr %d, len %d", *addr, len);
+
+       return ret;
+}
+
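+/*
+ * usage sketch (illustrative only, names made up; nvhost_allocator_test()
+ * below has the in-tree examples). passing addr == 0 lets the allocator
+ * pick the range; a non-zero addr requests that fixed address:
+ *
+ *   u32 addr = 0;
+ *   err = nvhost_block_alloc(&alloc, &addr, len);
+ *
+ *   addr = 0x1000;
+ *   err = nvhost_block_alloc(&alloc, &addr, len);
+ */
+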
+/*
+ * pass *addr != 0 to request a fixed start address; pass *addr == 0 to
+ * let the allocator pick, in which case the first block's base address
+ * is returned to the caller in *addr.
+ *
+ * non-contiguous allocation: returns a list of blocks whose aggregated
+ * size == len. each individual block meets the alignment requirement.
+ */
+int nvhost_block_alloc_nc(struct nvhost_allocator *allocator, u32 *addr,
+                       u32 len, struct nvhost_alloc_block **pblock)
+{
+       int ret;
+
+       allocator_dbg(allocator, "[in] addr %d, len %d", *addr, len);
+
+       BUG_ON(pblock == NULL);
+       *pblock = NULL;
+
+       if (*addr + len > allocator->limit || /* check addr range */
+           *addr & (allocator->align - 1) || /* check addr alignment */
+            len == 0)                        /* check len */
+               return -EINVAL;
+
+       len = ALIGN(len, allocator->align);
+       if (!len)
+               return -ENOMEM;
+
+       down_write(&allocator->rw_sema);
+
+       ret = block_alloc_list_locked(allocator, addr, len, pblock);
+
+#if defined(ALLOCATOR_DEBUG)
+       if (!ret) {
+               struct nvhost_alloc_block *block = *pblock;
+               BUG_ON(!block);
+               BUG_ON(block->start < allocator->base);
+               while (block->nc_next) {
+                       BUG_ON(block->end > block->nc_next->start);
+                       block = block->nc_next;
+               }
+               BUG_ON(block->end > allocator->limit);
+       }
+#endif
+
+       up_write(&allocator->rw_sema);
+
+       allocator_dbg(allocator, "[out] addr %d, len %d", *addr, len);
+
+       return ret;
+}
+
+/* free all blocks between start and end */
+int nvhost_block_free(struct nvhost_allocator *allocator, u32 addr, u32 len)
+{
+       int ret;
+
+       allocator_dbg(allocator, "[in] addr %d, len %d", addr, len);
+
+       if (addr + len > allocator->limit || /* check addr range */
+           addr < allocator->base ||
+           addr & (allocator->align - 1))   /* check addr alignment */
+               return -EINVAL;
+
+       len = ALIGN(len, allocator->align);
+       if (!len)
+               return -EINVAL;
+
+       down_write(&allocator->rw_sema);
+
+       ret = block_free_locked(allocator, addr, len);
+
+#if defined(ALLOCATOR_DEBUG)
+       if (!ret) {
+               struct nvhost_alloc_block *block;
+               for (block = allocator->block_first;
+                    block; block = block->next) {
+                       if (!block->nc_block)
+                               BUG_ON(block->start >= addr &&
+                                       block->end <= addr + len);
+               }
+       }
+#endif
+       up_write(&allocator->rw_sema);
+
+       allocator_dbg(allocator, "[out] addr %d, len %d", addr, len);
+
+       return ret;
+}
+
+/* free non-contiguous allocation block list */
+void nvhost_block_free_nc(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block)
+{
+       /* nothing to free */
+       if (!block)
+               return;
+
+       down_write(&allocator->rw_sema);
+       block_free_list_locked(allocator, block);
+       up_write(&allocator->rw_sema);
+}
+
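+/*
+ * usage sketch (illustrative only; use_range() is a hypothetical
+ * consumer). a non-contiguous allocation hands back the head of a chain
+ * linked via nc_next, and the whole chain is released with
+ * nvhost_block_free_nc():
+ *
+ *   struct nvhost_alloc_block *list, *b;
+ *   u32 addr = 0;
+ *
+ *   if (!nvhost_block_alloc_nc(&alloc, &addr, len, &list)) {
+ *           for (b = list; b; b = b->nc_next)
+ *                   use_range(b->start, b->end);
+ *           nvhost_block_free_nc(&alloc, list);
+ *   }
+ */
+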
+#if defined(ALLOCATOR_DEBUG)
+
+#include <linux/random.h>
+
+/* test suite */
+void nvhost_allocator_test(void)
+{
+       struct nvhost_allocator allocator;
+       struct nvhost_alloc_block *list[5];
+       u32 addr, len;
+       u32 count;
+       int n;
+
+       nvhost_allocator_init(&allocator, "test", 0, 10, 1);
+
+       /* alloc/free a single block in the beginning */
+       addr = 0;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+       nvhost_block_free(&allocator, addr, 2);
+       nvhost_allocator_dump(&allocator);
+       /* alloc/free a single block in the middle */
+       addr = 4;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+       nvhost_block_free(&allocator, addr, 2);
+       nvhost_allocator_dump(&allocator);
+       /* alloc/free a single block in the end */
+       addr = 8;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+       nvhost_block_free(&allocator, addr, 2);
+       nvhost_allocator_dump(&allocator);
+
+       /* allocate contiguous blocks */
+       addr = 0;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+       addr = 0;
+       nvhost_block_alloc(&allocator, &addr, 4);
+       nvhost_allocator_dump(&allocator);
+       addr = 0;
+       nvhost_block_alloc(&allocator, &addr, 4);
+       nvhost_allocator_dump(&allocator);
+
+       /* no free space */
+       addr = 0;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+
+       /* free in the end */
+       nvhost_block_free(&allocator, 8, 2);
+       nvhost_allocator_dump(&allocator);
+       /* free in the beginning */
+       nvhost_block_free(&allocator, 0, 2);
+       nvhost_allocator_dump(&allocator);
+       /* free in the middle */
+       nvhost_block_free(&allocator, 4, 2);
+       nvhost_allocator_dump(&allocator);
+
+       /* merge case PPPPAAAANNNN */
+       addr = 4;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+       /* merge case ....AAAANNNN */
+       addr = 0;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+       /* merge case PPPPAAAA.... */
+       addr = 8;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+
+       /* test free across multiple blocks and split */
+       nvhost_block_free(&allocator, 2, 2);
+       nvhost_allocator_dump(&allocator);
+       nvhost_block_free(&allocator, 6, 2);
+       nvhost_allocator_dump(&allocator);
+       nvhost_block_free(&allocator, 1, 8);
+       nvhost_allocator_dump(&allocator);
+
+       /* test non-contiguous allocation */
+       addr = 4;
+       nvhost_block_alloc(&allocator, &addr, 2);
+       nvhost_allocator_dump(&allocator);
+       addr = 0;
+       nvhost_block_alloc_nc(&allocator, &addr, 5, &list[0]);
+       nvhost_allocator_dump(&allocator);
+       nvhost_allocator_dump_nc_list(&allocator, list[0]);
+
+       /* test free of a range overlapping non-contiguous blocks */
+       nvhost_block_free(&allocator, 2, 6);
+       nvhost_allocator_dump(&allocator);
+
+       /* test non-contiguous free */
+       nvhost_block_free_nc(&allocator, list[0]);
+       nvhost_allocator_dump(&allocator);
+
+       nvhost_allocator_destroy(&allocator);
+
+       /* random stress test */
+       nvhost_allocator_init(&allocator, "test", 4096, 4096 * 1024, 4096);
+       for (;;) {
+               printk(KERN_DEBUG "alloc tests...\n");
+               for (count = 0; count < 50; count++) {
+                       addr = 0;
+                       len = random32() % (4096 * 1024 / 16);
+                       nvhost_block_alloc(&allocator, &addr, len);
+                       nvhost_allocator_dump(&allocator);
+               }
+
+               printk(KERN_DEBUG "free tests...\n");
+               for (count = 0; count < 30; count++) {
+                       addr = (random32() % (4096 * 1024)) & ~(4096 - 1);
+                       len = random32() % (4096 * 1024 / 16);
+                       nvhost_block_free(&allocator, addr, len);
+                       nvhost_allocator_dump(&allocator);
+               }
+
+               printk(KERN_DEBUG "non-contiguous alloc tests...\n");
+               for (n = 0; n < 5; n++) {
+                       addr = 0;
+                       len = random32() % (4096 * 1024 / 8);
+                       nvhost_block_alloc_nc(&allocator, &addr, len, &list[n]);
+                       nvhost_allocator_dump(&allocator);
+                       nvhost_allocator_dump_nc_list(&allocator, list[n]);
+               }
+
+               printk(KERN_DEBUG "free tests...\n");
+               for (count = 0; count < 10; count++) {
+                       addr = (random32() % (4096 * 1024)) & ~(4096 - 1);
+                       len = random32() % (4096 * 1024 / 16);
+                       nvhost_block_free(&allocator, addr, len);
+                       nvhost_allocator_dump(&allocator);
+               }
+
+               printk(KERN_DEBUG "non-contiguous free tests...\n");
+               for (n = 4; n >= 0; n--) {
+                       nvhost_allocator_dump_nc_list(&allocator, list[n]);
+                       nvhost_block_free_nc(&allocator, list[n]);
+                       nvhost_allocator_dump(&allocator);
+               }
+
+               printk(KERN_DEBUG "fixed addr alloc tests...\n");
+               for (count = 0; count < 10; count++) {
+                       addr = (random32() % (4096 * 1024)) & ~(4096 - 1);
+                       len = random32() % (4096 * 1024 / 32);
+                       nvhost_block_alloc(&allocator, &addr, len);
+                       nvhost_allocator_dump(&allocator);
+               }
+
+               printk(KERN_DEBUG "free tests...\n");
+               for (count = 0; count < 10; count++) {
+                       addr = (random32() % (4096 * 1024)) & ~(4096 - 1);
+                       len = random32() % (4096 * 1024 / 16);
+                       nvhost_block_free(&allocator, addr, len);
+                       nvhost_allocator_dump(&allocator);
+               }
+       }
+       nvhost_allocator_destroy(&allocator);
+}
+
+#endif /* ALLOCATOR_DEBUG */
+
diff --git a/drivers/video/tegra/host/nvhost_allocator.h b/drivers/video/tegra/host/nvhost_allocator.h
new file mode 100644 (file)
index 0000000..af85467
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * drivers/video/tegra/host/nvhost_allocator.h
+ *
+ * nvhost allocator
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __NVHOST_ALLOCATOR_H__
+#define __NVHOST_ALLOCATOR_H__
+
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/slab.h>
+
+/* #define ALLOCATOR_DEBUG */
+
+struct nvhost_alloc_block;
+
+/* main struct */
+struct nvhost_allocator {
+
+       char name[32];                  /* name for allocator */
+       struct rb_root rb_root;         /* rb tree root for blocks */
+
+       u32 base;                       /* min value of this linear space */
+       u32 limit;                      /* max value = limit - 1 */
+       u32 align;                      /* alignment size, power of 2 */
+
+       struct nvhost_alloc_block *block_first; /* first block in list */
+       struct nvhost_alloc_block *block_recent; /* last visited block */
+
+       u32 first_free_addr;            /* first free addr; preferred start
+                                          for non-contiguous allocations,
+                                          in order to pick up small holes */
+       u32 last_free_addr;             /* last free addr; preferred start
+                                          for contiguous allocations */
+       u32 cached_hole_size;           /* max free hole size up to last_free_addr */
+       u32 block_count;                /* number of blocks */
+
+       struct rw_semaphore rw_sema;    /* lock */
+       struct kmem_cache *block_cache; /* slab cache */
+
+       int (*alloc)(struct nvhost_allocator *allocator,
+               u32 *addr, u32 len);
+       int (*alloc_nc)(struct nvhost_allocator *allocator,
+               u32 *addr, u32 len,
+               struct nvhost_alloc_block **pblock);
+       int (*free)(struct nvhost_allocator *allocator,
+               u32 addr, u32 len);
+       void (*free_nc)(struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block);
+};
+
+/* a block of linear space range [start, end) */
+struct nvhost_alloc_block {
+       struct nvhost_allocator *allocator;     /* parent allocator */
+       struct rb_node rb;                      /* rb tree node */
+
+       u32 start;                              /* linear space range [start, end) */
+       u32 end;
+
+       void *priv;                             /* backing structure for this linear space block:
+                                                  page table, comp tag, etc. */
+
+       struct nvhost_alloc_block *prev;        /* prev block with lower address */
+       struct nvhost_alloc_block *next;        /* next block with higher address */
+
+       bool nc_block;
+       struct nvhost_alloc_block *nc_prev;     /* prev block for non-contiguous allocation */
+       struct nvhost_alloc_block *nc_next;     /* next block for non-contiguous allocation */
+};
+
+int nvhost_allocator_init(struct nvhost_allocator *allocator,
+                       const char *name, u32 base, u32 size, u32 align);
+void nvhost_allocator_destroy(struct nvhost_allocator *allocator);
+
+int nvhost_block_alloc(struct nvhost_allocator *allocator,
+                       u32 *addr, u32 len);
+int nvhost_block_alloc_nc(struct nvhost_allocator *allocator,
+                       u32 *addr, u32 len,
+                       struct nvhost_alloc_block **pblock);
+
+int nvhost_block_free(struct nvhost_allocator *allocator,
+                       u32 addr, u32 len);
+void nvhost_block_free_nc(struct nvhost_allocator *allocator,
+                       struct nvhost_alloc_block *block);
+
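+/*
+ * minimal lifecycle sketch (illustrative only; the name and sizes are
+ * made up):
+ *
+ *   struct nvhost_allocator va;
+ *   u32 addr = 0;
+ *
+ *   if (!nvhost_allocator_init(&va, "example", 4096, 1024 * 4096, 4096)) {
+ *           if (!nvhost_block_alloc(&va, &addr, 8192))
+ *                   nvhost_block_free(&va, addr, 8192);
+ *           nvhost_allocator_destroy(&va);
+ *   }
+ */
+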
+#if defined(ALLOCATOR_DEBUG)
+
+#define allocator_dbg(allocator, format, arg...)               \
+do {                                                           \
+       if (1)                                                  \
+               printk(KERN_DEBUG "nvhost_allocator (%s) %s: " format "\n", allocator->name, __func__, ##arg);\
+} while (0)
+
+static inline void
+nvhost_allocator_dump(struct nvhost_allocator *allocator) {
+       struct nvhost_alloc_block *block;
+       u32 count = 0;
+
+       down_read(&allocator->rw_sema);
+       for (block = allocator->block_first; block; block = block->next) {
+               allocator_dbg(allocator, "block %d - %d:%d, nc %d",
+                       count++, block->start, block->end, block->nc_block);
+
+               if (block->prev)
+                       BUG_ON(block->prev->end > block->start);
+               if (block->next)
+                       BUG_ON(block->next->start < block->end);
+       }
+       allocator_dbg(allocator, "tracked count %d, actual count %d",
+               allocator->block_count, count);
+       allocator_dbg(allocator, "first block %d:%d",
+               allocator->block_first ? allocator->block_first->start : -1,
+               allocator->block_first ? allocator->block_first->end : -1);
+       allocator_dbg(allocator, "first free addr %d", allocator->first_free_addr);
+       allocator_dbg(allocator, "last free addr %d", allocator->last_free_addr);
+       allocator_dbg(allocator, "cached hole size %d", allocator->cached_hole_size);
+       up_read(&allocator->rw_sema);
+
+       BUG_ON(count != allocator->block_count);
+}
+
+static inline void
+nvhost_allocator_dump_nc_list(
+               struct nvhost_allocator *allocator,
+               struct nvhost_alloc_block *block)
+{
+       down_read(&allocator->rw_sema);
+       while (block) {
+               printk(KERN_DEBUG "non-contiguous block %d:%d\n",
+                       block->start, block->end);
+               block = block->nc_next;
+       }
+       up_read(&allocator->rw_sema);
+}
+
+void nvhost_allocator_test(void);
+
+#else /* ALLOCATOR_DEBUG */
+
+#define allocator_dbg(allocator, format, arg...)
+
+#endif /* ALLOCATOR_DEBUG */
+
+#endif /* __NVHOST_ALLOCATOR_H__ */
diff --git a/drivers/video/tegra/host/nvhost_as.c b/drivers/video/tegra/host/nvhost_as.c
new file mode 100644 (file)
index 0000000..8e37dd2
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+ * drivers/video/tegra/host/nvhost_as.c
+ *
+ * Tegra Host Address Spaces
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include <linux/nvhost_as_ioctl.h>
+
+#include "dev.h"
+#include "bus_client.h"
+#include "nvhost_hwctx.h"
+#include "nvhost_as.h"
+
+int nvhost_as_dev_open(struct inode *inode, struct file *filp)
+{
+       struct nvhost_as_share *as_share;
+       struct nvhost_channel *ch;
+       int err;
+
+       nvhost_dbg_fn("");
+
+       /* this will come from the module, not the channel, later */
+       ch = container_of(inode->i_cdev, struct nvhost_channel, as_cdev);
+       if (!ch->as) {
+               nvhost_dbg_fn("no as for the channel!");
+               return -ENOENT;
+       }
+
+       err = nvhost_as_alloc_share(ch, &as_share, true /*fd-attached path*/);
+       if (err) {
+               nvhost_dbg_fn("failed to alloc share");
+               return err;
+       }
+
+       filp->private_data = as_share;
+
+       return 0;
+}
+
+int nvhost_as_dev_release(struct inode *inode, struct file *filp)
+{
+       struct nvhost_as_share *as_share = filp->private_data;
+       nvhost_dbg_fn("");
+
+       return nvhost_as_release_share(as_share, 0/* no hwctx to release */);
+}
+
+long nvhost_as_dev_ctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       int err = 0;
+       struct nvhost_as_share *as_share = filp->private_data;
+       struct nvhost_channel *ch = as_share->ch;
+       struct device *dev = as_share->as_dev;
+
+       u8 buf[NVHOST_AS_IOCTL_MAX_ARG_SIZE];
+
+       if ((_IOC_TYPE(cmd) != NVHOST_AS_IOCTL_MAGIC) ||
+               (_IOC_NR(cmd) == 0) ||
+               (_IOC_NR(cmd) > NVHOST_AS_IOCTL_LAST))
+               return -EFAULT;
+
+       BUG_ON(_IOC_SIZE(cmd) > NVHOST_AS_IOCTL_MAX_ARG_SIZE);
+
+       if (_IOC_DIR(cmd) & _IOC_WRITE) {
+               if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
+                       return -EFAULT;
+       }
+
+       nvhost_module_busy(ch->dev);
+
+       switch (cmd) {
+       case NVHOST_AS_IOCTL_BIND_CHANNEL:
+               err = nvhost_as_ioctl_bind_channel(as_share,
+                              (struct nvhost_as_bind_channel_args *)buf);
+               break;
+       case NVHOST_AS_IOCTL_ALLOC_SPACE:
+               err = nvhost_as_ioctl_alloc_space(as_share,
+                                 (struct nvhost_as_alloc_space_args *)buf);
+               break;
+       case NVHOST_AS_IOCTL_FREE_SPACE:
+               err = nvhost_as_ioctl_free_space(as_share,
+                                      (struct nvhost_as_free_space_args *)buf);
+               break;
+       case NVHOST_AS_IOCTL_MAP_BUFFER:
+               err = nvhost_as_ioctl_map_buffer(as_share,
+                                      (struct nvhost_as_map_buffer_args *)buf);
+               break;
+       case NVHOST_AS_IOCTL_UNMAP_BUFFER:
+               err = nvhost_as_ioctl_unmap_buffer(as_share,
+                              (struct nvhost_as_unmap_buffer_args *)buf);
+               break;
+       default:
+               nvhost_err(dev, "unrecognized aspace ioctl cmd: 0x%x", cmd);
+               err = -ENOTTY;
+               break;
+       }
+
+       nvhost_module_idle(ch->dev);
+
+       if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ) &&
+           copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd)))
+               err = -EFAULT;
+
+       return err;
+}
+
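+/*
+ * userspace sketch for nvhost_as_dev_ctl() above (illustrative only; the
+ * device node path and ch_fd are hypothetical, and error handling is
+ * omitted):
+ *
+ *   int as_fd = open("/dev/nvhost-as-gr3d", O_RDWR);
+ *   struct nvhost_as_bind_channel_args bind = { .channel_fd = ch_fd };
+ *
+ *   ioctl(as_fd, NVHOST_AS_IOCTL_BIND_CHANNEL, &bind);
+ */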
+
+int nvhost_as_init_device(struct nvhost_device *dev)
+{
+       struct nvhost_master *host = nvhost_get_host(dev);
+       struct nvhost_chip_support *op = nvhost_get_chip_ops();
+       struct nvhost_channel *ch = dev->channel;
+       struct nvhost_as *as;
+       int err = 0;
+
+       if (!op->as.init)
+               return 0;
+
+       if (!ch) {
+               nvhost_err(&dev->dev, "no channel in nvhost_as_init for %s",
+                          dev->name);
+               return -ENODEV;
+       }
+
+       if (!ch->as) {
+
+               nvhost_dbg_fn("allocating as for %s", dev->name);
+               as = kzalloc(sizeof(*as), GFP_KERNEL);
+               if (!as) {
+                       err = -ENOMEM;
+                       goto failed;
+               }
+               ch->as = as;
+               as->ch = ch;
+
+               mutex_init(&as->share_list_lock);
+               INIT_LIST_HEAD(&as->share_list);
+
+               err = op->as.init(host, as); /* this sets the as.ops (or not) */
+               if (err)
+                       goto failed;
+               if (!as->ops) {
+                       nvhost_dbg_fn("%s doesn't claim as support"
+                                     ", removing...", dev->name);
+                       /* support not available for this module */
+                       /* it isn't an error, just deallocate the as */
+                       kfree(as);
+                       ch->as = 0;
+               }
+       }
+
+       return 0;
+
+ failed:
+       kfree(as);
+       ch->as = 0;
+
+       return err;
+
+}
+
+/* dumb allocator... */
+static int generate_as_share_id(struct nvhost_as *as)
+{
+       nvhost_dbg_fn("");
+       return ++as->last_share_id;
+}
+/* still dumb */
+static void release_as_share_id(struct nvhost_as *as, int id)
+{
+       nvhost_dbg_fn("");
+       return;
+}
+
+int nvhost_as_alloc_share(struct nvhost_channel *ch,
+                         struct nvhost_as_share **_as_share,
+                         bool has_fd)
+{
+       struct nvhost_as *as = ch->as;
+       struct nvhost_as_share *as_share;
+       int err = 0;
+
+       nvhost_dbg_fn("");
+
+       *_as_share = 0;
+       as_share = kzalloc(sizeof(*as_share), GFP_KERNEL);
+       if (!as_share)
+               return -ENOMEM;
+
+       as_share->ch      = ch;
+       as_share->as      = as;
+       as_share->host    = nvhost_get_host(ch->dev);
+       as_share->as_dev  = ch->as_node;
+       as_share->id      = generate_as_share_id(as_share->as);
+
+       /* call module to allocate hw resources */
+       err = as->ops->alloc_share(as_share);
+       if (err)
+               goto failed;
+
+       /* When an fd is attached we'll get a call to release the as when the
+        * process exits (or otherwise closes the fd for the share).
+        * Setting up the ref_cnt in this manner allows us to properly
+        * handle both that case and the case where we've created and
+        * bound a share w/o an attached fd.
+        */
+       if (has_fd)
+               atomic_set(&as_share->ref_cnt, 1);
+       /* else it stays at 0 from the kzalloc above */
+
+       /* add the share to the set of all shares on the module */
+       mutex_lock(&as->share_list_lock);
+       list_add_tail(&as_share->share_list_node, &as->share_list);
+       mutex_unlock(&as->share_list_lock);
+
+       /* initialize the bound list */
+       mutex_init(&as_share->bound_list_lock);
+       INIT_LIST_HEAD(&as_share->bound_list);
+
+       *_as_share = as_share;
+       return 0;
+
+ failed:
+       kfree(as_share);
+       return err;
+}
+
+/*
+ * hwctxs and the device nodes call this to release.
+ * once the ref_cnt hits zero the share is deleted.
+ * hwctx == 0 when the device node is being released.
+ * otherwise it is a hwctx unbind.
+ */
+int nvhost_as_release_share(struct nvhost_as_share *as_share,
+                            struct nvhost_hwctx *hwctx)
+{
+       int err;
+
+       nvhost_dbg_fn("");
+
+       if (hwctx)
+               hwctx->as_share = 0;
+
+       if (atomic_dec_return(&as_share->ref_cnt) > 0)
+               return 0;
+
+       err = as_share->as->ops->release_share(as_share);
+
+       mutex_lock(&as_share->as->share_list_lock);
+       list_del(&as_share->share_list_node);
+       mutex_unlock(&as_share->as->share_list_lock);
+
+       release_as_share_id(as_share->as, as_share->id);
+
+       kfree(as_share);
+
+       return err;
+}
+
+
+static int bind_share(struct nvhost_as_share *as_share,
+                     struct nvhost_hwctx *hwctx)
+{
+       int err = 0;
+       nvhost_dbg_fn("");
+
+       atomic_inc(&as_share->ref_cnt);
+       err = as_share->as->ops->bind_hwctx(as_share, hwctx);
+       if (err) {
+               atomic_dec(&as_share->ref_cnt);
+               return err;
+       }
+       hwctx->as_share = as_share;
+
+       mutex_lock(&as_share->bound_list_lock);
+       list_add_tail(&hwctx->as_share_bound_list_node, &as_share->bound_list);
+       mutex_unlock(&as_share->bound_list_lock);
+
+       return 0;
+}
+
+/* when clients have not set up a share themselves, this can be
+ * called to set up and bind to a new one. however, since they
+ * (presumably) have no access to the address space device node for
+ * the module, they must use the (deprecated) channel map/unmap APIs
+ * to manipulate the share */
+int nvhost_as_alloc_and_bind_share(struct nvhost_channel *ch,
+                                  struct nvhost_hwctx *hwctx)
+{
+       struct nvhost_as *as = ch->as;
+       struct nvhost_as_share *as_share = 0;
+       int err = 0;
+
+       nvhost_dbg_fn("");
+
+       if (!as)
+               return -ENOENT;
+
+       err = nvhost_as_alloc_share(ch, &as_share, false /*no-fd path*/);
+       if (err)
+               return err;
+
+       err = bind_share(as_share, hwctx);
+       if (err) {
+               nvhost_as_release_share(as_share, hwctx);
+               return err;
+       }
+
+       return 0;
+}
+
+int nvhost_as_ioctl_bind_channel(struct nvhost_as_share *as_share,
+                                struct nvhost_as_bind_channel_args *args)
+{
+       int err = 0;
+       struct nvhost_hwctx *hwctx;
+
+       nvhost_dbg_fn("");
+
+       hwctx = nvhost_channel_get_file_hwctx(args->channel_fd);
+       if (!hwctx || hwctx->as_share)
+               return -EINVAL;
+
+       err = bind_share(as_share, hwctx);
+
+       return err;
+}
+
+int nvhost_as_ioctl_alloc_space(struct nvhost_as_share *as_share,
+                               struct nvhost_as_alloc_space_args *args)
+{
+       nvhost_dbg_fn("");
+       return as_share->as->ops->alloc_space(as_share, args);
+
+}
+
+int nvhost_as_ioctl_free_space(struct nvhost_as_share *as_share,
+                              struct nvhost_as_free_space_args *args)
+{
+       nvhost_dbg_fn("");
+       return as_share->as->ops->free_space(as_share, args);
+}
+
+int nvhost_as_ioctl_map_buffer(struct nvhost_as_share *as_share,
+                              struct nvhost_as_map_buffer_args *args)
+{
+       int err = 0;
+       struct mem_mgr *memmgr;
+       struct mem_handle *r;
+
+       nvhost_dbg_fn("");
+
+       /* note: this bumps up the ref cnt in the nvmap client.
+        * be sure to drop it with put later... we're not
+        * holding onto the nvmap client pointer */
+       memmgr = mem_op().get_mgr_file(args->nvmap_fd);
+
+       if (IS_ERR(memmgr)) {
+               err = PTR_ERR(memmgr);
+               return err;
+       }
+
+       r = mem_op().get(memmgr, args->nvmap_handle);
+       if (!r) {
+               err = -EINVAL;
+               goto finish;
+       }
+
+       err = as_share->as->ops->map_buffer(as_share,
+                                           memmgr,
+                                           r, &args->o_a.align, args->flags);
+       /* args->o_a.offset will be set if !err */
+
+ finish:
+       mem_op().put_mgr(memmgr);
+       return err;
+}
+
+int nvhost_as_ioctl_unmap_buffer(struct nvhost_as_share *as_share,
+                                struct nvhost_as_unmap_buffer_args *args)
+{
+       nvhost_dbg_fn("");
+       return as_share->as->ops->unmap_buffer(as_share, args->offset);
+}
diff --git a/drivers/video/tegra/host/nvhost_as.h b/drivers/video/tegra/host/nvhost_as.h
new file mode 100644 (file)
index 0000000..f5c46d7
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * drivers/video/tegra/host/nvhost_as.h
+ *
+ * Tegra Host Address Space
+ *
+ * Copyright (c) 2011, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __NVHOST_AS_H
+#define __NVHOST_AS_H
+
+#include <linux/atomic.h>
+#include <linux/nvhost_as_ioctl.h>
+
+#include "nvhost_channel.h"
+#include "nvhost_acm.h"
+#include "nvhost_memmgr.h"
+#include "chip_support.h"
+
+struct nvhost_as_share;
+
+struct nvhost_as_moduleops {
+       int (*alloc_share)(struct nvhost_as_share *);
+       int (*release_share)(struct nvhost_as_share *);
+       int (*alloc_space)(struct nvhost_as_share *,
+                          struct nvhost_as_alloc_space_args*);
+       int (*free_space)(struct nvhost_as_share *,
+                         struct nvhost_as_free_space_args*);
+       int (*bind_hwctx)(struct nvhost_as_share *,
+                         struct nvhost_hwctx *);
+       int (*map_buffer)(struct nvhost_as_share *,
+                         struct mem_mgr *memmgr,
+                         struct mem_handle *r,
+                         u64 *offset_align,
+                         u32 flags /* NVHOST_AS_MAP_BUFFER_FLAGS_* */);
+       int (*unmap_buffer)(struct nvhost_as_share *,
+                           u64 offset);
+};
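+
+/* Illustrative sketch only: a chip backend (e.g. the gk20a support
+ * added elsewhere in this change) is expected to fill in a table of
+ * these ops and hang it off nvhost_as::ops when the address space is
+ * set up; the symbol names below are hypothetical:
+ *
+ *     static const struct nvhost_as_moduleops gk20a_as_moduleops = {
+ *             .alloc_share   = gk20a_as_alloc_share,
+ *             .release_share = gk20a_as_release_share,
+ *             .alloc_space   = gk20a_as_alloc_space,
+ *             .free_space    = gk20a_as_free_space,
+ *             .bind_hwctx    = gk20a_as_bind_hwctx,
+ *             .map_buffer    = gk20a_as_map_buffer,
+ *             .unmap_buffer  = gk20a_as_unmap_buffer,
+ *     };
+ */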
+
+struct nvhost_as_share {
+       struct nvhost_as *as;
+       atomic_t ref_cnt;
+       int id;
+
+       struct nvhost_master *host;
+       struct nvhost_channel *ch;
+       struct device *as_dev;
+
+       struct mutex bound_list_lock;
+       struct list_head bound_list;
+
+       struct list_head share_list_node;
+       void *priv; /* holds pointer to module support for the share */
+};
+
+struct nvhost_as {
+       struct mutex share_list_lock;
+       struct list_head share_list; /* list of all shares */
+       struct nvhost_channel *ch;
+       const struct nvhost_as_moduleops *ops;
+       int last_share_id; /* dummy allocator for now */
+};
+
+int nvhost_as_init_device(struct nvhost_device *dev);
+int nvhost_as_alloc_share(struct nvhost_channel *ch,
+                         struct nvhost_as_share **as,
+                         bool has_fd);
+int nvhost_as_alloc_and_bind_share(struct nvhost_channel *ch,
+                                  struct nvhost_hwctx *hwctx);
+int nvhost_as_release_share(struct nvhost_as_share *as_share,
+                           struct nvhost_hwctx *hwctx);
+
+int nvhost_as_ioctl_alloc_space(struct nvhost_as_share *as_share,
+                               struct nvhost_as_alloc_space_args *args);
+int nvhost_as_ioctl_free_space(struct nvhost_as_share *as_share,
+                              struct nvhost_as_free_space_args *args);
+int nvhost_as_ioctl_bind_channel(struct nvhost_as_share *as_share,
+                                struct nvhost_as_bind_channel_args *args);
+int nvhost_as_ioctl_map_buffer(struct nvhost_as_share *as_share,
+                              struct nvhost_as_map_buffer_args *args);
+int nvhost_as_ioctl_unmap_buffer(struct nvhost_as_share *as_share,
+                                struct nvhost_as_unmap_buffer_args *args);
+
+/* struct file_operations driver interface */
+int nvhost_as_dev_open(struct inode *inode, struct file *filp);
+int nvhost_as_dev_release(struct inode *inode, struct file *filp);
+long nvhost_as_dev_ctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#endif /* __NVHOST_AS_H */
index 3aedaef..9a09cd9 100644 (file)
@@ -48,6 +48,13 @@ struct nvhost_channel {
        struct cdev cdev;
        struct nvhost_hwctx_handler *ctxhandler;
        struct nvhost_cdma cdma;
+
+       /* The address space block here belongs to
+        * the module, but keep it here for now. */
+       struct device *as_node;
+       struct cdev as_cdev;
+       struct nvhost_as *as;
 };
 
 int nvhost_channel_init(struct nvhost_channel *ch,
index ad47f9b..0672571 100644 (file)
@@ -43,6 +43,10 @@ struct nvhost_hwctx {
        u32 save_slots;
 
        u32 restore_incrs;
+       void *priv; /* chip support state */
+
+       struct list_head as_share_bound_list_node;
+       struct nvhost_as_share *as_share;
 };
 
 struct nvhost_hwctx_handler {
index 25871dc..d3fc164 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/err.h>
+#include <linux/bug.h>
 
 #include "nvhost_memmgr.h"
 #ifdef CONFIG_TEGRA_GRHOST_USE_NVMAP
index bfc76ff..397e30d 100644 (file)
@@ -157,6 +157,8 @@ int nvhost_mutex_try_lock(struct nvhost_syncpt *sp, int idx);
 
 void nvhost_mutex_unlock(struct nvhost_syncpt *sp, int idx);
 
+bool nvhost_syncpt_wrapping_comparison(u32 syncpt, u32 threshold);
+
 struct nvhost_sync_timeline *nvhost_syncpt_timeline(struct nvhost_syncpt *sp,
                int idx);
 #endif
index 3f0b4a7..996dff8 100644 (file)
@@ -25,7 +25,6 @@
 #include "nvmap.h"
 #include "nvhost_job.h"
 
-
 struct mem_mgr *nvhost_nvmap_alloc_mgr(void)
 {
        return (struct mem_mgr *)nvmap_create_client(nvmap_dev, "nvhost");
@@ -47,10 +46,10 @@ struct mem_mgr *nvhost_nvmap_get_mgr_file(int fd)
 }
 
 struct mem_handle *nvhost_nvmap_alloc(struct mem_mgr *mgr,
-               size_t size, size_t align, int flags)
+               size_t size, size_t align, int flags, unsigned int heap_mask)
 {
        return (struct mem_handle *)nvmap_alloc((struct nvmap_client *)mgr,
-                       size, align, flags, 0);
+                       size, align, flags, heap_mask);
 }
 
 void nvhost_nvmap_put(struct mem_mgr *mgr, struct mem_handle *handle)
@@ -173,3 +172,28 @@ struct mem_handle *nvhost_nvmap_get(struct mem_mgr *mgr,
                nvmap_duplicate_handle_user_id((struct nvmap_client *)mgr, id);
 }
 
+int nvhost_nvmap_get_param(struct mem_mgr *mgr, struct mem_handle *handle,
+               u32 param, u32 *result)
+{
+       return nvmap_get_handle_param_u32((struct nvmap_client *)mgr,
+                       nvmap_ref_to_handle((struct nvmap_handle_ref *)handle),
+                       param, result);
+}
+
+int nvhost_init_nvmap_support(struct nvhost_chip_support *chip)
+{
+       chip->mem.alloc_mgr = nvhost_nvmap_alloc_mgr;
+       chip->mem.put_mgr = nvhost_nvmap_put_mgr;
+       chip->mem.get_mgr = nvhost_nvmap_get_mgr;
+       chip->mem.get_mgr_file = nvhost_nvmap_get_mgr_file;
+       chip->mem.alloc = nvhost_nvmap_alloc;
+       chip->mem.put = nvhost_nvmap_put;
+       chip->mem.get = nvhost_nvmap_get;
+       chip->mem.pin = nvhost_nvmap_pin;
+       chip->mem.unpin = nvhost_nvmap_unpin;
+       chip->mem.mmap = nvhost_nvmap_mmap;
+       chip->mem.munmap = nvhost_nvmap_munmap;
+       chip->mem.get_param = nvhost_nvmap_get_param;
+
+       return 0;
+}
index 4e1ed7d..8fbf3a7 100644 (file)
@@ -43,6 +43,7 @@ struct mem_mgr;
 #define NVHOST_MODULE_MAX_IORESOURCE_MEM       3
 #define NVHOST_MODULE_NO_POWERGATE_IDS         .powergate_ids = {-1, -1}
 #define NVHOST_DEFAULT_CLOCKGATE_DELAY         .clockgate_delay = 25
 #define NVHOST_NAME_SIZE                       24
 #define NVSYNCPT_INVALID                       (-1)
 
@@ -128,9 +129,11 @@ struct nvhost_device_data {
        int             version;        /* ip version number of device */
        int             id;             /* Separates clients of same hw */
        int             index;          /* Hardware channel number */
+       struct resource *reg_mem[NVHOST_MODULE_MAX_IORESOURCE_MEM];
        void __iomem    *aperture[NVHOST_MODULE_MAX_IORESOURCE_MEM];
 
        u32             syncpts[NVHOST_MODULE_MAX_SYNCPTS];
+       u32             syncpt_base;    /* Device sync point base */
        u32             waitbases[NVHOST_MODULE_MAX_WAITBASES];
        u32             modulemutexes[NVHOST_MODULE_MAX_MODMUTEXES];
        u32             moduleid;       /* Module id for user space API */
@@ -154,6 +157,27 @@ struct nvhost_device_data {
        struct list_head client_list;   /* List of clients and rate requests */
 
        struct nvhost_channel *channel; /* Channel assigned for the module */
+
+       void    *priv;
+
+       /* Allocates a context handler for the device */
+       struct nvhost_hwctx_handler *(*alloc_hwctx_handler)(u32 syncpt,
+                       u32 waitbase, struct nvhost_channel *ch);
+       /* Preparing for power off. Used for context save. */
+       int (*prepare_poweroff)(struct nvhost_device *dev);
+       /* Finalize power on. Can be used for context restore. */
+       void (*finalize_poweron)(struct nvhost_device *dev);
+       /* Device is busy. */
+       void (*busy)(struct nvhost_device *);
+       /* Device is idle. */
+       void (*idle)(struct nvhost_device *);
+       /* Device is going to be suspended */
+       void (*suspend)(struct nvhost_device *);
+       /* Device is initialized */
+       void (*init)(struct nvhost_device *dev);
+       /* Device is de-initialized. */
+       void (*deinit)(struct nvhost_device *dev);
+
        struct kobject *power_kobj;     /* kobject to hold power sysfs entries */
        struct nvhost_device_power_attr *power_attrib;  /* sysfs attributes */
        struct dentry *debugfs;         /* debugfs directory */
diff --git a/include/linux/nvhost_as_ioctl.h b/include/linux/nvhost_as_ioctl.h
new file mode 100644 (file)
index 0000000..e0e7e1f
--- /dev/null
@@ -0,0 +1,146 @@
+/*
+ * include/linux/nvhost_as_ioctl.h
+ *
+ * Tegra Host Address Space Driver
+ *
+ * Copyright (c) 2011-2012, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef __LINUX_NVHOST_AS_IOCTL_H
+#define __LINUX_NVHOST_AS_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#if !defined(__KERNEL__)
+#define __user
+#endif
+
+#define NVHOST_AS_IOCTL_MAGIC 'A'
+
+/*
+ * /dev/nvhost-as-* devices
+ *
+ * Opening a '/dev/nvhost-as-<module_name>' device node creates a new address
+ * space.  nvhost channels (for the same module) can then be bound to such an
+ * address space to define the addresses it has access to.
+ *
+ * Once an nvhost channel has been bound to an address space, it cannot be
+ * unbound.  There is no support for allowing an nvhost channel to change from
+ * one address space to another (or from one to none).
+ *
+ * The address space remains valid as long as there is an open device file to
+ * it, or any nvhost channels bound to it.  Once all references to the address
+ * space are removed, it is deleted.
+ *
+ */
+
+
+/*
+ * Allocating an address space range:
+ *
+ * Address ranges created with this ioctl are reserved for later use with
+ * fixed-address buffer mappings.
+ *
+ * If _FLAGS_FIXED_OFFSET is specified then the new range starts at the 'offset'
+ * given.  Otherwise the address returned is chosen to be a multiple of 'align.'
+ *
+ */
+struct nvhost_as_alloc_space_args {
+       __u32 pages;     /* in, pages */
+       __u32 page_size; /* in, bytes */
+       __u32 flags;     /* in */
+#define NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET 0x1
+       union {
+               __u64 offset; /* inout, byte address valid iff _FIXED_OFFSET */
+               __u64 align;  /* in, alignment multiple (0:={1 or n/a}) */
+       } o_a;
+};
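+
+/* For example, to reserve 256 pages of 4 KiB at a caller-chosen base
+ * (userspace sketch; 'as_fd' is an open address space device node and
+ * 'fixed_base' is an assumption of the example):
+ *
+ *     struct nvhost_as_alloc_space_args args = {
+ *             .pages      = 256,
+ *             .page_size  = 4096,
+ *             .flags      = NVHOST_AS_ALLOC_SPACE_FLAGS_FIXED_OFFSET,
+ *             .o_a.offset = fixed_base,
+ *     };
+ *     err = ioctl(as_fd, NVHOST_AS_IOCTL_ALLOC_SPACE, &args);
+ */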
+
+/*
+ * Releasing an address space range:
+ *
+ * The previously allocated region starting at 'offset' is freed.  If there are
+ * any buffers currently mapped inside the region the ioctl will fail.
+ */
+struct nvhost_as_free_space_args {
+       __u64 offset; /* in, byte address */
+};
+
+/*
+ * Binding an nvhost channel to an address space:
+ *
+ * A channel must be bound to an address space before allocating a gpfifo
+ * in nvhost.  The 'channel_fd' given here is the fd used to allocate the
+ * channel.  Once a channel has been bound to an address space it cannot
+ * be unbound (except when the channel is destroyed).
+ */
+struct nvhost_as_bind_channel_args {
+       __u32 channel_fd; /* in */
+};
+
+/*
+ * Mapping nvmap buffers into an address space:
+ *
+ * The start address is the 'offset' given if _FIXED_OFFSET is specified.
+ * Otherwise the address returned is a multiple of 'align.'
+ *
+ * If 'page_size' is set to 0 the nvmap buffer's allocation alignment/sizing
+ * will be used to determine the page size (largest possible).  In that case
+ * the page size chosen is returned to the caller in the 'page_size'
+ * parameter.
+ */
+struct nvhost_as_map_buffer_args {
+       __u32 flags;          /* in/out */
+#define NVHOST_AS_MAP_BUFFER_FLAGS_FIXED_OFFSET 0x1
+       __u32 nvmap_fd;       /* in */
+       __u32 nvmap_handle;   /* in */
+       __u32 page_size;      /* inout, 0:= best fit to buffer */
+       union {
+               __u64 offset; /* inout, byte address valid iff _FIXED_OFFSET */
+               __u64 align;  /* in, alignment multiple (0:={1 or n/a})   */
+       } o_a;
+};
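+
+/* For example (userspace sketch; 'as_fd', 'nvmap_fd' and 'handle' are
+ * assumed to exist already):
+ *
+ *     struct nvhost_as_map_buffer_args args = {
+ *             .flags        = 0,
+ *             .nvmap_fd     = nvmap_fd,
+ *             .nvmap_handle = handle,
+ *             .page_size    = 0,
+ *             .o_a.align    = 0,
+ *     };
+ *     err = ioctl(as_fd, NVHOST_AS_IOCTL_MAP_BUFFER, &args);
+ *
+ * On success args.o_a.offset holds the address of the new mapping and
+ * args.page_size the page size chosen for it.
+ */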
+
+/*
+ * Unmapping a buffer:
+ *
+ * To unmap a previously mapped buffer, set 'offset' to the offset returned by
+ * the mapping call.  This also applies when a buffer has been mapped at a
+ * fixed offset within a previously allocated address space range.
+ */
+struct nvhost_as_unmap_buffer_args {
+       __u64 offset; /* in, byte address */
+};
+
+#define NVHOST_AS_IOCTL_BIND_CHANNEL \
+       _IOWR(NVHOST_AS_IOCTL_MAGIC, 1, struct nvhost_as_bind_channel_args)
+#define NVHOST_AS_IOCTL_ALLOC_SPACE \
+       _IOWR(NVHOST_AS_IOCTL_MAGIC, 2, struct nvhost_as_alloc_space_args)
+#define NVHOST_AS_IOCTL_FREE_SPACE \
+       _IOWR(NVHOST_AS_IOCTL_MAGIC, 3, struct nvhost_as_free_space_args)
+#define NVHOST_AS_IOCTL_MAP_BUFFER \
+       _IOWR(NVHOST_AS_IOCTL_MAGIC, 4, struct nvhost_as_map_buffer_args)
+#define NVHOST_AS_IOCTL_UNMAP_BUFFER \
+       _IOWR(NVHOST_AS_IOCTL_MAGIC, 5, struct nvhost_as_unmap_buffer_args)
+
+#define NVHOST_AS_IOCTL_LAST           \
+       _IOC_NR(NVHOST_AS_IOCTL_UNMAP_BUFFER)
+#define NVHOST_AS_IOCTL_MAX_ARG_SIZE   \
+       sizeof(struct nvhost_as_map_buffer_args)
+
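+/*
+ * Putting the pieces together, the expected life cycle of a share as
+ * seen from user space is roughly (illustrative sketch only):
+ *
+ *     as_fd = open("/dev/nvhost-as-<module_name>", O_RDWR);
+ *     bind.channel_fd = channel_fd;
+ *     ioctl(as_fd, NVHOST_AS_IOCTL_BIND_CHANNEL, &bind);
+ *     ... _ALLOC_SPACE / _MAP_BUFFER / _UNMAP_BUFFER as needed ...
+ *     close(as_fd);
+ *
+ * As described at the top of this file, the share stays valid after
+ * close() for as long as channels remain bound to it.
+ */
+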
+#endif /* __LINUX_NVHOST_AS_IOCTL_H */
index 1c0359d..5e0213b 100644 (file)
@@ -94,6 +94,11 @@ struct nvhost_syncpt_incr {
        __u32 syncpt_incrs;
 };
 
+struct nvhost_gpfifo {
+       __u64 gpu_va;
+       __u32 words;
+};
+
 struct nvhost_get_param_args {
        __u32 value;
 } __packed;
@@ -107,6 +112,82 @@ struct nvhost_set_nvmap_fd_args {
        __u32 fd;
 } __packed;
 
+struct nvhost_alloc_obj_ctx_args {
+       __u32 class_num; /* kepler3d, 2d, compute, etc       */
+       __u32 obj_id;    /* output, used to free later       */
+       __u32 vaspace_share; /* XXX: to be removed */
+};
+
+struct nvhost_free_obj_ctx_args {
+       __u32 obj_id; /* obj ctx to free */
+};
+
+struct nvhost_alloc_gpfifo_args {
+       __u32 num_entries;
+/* mark the owner channel of this gpfifo as a VPR channel */
+#define NVHOST_ALLOC_GPFIFO_FLAGS_VPR_ENABLED  (1 << 0)
+       __u32 flags;
+};
+
+struct nvhost_fence {
+       __u32 syncpt_id; /* syncpoint id */
+       __u32 value;     /* value to wait for, or the value for others to wait on */
+};
+
+struct nvhost_submit_gpfifo_args {
+       struct nvhost_gpfifo *gpfifo;
+       __u32 num_entries;
+       struct nvhost_fence fence;
+       __u32 flags;
+/* insert a wait on the fence before submitting the gpfifo */
+#define NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_WAIT  BIT(0)
+/* insert a fence update after submitting the gpfifo and
+ * return the new fence for others to wait on */
+#define NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET   BIT(1)
+};
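+
+/* For example, to submit and get back a fence for others to wait on
+ * (illustrative sketch; 'entries' is a caller-built gpfifo array of
+ * 'n' entries):
+ *
+ *     struct nvhost_submit_gpfifo_args args = {
+ *             .gpfifo      = entries,
+ *             .num_entries = n,
+ *             .flags       = NVHOST_SUBMIT_GPFIFO_FLAGS_FENCE_GET,
+ *     };
+ *     err = ioctl(channel_fd, NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO, &args);
+ *
+ * On success args.fence holds the syncpoint id/value pair for others
+ * to wait on.
+ */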
+
+struct nvhost_map_buffer_args {
+       __u32 flags;
+#define NVHOST_MAP_BUFFER_FLAGS_ALIGN          0x0
+#define NVHOST_MAP_BUFFER_FLAGS_OFFSET         BIT(0)
+#define NVHOST_MAP_BUFFER_FLAGS_KIND_PITCH     0x0
+#define NVHOST_MAP_BUFFER_FLAGS_KIND_SPECIFIED BIT(1)
+#define NVHOST_MAP_BUFFER_FLAGS_CACHABLE_TRUE  0x0
+#define NVHOST_MAP_BUFFER_FLAGS_CACHABLE_FALSE BIT(2)
+       __u32 nvmap_handle;
+       union {
+               __u64 offset; /* valid if _offset flag given (in|out) */
+               __u64 align;  /* alignment multiple (0:={1 or n/a})   */
+       } offset_alignment;
+       __u32 kind;
+#define NVHOST_MAP_BUFFER_KIND_GENERIC_16BX2 0xfe
+};
+
+struct nvhost_unmap_buffer_args {
+       __u64 offset;
+};
+
+struct nvhost_wait_args {
+#define NVHOST_WAIT_TYPE_NOTIFIER      0x0
+#define NVHOST_WAIT_TYPE_SEMAPHORE     0x1
+       __u32 type;
+       __u32 timeout;
+       union {
+               struct {
+                       /* handle and offset for notifier memory */
+                       __u32 nvmap_handle;
+                       __u32 offset;
+               } notifier;
+               struct {
+                       /* handle and offset for semaphore memory */
+                       __u32 nvmap_handle;
+                       __u32 offset;
+                       /* semaphore payload to wait for */
+                       __u32 payload;
+               } semaphore;
+       } condition; /* determined by type field */
+};
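+
+/* For example, a semaphore wait (illustrative sketch; the handle,
+ * offset, payload and timeout values are assumptions of the example):
+ *
+ *     struct nvhost_wait_args args = {
+ *             .type    = NVHOST_WAIT_TYPE_SEMAPHORE,
+ *             .timeout = timeout,
+ *             .condition.semaphore = {
+ *                     .nvmap_handle = sema_handle,
+ *                     .offset       = 0,
+ *                     .payload      = expected_value,
+ *             },
+ *     };
+ *     err = ioctl(channel_fd, NVHOST_IOCTL_CHANNEL_WAIT, &args);
+ */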
+
 struct nvhost_read_3d_reg_args {
        __u32 offset;
        __u32 value;
@@ -143,6 +224,56 @@ struct nvhost_set_priority_args {
        __u32 priority;
 } __packed;
 
+struct nvhost_zcull_get_size_args {
+       __u32 size;
+};
+
+#define NVHOST_ZCULL_MODE_GLOBAL               0
+#define NVHOST_ZCULL_MODE_NO_CTXSW             1
+#define NVHOST_ZCULL_MODE_SEPARATE_BUFFER      2
+#define NVHOST_ZCULL_MODE_PART_OF_REGULAR_BUF  3
+
+struct nvhost_zcull_bind_args {
+       __u64 gpu_va;
+       __u32 mode;
+};
+
+struct nvhost_zcull_get_info_args {
+       __u32 width_align_pixels;
+       __u32 height_align_pixels;
+       __u32 pixel_squares_by_aliquots;
+       __u32 aliquot_total;
+       __u32 region_byte_multiplier;
+       __u32 region_header_size;
+       __u32 subregion_header_size;
+       __u32 subregion_width_align_pixels;
+       __u32 subregion_height_align_pixels;
+       __u32 subregion_count;
+};
+
+#define NVHOST_ZBC_COLOR_VALUE_SIZE    4
+#define NVHOST_ZBC_TYPE_INVALID                0
+#define NVHOST_ZBC_TYPE_COLOR          1
+#define NVHOST_ZBC_TYPE_DEPTH          2
+
+struct nvhost_zbc_set_table_args {
+       __u32 color_ds[NVHOST_ZBC_COLOR_VALUE_SIZE];
+       __u32 color_l2[NVHOST_ZBC_COLOR_VALUE_SIZE];
+       __u32 depth;
+       __u32 format;
+       __u32 type;     /* color or depth */
+};
+
+struct nvhost_zbc_query_table_args {
+       __u32 color_ds[NVHOST_ZBC_COLOR_VALUE_SIZE];
+       __u32 color_l2[NVHOST_ZBC_COLOR_VALUE_SIZE];
+       __u32 depth;
+       __u32 ref_cnt;
+       __u32 format;
+       __u32 type;             /* color or depth */
+       __u32 index_size;       /* [out] size, [in] index */
+};
+
 struct nvhost_ctrl_module_regrdwr_args {
        __u32 id;
        __u32 num_offsets;
@@ -229,9 +360,38 @@ struct nvhost_set_ctxswitch_args {
        _IOWR(NVHOST_IOCTL_MAGIC, 23, struct nvhost_get_param_arg)
 #define NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH     \
        _IOWR(NVHOST_IOCTL_MAGIC, 25, struct nvhost_set_ctxswitch_args)
+
+/* START of T124 IOCTLS */
+#define NVHOST_IOCTL_CHANNEL_ALLOC_GPFIFO      \
+       _IOW(NVHOST_IOCTL_MAGIC,  100, struct nvhost_alloc_gpfifo_args)
+#define NVHOST_IOCTL_CHANNEL_SUBMIT_GPFIFO     \
+       _IOWR(NVHOST_IOCTL_MAGIC, 101, struct nvhost_submit_gpfifo_args)
+#define NVHOST_IOCTL_CHANNEL_WAIT              \
+       _IOWR(NVHOST_IOCTL_MAGIC, 102, struct nvhost_wait_args)
+#define NVHOST_IOCTL_CHANNEL_ZCULL_BIND                \
+       _IOWR(NVHOST_IOCTL_MAGIC, 103, struct nvhost_zcull_bind_args)
+#define NVHOST_IOCTL_CHANNEL_ALLOC_OBJ_CTX     \
+       _IOWR(NVHOST_IOCTL_MAGIC, 104, struct nvhost_alloc_obj_ctx_args)
+#define NVHOST_IOCTL_CHANNEL_FREE_OBJ_CTX      \
+       _IOR(NVHOST_IOCTL_MAGIC,  105, struct nvhost_free_obj_ctx_args)
+
+#define NVHOST_IOCTL_CHANNEL_MAP_BUFFER        \
+       _IOWR(NVHOST_IOCTL_MAGIC, 118, struct nvhost_map_buffer_args)
+#define NVHOST_IOCTL_CHANNEL_UNMAP_BUFFER \
+       _IOWR(NVHOST_IOCTL_MAGIC, 119, struct nvhost_unmap_buffer_args)
+#define NVHOST_IOCTL_CHANNEL_ZCULL_GET_SIZE    \
+       _IOWR(NVHOST_IOCTL_MAGIC, 123, struct nvhost_zcull_get_size_args)
+#define NVHOST_IOCTL_CHANNEL_ZCULL_GET_INFO    \
+       _IOWR(NVHOST_IOCTL_MAGIC, 125, struct nvhost_zcull_get_info_args)
+#define NVHOST_IOCTL_CHANNEL_ZBC_SET_TABLE     \
+       _IOWR(NVHOST_IOCTL_MAGIC, 126, struct nvhost_zbc_set_table_args)
+#define NVHOST_IOCTL_CHANNEL_ZBC_QUERY_TABLE   \
+       _IOWR(NVHOST_IOCTL_MAGIC, 127, struct nvhost_zbc_query_table_args)
+
 #define NVHOST_IOCTL_CHANNEL_LAST              \
-       _IOC_NR(NVHOST_IOCTL_CHANNEL_SET_CTXSWITCH)
-#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_submit_args)
+       _IOC_NR(NVHOST_IOCTL_CHANNEL_ZBC_QUERY_TABLE)
+
+#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_zbc_query_table_args)
 
 struct nvhost_ctrl_syncpt_read_args {
        __u32 id;
@@ -292,6 +452,8 @@ enum nvhost_module_id {
        NVHOST_MODULE_MPE,
        NVHOST_MODULE_MSENC,
        NVHOST_MODULE_TSEC,
+       NVHOST_MODULE_GPU,
+       NVHOST_MODULE_VIC,
 };
 
 #define NVHOST_IOCTL_CTRL_SYNCPT_READ          \