#include "nvhost_intr.h"
#include "t114/t114.h"
#include "t148/t148.h"
+#include "t124/t124.h"
#define TSEC_IDLE_TIMEOUT_DEFAULT 10000 /* 10 milliseconds */
#define TSEC_IDLE_CHECK_PERIOD 10 /* 10 usec */
/* allocate pages for ucode */
/*
 * NOTE(review): this file is a unified-diff fragment; the leading '+'/'-'
 * markers are patch text, not C.  This hunk is the tail of a firmware-load
 * routine (its signature is elided above).  The patch replaces the old
 * mem_op() dispatch-table calls with the nvhost_memmgr_*() wrappers and
 * threads &dev->dev through alloc/pin/unpin (new flags/device arguments).
 */
m->mem_r = nvhost_memmgr_alloc(nvhost_get_host(dev)->memmgr,
/* size is padded by 256 bytes, then rounded up to whole pages */
roundup(ucode_fw->size+256, PAGE_SIZE),
- PAGE_SIZE, mem_mgr_flag_uncacheable);
+ PAGE_SIZE, mem_mgr_flag_uncacheable, 0);
if (IS_ERR(m->mem_r)) {
dev_err(&dev->dev, "nvmap alloc failed");
err = PTR_ERR(m->mem_r);
goto clean_up;
}
- m->pa = nvhost_memmgr_pin(nvhost_get_host(dev)->memmgr, m->mem_r);
+ m->pa = nvhost_memmgr_pin(nvhost_get_host(dev)->memmgr, m->mem_r,
+ &dev->dev);
if (IS_ERR(m->pa)) {
dev_err(&dev->dev, "nvmap pin failed for ucode");
err = PTR_ERR(m->pa);
/*
 * NOTE(review): as shown, the IS_ERR(m->pa) branch appears to fall
 * straight into clean_up while m->pa still holds an ERR_PTR, so the
 * "if (m->pa)" test below would pass and unpin would be handed an
 * error pointer.  Lines may be elided by the diff (e.g. "m->pa = NULL;"
 * before the label) — confirm against the full file.
 */
clean_up:
if (m->mapped) {
- mem_op().munmap(m->mem_r, m->mapped);
+ nvhost_memmgr_munmap(m->mem_r, m->mapped);
m->mapped = NULL;
}
if (m->pa) {
- mem_op().unpin(nvhost_get_host(dev)->memmgr, m->mem_r, m->pa);
+ nvhost_memmgr_unpin(nvhost_get_host(dev)->memmgr, m->mem_r,
+ &dev->dev, m->pa);
m->pa = NULL;
}
if (m->mem_r) {
- mem_op().put(nvhost_get_host(dev)->memmgr, m->mem_r);
+ nvhost_memmgr_put(nvhost_get_host(dev)->memmgr, m->mem_r);
m->mem_r = NULL;
}
/* the firmware image was copied into ucode memory above (elided); release it */
release_firmware(ucode_fw);
return err;
}
/*
 * nvhost_tsec_init() - allocate per-device TSEC state, boot the TSEC
 * falcon and enable its interrupt.
 *
 * The patch changes the return type from void to int so callers can
 * observe failures: -EINVAL when the firmware name cannot be determined,
 * -ENOMEM when the state allocation fails, or the error from tsec_boot().
 * fw_name and tsec_boot() are declared in elided context above.
 */
-void nvhost_tsec_init(struct platform_device *dev)
+int nvhost_tsec_init(struct platform_device *dev)
{
int err = 0;
struct tsec *m;
fw_name = tsec_get_fw_name(dev);
if (!fw_name) {
dev_err(&dev->dev, "couldn't determine firmware name");
- return;
+ return -EINVAL;
}
m = kzalloc(sizeof(struct tsec), GFP_KERNEL);
if (!m) {
dev_err(&dev->dev, "couldn't alloc ucode");
/* fw_name was allocated by tsec_get_fw_name() — presumably; free on failure */
kfree(fw_name);
- return;
+ return -ENOMEM;
}
set_tsec(dev, m);
nvhost_module_busy(dev);
- tsec_boot(dev);
+ err = tsec_boot(dev);
+ if (err)
+ goto clean_up;
+
enable_tsec_irq(dev);
nvhost_module_idle(dev);
- return;
+ return 0;
clean_up:
/*
 * NOTE(review): on the tsec_boot() failure path the function returns
 * without calling nvhost_module_idle(), leaving the busy refcount taken
 * above unbalanced — confirm against the full file whether that is
 * intentional (e.g. to keep a failed unit powered for debug).
 */
dev_err(&dev->dev, "failed");
+ return err;
}
/*
 * nvhost_tsec_deinit() - tear down the ucode memory set up by init.
 * Body partially elided by the diff (the opening brace and the lookup of
 * `m` are not visible).  The patch converts mem_op() calls to the
 * nvhost_memmgr_*() wrappers and additionally frees the tsec state and
 * clears the device back-pointer, fixing what was previously a leak of `m`.
 */
void nvhost_tsec_deinit(struct platform_device *dev)
/* unpin, free ucode memory */
if (m->mem_r) {
if (m->mapped)
- mem_op().munmap(m->mem_r, m->mapped);
+ nvhost_memmgr_munmap(m->mem_r, m->mapped);
if (m->pa)
- mem_op().unpin(nvhost_get_host(dev)->memmgr, m->mem_r,
- m->pa);
+ nvhost_memmgr_unpin(nvhost_get_host(dev)->memmgr,
+ m->mem_r, &dev->dev, m->pa);
/* NOTE(review): this inner check is redundant — we are already
 * inside "if (m->mem_r)". */
if (m->mem_r)
- mem_op().put(nvhost_get_host(dev)->memmgr, m->mem_r);
+ nvhost_memmgr_put(nvhost_get_host(dev)->memmgr,
+ m->mem_r);
/* NOTE(review): pointer cleared with 0 rather than NULL — style nit. */
m->mem_r = 0;
}
+ kfree(m);
+ set_tsec(dev, NULL);
}
/* nvhost_tsec_finalize_poweron() - body elided by the diff; only the
 * signature and closing brace are visible here. */
void nvhost_tsec_finalize_poweron(struct platform_device *dev)
}
/*
 * Device-tree match table.  The patch adds a Tegra124 entry and gates
 * each SoC's entry behind its TEGRA_*_OR_HIGHER_CONFIG ifdef so the
 * table only references per-chip nvhost_device_data that is actually
 * compiled in (t11_/t14_/t124_tsec_info come from the chip headers
 * included at the top of the file).
 */
static struct of_device_id tegra_tsec_of_match[] = {
+#ifdef TEGRA_11X_OR_HIGHER_CONFIG
{ .compatible = "nvidia,tegra114-tsec",
.data = (struct nvhost_device_data *)&t11_tsec_info },
+#endif
+#ifdef TEGRA_14X_OR_HIGHER_CONFIG
{ .compatible = "nvidia,tegra148-tsec",
.data = (struct nvhost_device_data *)&t14_tsec_info },
+#endif
+#ifdef TEGRA_12X_OR_HIGHER_CONFIG
+ { .compatible = "nvidia,tegra124-tsec",
+ .data = (struct nvhost_device_data *)&t124_tsec_info },
+#endif
/* sentinel */
{ },
};
/*
 * NOTE(review): tail of the probe function (signature elided above).
 * The patch reworks power management: it registers the device with the
 * generic power domain, sets up runtime PM with optional autosuspend
 * (driven by pdata->clockgate_delay), and replaces the old manual
 * clk_prepare_enable/clk_disable_unprepare bracketing of the reset with
 * a pm_runtime_get_sync()/put() pair.  Without CONFIG_PM_RUNTIME the
 * clock is simply enabled via nvhost_module_enable_clk().
 */
tsec = dev;
+#ifdef CONFIG_PM_GENERIC_DOMAINS
+ tegra_pd_add_device(&dev->dev);
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+ if (pdata->clockgate_delay) {
+ pm_runtime_set_autosuspend_delay(&dev->dev,
+ pdata->clockgate_delay);
+ pm_runtime_use_autosuspend(&dev->dev);
+ }
+ pm_runtime_enable(&dev->dev);
+ pm_runtime_get_sync(&dev->dev);
+#else
+ nvhost_module_enable_clk(&dev->dev);
+#endif
+
err = nvhost_client_device_init(dev);
if (err)
return err;
/* NOTE(review): if nvhost_client_device_init() fails here, the runtime-PM
 * reference taken above is not released — confirm against the full file. */
- nvhost_module_busy(to_platform_device(dev->dev.parent));
-
/* Reset TSEC at boot-up. Otherwise it starts sending interrupts. */
- clk_prepare_enable(pdata->clk[0]);
tegra_periph_reset_assert(pdata->clk[0]);
udelay(10);
tegra_periph_reset_deassert(pdata->clk[0]);
- clk_disable_unprepare(pdata->clk[0]);
- tegra_pd_add_device(&tegra_mc_chain_b, &dev->dev);
- pm_runtime_use_autosuspend(&dev->dev);
- pm_runtime_set_autosuspend_delay(&dev->dev, pdata->clockgate_delay);
- pm_runtime_enable(&dev->dev);
+#ifdef CONFIG_PM_RUNTIME
+ if (pdata->clockgate_delay)
+ pm_runtime_put_sync_autosuspend(&dev->dev);
+ else
+ pm_runtime_put(&dev->dev);
+#endif
- nvhost_module_idle(to_platform_device(dev->dev.parent));
return err;
}
/*
 * NOTE(review): remove/exit-style function (signature elided above).
 * It clears the generic ISR slots for IRQ 20 on the host and undoes the
 * runtime-PM setup done in probe.  The diff also deletes the thin
 * tsec_suspend/tsec_resume/tsec_runtime_* wrapper functions that used to
 * live after this function; their bodies are replaced by direct use of
 * the nvhost_* callbacks in tsec_pm_ops below.
 */
{
struct nvhost_master *host = nvhost_get_host(dev);
- /* Add clean-up */
host->intr.generic_isr[20] = NULL;
host->intr.generic_isr_thread[20] = NULL;
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int tsec_suspend(struct device *dev)
-{
- return nvhost_client_device_suspend(to_platform_device(dev));
-}
-
-static int tsec_resume(struct device *dev)
-{
- dev_info(dev, "resuming\n");
- return 0;
-}
#ifdef CONFIG_PM_RUNTIME
-static int tsec_runtime_suspend(struct device *dev)
-{
- return nvhost_module_disable_clk(to_platform_device(dev));
-}
/* drop the reference held since probe, then disable runtime PM */
+ pm_runtime_put(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+#else
+ nvhost_module_disable_clk(&dev->dev);
+#endif
-static int tsec_runtime_resume(struct device *dev)
-{
- return nvhost_module_enable_clk(to_platform_device(dev));
+ return 0;
}
-#endif /* CONFIG_PM_RUNTIME */
/*
 * Device PM operations.  After the patch the deleted tsec_* wrapper
 * functions are replaced by the nvhost_* callbacks directly, and the
 * whole table (plus the old TSEC_PM_OPS NULL-fallback macro) is gated
 * by a single #ifdef CONFIG_PM.
 *
 * NOTE(review): assigning nvhost_client_device_suspend /
 * nvhost_module_disable_clk directly assumes their signatures match
 * dev_pm_ops (int (*)(struct device *)) — confirm against their
 * declarations; the removed wrappers converted via to_platform_device().
 */
+#ifdef CONFIG_PM
static const struct dev_pm_ops tsec_pm_ops = {
- .suspend = tsec_suspend,
- .resume = tsec_resume,
+ .suspend = nvhost_client_device_suspend,
+ .resume = nvhost_client_device_resume,
#ifdef CONFIG_PM_RUNTIME
- .runtime_suspend = tsec_runtime_suspend,
- .runtime_resume = tsec_runtime_resume,
-#endif /* CONFIG_PM_RUNTIME */
+ .runtime_suspend = nvhost_module_disable_clk,
+ .runtime_resume = nvhost_module_enable_clk,
+#endif
};
-
-#define TSEC_PM_OPS (&tsec_pm_ops)
-
-#else
-
-#define TSEC_PM_OPS NULL
-
#endif
/*
 * Platform driver registration (struct continues past the visible diff).
 * The .pm hookup switches from the TSEC_PM_OPS macro (which the patch
 * removed above) to referencing tsec_pm_ops directly under CONFIG_PM.
 */
static struct platform_driver tsec_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "tsec",
- .pm = TSEC_PM_OPS,
+#ifdef CONFIG_PM
+ .pm = &tsec_pm_ops,
+#endif
#ifdef CONFIG_OF
.of_match_table = tegra_tsec_of_match,
#endif