*
* Tegra Graphics Host Automatic Clock Management
*
- * Copyright (c) 2010-2013, NVIDIA Corporation. All rights reserved.
+ * Copyright (c) 2010-2014, NVIDIA Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
#include <linux/tegra-powergate.h>
#include <linux/tegra-soc.h>
#include <trace/events/nvhost.h>
+#include <linux/platform_data/tegra_edp.h>
+#include <linux/tegra_pm_domains.h>
+#include <linux/nvhost_ioctl.h>
-#include <mach/mc.h>
-#include <mach/pm_domains.h>
+#include <tegra/mc.h>
#include "nvhost_acm.h"
+#include "nvhost_channel.h"
#include "dev.h"
#include "bus_client.h"
#define MAX_DEVID_LENGTH 16
#ifdef CONFIG_PM_GENERIC_DOMAINS
+static int nvhost_module_suspend(struct device *dev);
+static int nvhost_module_resume(struct device *dev);
static int nvhost_module_power_on(struct generic_pm_domain *domain);
static int nvhost_module_power_off(struct generic_pm_domain *domain);
+static int nvhost_module_prepare_poweroff(struct device *dev);
+static int nvhost_module_finalize_poweron(struct device *dev);
#endif
DEFINE_MUTEX(client_list_lock);
struct nvhost_module_client {
struct list_head node;
- unsigned long rate[NVHOST_MODULE_MAX_CLOCKS];
+ unsigned long constraint[NVHOST_MODULE_MAX_CLOCKS];
+ unsigned long type[NVHOST_MODULE_MAX_CLOCKS];
void *priv;
};
+#ifdef CONFIG_ARCH_TEGRA
static void do_powergate_locked(int id)
{
nvhost_dbg_fn("%d", id);
}
}
-void nvhost_module_reset(struct platform_device *dev)
/* Convert a bandwidth request in bytes/s to an EMC frequency request. */
static unsigned long nvhost_emc_bw_to_freq_req(unsigned long rate)
{
	unsigned long kbps = rate >> 10;	/* EMC API expects KBps */

	return tegra_emc_bw_to_freq_req(kbps);
}
+#else
/* Power-gating stub for builds without Tegra powergate support: trace only. */
static void do_powergate_locked(int id)
{
	nvhost_dbg_fn("%d", id);
}
+
/* Un-power-gating stub for builds without Tegra powergate support. */
static void do_unpowergate_locked(int id)
{
	nvhost_dbg_fn("");
}
+
+static void do_module_reset_locked(struct platform_device *dev)
+{
+ struct nvhost_device_data *pdata = platform_get_drvdata(dev);
+
+ nvhost_dbg_fn("");
+
+ if (pdata->reset) {
+ pdata->reset(dev);
+ return;
+ }
+}
/* EMC bandwidth scaling is unavailable here; report no frequency request. */
static unsigned long nvhost_emc_bw_to_freq_req(unsigned long rate)
{
	(void)rate;	/* unused in this configuration */
	return 0UL;
}
+#endif
+
+void nvhost_module_reset(struct platform_device *dev, bool reboot)
{
struct nvhost_device_data *pdata = platform_get_drvdata(dev);
do_module_reset_locked(dev);
mutex_unlock(&pdata->lock);
- if (pdata->finalize_poweron)
+ if (reboot && pdata->finalize_poweron)
pdata->finalize_poweron(dev);
dev_dbg(&dev->dev, "%s: module %s out of reset\n",
__func__, dev_name(&dev->dev));
}
-void nvhost_module_busy(struct platform_device *dev)
/*
 * Take a runtime PM reference on the module without resuming it. The
 * parent chain (host1x) receives the same no-resume reference first, so
 * the whole hierarchy stays pinned. Balance with nvhost_module_idle().
 */
void nvhost_module_busy_noresume(struct platform_device *dev)
{
	if (dev->dev.parent && (dev->dev.parent != &platform_bus))
		nvhost_module_busy_noresume(nvhost_get_parent(dev));

#ifdef CONFIG_PM_RUNTIME
	pm_runtime_get_noresume(&dev->dev);
#endif
}
+
/*
 * Take a power reference on the module, powering on the parent (host1x)
 * first and then the device itself. Returns 0 on success or a negative
 * errno; on failure all references taken here are dropped again. Every
 * successful call must be balanced with nvhost_module_idle().
 */
int nvhost_module_busy(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);
	int ret = 0;

	/* Explicitly turn on the host1x clocks
	 * - This is needed as host1x driver sets ignore_children = true
	 * - The code below fixes this use-case
	 */
	if (dev->dev.parent && (dev->dev.parent != &platform_bus))
		ret = nvhost_module_busy(nvhost_get_parent(dev));

	if (ret)
		return ret;

#ifdef CONFIG_PM_RUNTIME
	ret = pm_runtime_get_sync(&dev->dev);
	if (ret < 0) {
		/* drop the usage count taken by get_sync, then undo the
		 * parent reference taken above */
		pm_runtime_put_noidle(&dev->dev);
		if (dev->dev.parent && (dev->dev.parent != &platform_bus))
			nvhost_module_idle(nvhost_get_parent(dev));
		nvhost_err(&dev->dev, "failed to power on, err %d", ret);
		return ret;
	}
#endif

	if (pdata->busy)
		pdata->busy(dev);

	return 0;
}
/*
 * Pin the module powered: add a PM QoS NO_POWER_OFF flag request so
 * runtime PM will not power-gate the device. Idempotent — calling it
 * again while the request is already active does nothing.
 */
void nvhost_module_disable_poweroff(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	if (dev_pm_qos_request_active(&pdata->no_poweroff_req))
		return;

	dev_pm_qos_add_request(&dev->dev, &pdata->no_poweroff_req,
			       DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
}
/*
 * Allow power-gating again: drop the NO_POWER_OFF PM QoS request added
 * by nvhost_module_disable_poweroff(). Safe to call when no request is
 * pending.
 */
void nvhost_module_enable_poweroff(struct platform_device *dev)
{
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	if (!dev_pm_qos_request_active(&pdata->no_poweroff_req))
		return;

	dev_pm_qos_remove_request(&pdata->no_poweroff_req);
}
void nvhost_module_idle_mult(struct platform_device *dev, int refs)
struct nvhost_device_data *pdata = platform_get_drvdata(dev);
#ifdef CONFIG_PM_RUNTIME
- if (atomic_read(&dev->dev.power.usage_count) == refs) {
+ /* call idle callback only if the device is turned on. */
+ if (atomic_read(&dev->dev.power.usage_count) == refs &&
+ pm_runtime_active(&dev->dev)) {
if (pdata->idle)
pdata->idle(dev);
}
{
struct clk *c;
struct nvhost_device_data *pdata = platform_get_drvdata(dev);
+ int err = 0;
c = pdata->clk[index];
if (!c)
return -EINVAL;
/* Need to enable client to get correct rate */
- nvhost_module_busy(dev);
+ err = nvhost_module_busy(dev);
+ if (err)
+ return err;
+
*rate = clk_get_rate(c);
nvhost_module_idle(dev);
return 0;
static int nvhost_module_update_rate(struct platform_device *dev, int index)
{
+ struct nvhost_device_data *pdata = platform_get_drvdata(dev);
+ unsigned long bw_constraint = 0, floor_rate = 0, pixelrate = 0;
unsigned long rate = 0;
struct nvhost_module_client *m;
- unsigned long devfreq_rate, default_rate;
- struct nvhost_device_data *pdata = platform_get_drvdata(dev);
int ret;
if (!pdata->clk[index])
return -EINVAL;
- /* If devfreq is on, use that clock rate, otherwise default */
- devfreq_rate = pdata->clocks[index].devfreq_rate;
- default_rate = devfreq_rate ?
- devfreq_rate : pdata->clocks[index].default_rate;
- default_rate = clk_round_rate(pdata->clk[index], default_rate);
-
+ /* aggregate client constraints */
list_for_each_entry(m, &pdata->client_list, node) {
- unsigned long r = m->rate[index];
- if (!r)
- r = default_rate;
- rate = max(r, rate);
+ unsigned long constraint = m->constraint[index];
+ unsigned long type = m->type[index];
+
+ if (!constraint)
+ continue;
+
+ if (type == NVHOST_BW)
+ bw_constraint += constraint;
+ if (type == NVHOST_PIXELRATE)
+ pixelrate += constraint;
+ else
+ floor_rate = max(floor_rate, constraint);
+ }
+
+ /* use client specific aggregation if available */
+ if (pdata->aggregate_constraints)
+ rate = pdata->aggregate_constraints(dev, index, floor_rate,
+ pixelrate, bw_constraint);
+
+ /* if frequency is not available, use default policy */
+ if (!rate) {
+ unsigned long bw_rate = nvhost_emc_bw_to_freq_req(rate);
+ rate = max(floor_rate, bw_rate);
}
+
+ /* take devfreq rate into account */
+ rate = max(rate, pdata->clocks[index].devfreq_rate);
+
+ /* if we still don't have any rate, use default */
if (!rate)
- rate = default_rate;
+ rate = pdata->clocks[index].default_rate;
- trace_nvhost_module_update_rate(dev->name,
- pdata->clocks[index].name, rate);
+ trace_nvhost_module_update_rate(dev->name, pdata->clocks[index].name,
+ rate);
ret = clk_set_rate(pdata->clk[index], rate);
- if (pdata->update_clk)
- pdata->update_clk(dev);
-
return ret;
}
int nvhost_module_set_rate(struct platform_device *dev, void *priv,
- unsigned long rate, int index, int bBW)
+ unsigned long constraint, int index, unsigned long type)
{
struct nvhost_module_client *m;
int ret = 0;
mutex_lock(&client_list_lock);
list_for_each_entry(m, &pdata->client_list, node) {
if (m->priv == priv) {
- if (bBW) {
- /*
- * If client sets BW, then we need to
- * convert it to freq.
- * rate is Bps and input param of
- * tegra_emc_bw_to_freq_req is KBps.
- */
- unsigned int freq_khz =
- tegra_emc_bw_to_freq_req
- ((unsigned long)(rate >> 10));
-
- m->rate[index] =
- clk_round_rate(pdata->clk[index],
- (unsigned long)(freq_khz << 10));
- } else
- m->rate[index] =
- clk_round_rate(pdata->clk[index], rate);
+ m->constraint[index] = constraint;
+ m->type[index] = type;
}
}
return ret;
}
+int nvhost_clk_get(struct platform_device *dev, char *name, struct clk **clk)
+{
+ int i;
+ struct nvhost_device_data *pdata = platform_get_drvdata(dev);
+
+ for (i = 0; i < pdata->num_clks; i++) {
+ if (strcmp(pdata->clocks[i].name, name) == 0) {
+ *clk = pdata->clk[i];
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
int nvhost_module_init(struct platform_device *dev)
{
int i = 0, err = 0;
for (i = 0; i < pdata->num_clks; ++i)
clk_disable_unprepare(pdata->clk[i]);
+ /* Disable railgating if pm runtime is not available */
+ pdata->can_powergate = IS_ENABLED(CONFIG_PM_RUNTIME) &&
+ IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS) &&
+ pdata->can_powergate;
+
/* power gate units that we can power gate */
if (pdata->can_powergate) {
do_powergate_locked(pdata->powergate_ids[0]);
}
EXPORT_SYMBOL(nvhost_module_init);
-int nvhost_module_suspend(struct device *dev)
-{
- struct nvhost_device_data *pdata = dev_get_drvdata(dev);
-
- if (pm_runtime_suspended(dev))
- return 0;
-
- if (pm_runtime_barrier(dev))
- return -EBUSY;
-
- if (pdata->suspend_ndev)
- pdata->suspend_ndev(dev);
-
- return 0;
-}
-
-int nvhost_module_resume(struct device *dev)
-{
- struct nvhost_device_data *pdata = dev_get_drvdata(dev);
-
- if (!pdata->can_powergate && pdata->finalize_poweron) {
- nvhost_module_enable_clk(dev);
- pdata->finalize_poweron(to_platform_device(dev));
- nvhost_module_disable_clk(dev);
- }
-
- return 0;
-}
-
/*
 * Tear down power management for a module: stop devfreq, give the client
 * a chance to prepare for power-off, quiesce clocks / runtime PM, then
 * drop clock and sysfs references.
 */
void nvhost_module_deinit(struct platform_device *dev)
{
	int i;
	/* NOTE(review): 'attr' is unused in the code visible here — it may
	 * be leftover or used in lines elided from this view; confirm
	 * before removing. */
	struct kobj_attribute *attr = NULL;
	struct nvhost_device_data *pdata = platform_get_drvdata(dev);

	devfreq_suspend_device(pdata->power_manager);

	if (pdata->prepare_poweroff)
		pdata->prepare_poweroff(dev);

	/* without runtime PM the clocks were left enabled; gate them by
	 * hand, otherwise just disable runtime PM for the device */
	if (!pm_runtime_enabled(&dev->dev))
		nvhost_module_disable_clk(&dev->dev);
	else
		pm_runtime_disable(&dev->dev);

	for (i = 0; i < pdata->num_clks; i++)
		clk_put(pdata->clk[i]);
	kobject_put(pdata->power_kobj);
}
/*
 * Device PM ops shared by nvhost devices. When runtime PM is enabled
 * without generic power domains, clock gating is driven directly from
 * the runtime suspend/resume callbacks.
 */
const struct dev_pm_ops nvhost_module_pm_ops = {
#if defined(CONFIG_PM_RUNTIME) && !defined(CONFIG_PM_GENERIC_DOMAINS)
	.runtime_suspend = nvhost_module_disable_clk,
	.runtime_resume = nvhost_module_enable_clk,
#endif
};
EXPORT_SYMBOL(nvhost_module_pm_ops);
-/* common runtime pm and power domain APIs */
-int nvhost_module_add_domain(struct generic_pm_domain *domain,
- struct platform_device *pdev)
+/*FIXME Use API to get host1x domain */
+struct generic_pm_domain *host1x_domain;
+
+int _nvhost_module_add_domain(struct generic_pm_domain *domain,
+ struct platform_device *pdev, bool client)
{
int ret = 0;
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS
struct nvhost_device_data *pdata;
struct dev_power_governor *pm_domain_gov = NULL;
return -EINVAL;
if (!pdata->can_powergate)
-#ifdef CONFIG_PM_GENERIC_DOMAINS
pm_domain_gov = &pm_domain_always_on_gov;
if (__pm_genpd_name_add_device(domain->name, &pdev->dev, NULL)) {
domain->dev_ops.stop = nvhost_module_disable_clk;
domain->dev_ops.save_state = nvhost_module_prepare_poweroff;
domain->dev_ops.restore_state = nvhost_module_finalize_poweron;
- /* overwrite save/restore fptrs set by pm_genpd_init */
- domain->domain.ops.suspend = nvhost_client_device_suspend;
- domain->domain.ops.resume = nvhost_client_device_resume;
+ if (client) {
+ domain->dev_ops.suspend = nvhost_module_suspend;
+ domain->dev_ops.resume = nvhost_module_resume;
+ }
+ /* Set only host1x as wakeup capable */
+ device_set_wakeup_capable(&pdev->dev, !client);
ret = pm_genpd_add_device(domain, &pdev->dev);
if (pdata->powergate_delay)
pm_genpd_set_poweroff_delay(domain,
pdata->powergate_delay);
- tegra_pd_add_sd(domain);
+ if (client)
+ pm_genpd_add_subdomain(host1x_domain, domain);
+ else {
+ tegra_pd_add_sd(domain);
+ host1x_domain = domain;
+ }
}
#endif
return ret;
}
+
/*
 * Register a client power domain as a subdomain of the host1x domain.
 * NOTE(review): assumes host1x_domain was already recorded by
 * _nvhost_module_add_domain() when host1x registered — confirm the
 * probe ordering guarantees this.
 */
void nvhost_register_client_domain(struct generic_pm_domain *domain)
{
	pm_genpd_add_subdomain(host1x_domain, domain);
}
EXPORT_SYMBOL(nvhost_register_client_domain);
+
+/* common runtime pm and power domain APIs */
+int nvhost_module_add_domain(struct generic_pm_domain *domain,
+ struct platform_device *pdev)
+{
+ if (!strcmp(domain->name, "tegra-host1x"))
+ return _nvhost_module_add_domain(domain, pdev, 0);
+ else
+ return _nvhost_module_add_domain(domain, pdev, 1);
+}
EXPORT_SYMBOL(nvhost_module_add_domain);
int nvhost_module_enable_clk(struct device *dev)
if (!pdata)
return -EINVAL;
+ for (index = 0; index < pdata->num_channels; index++)
+ if (pdata->channels[index])
+ nvhost_channel_suspend(pdata->channels[index]);
+
for (index = 0; index < pdata->num_clks; index++)
clk_disable_unprepare(pdata->clk[index]);
EXPORT_SYMBOL(nvhost_module_disable_clk);
#ifdef CONFIG_PM_GENERIC_DOMAINS
/*
 * System-suspend hook installed as the genpd dev_ops.suspend callback.
 * Refuses to suspend while the module is still in use, then pauses
 * devfreq and lets the client prepare for power-off.
 */
static int nvhost_module_suspend(struct device *dev)
{
	struct nvhost_device_data *pdata = dev_get_drvdata(dev);

	/*
	 * device_prepare takes one ref, so expect usage count to
	 * be 1 at this point.
	 */
	if (atomic_read(&dev->power.usage_count) > 1)
		return -EBUSY;

	devfreq_suspend_device(pdata->power_manager);

	if (pdata->prepare_poweroff)
		pdata->prepare_poweroff(to_platform_device(dev));

	return 0;
}
+
+static int nvhost_module_resume(struct device *dev)
+{
+ struct nvhost_device_data *pdata = dev_get_drvdata(dev);
+
+ if (pdata->finalize_poweron)
+ pdata->finalize_poweron(to_platform_device(dev));
+
+ devfreq_resume_device(pdata->power_manager);
+
+ return 0;
+}
+
static int nvhost_module_power_on(struct generic_pm_domain *domain)
{
struct nvhost_device_data *pdata;
do_unpowergate_locked(pdata->powergate_ids[1]);
}
- if (pdata->powerup_reset)
- do_module_reset_locked(pdata->pdev);
mutex_unlock(&pdata->lock);
return 0;
return 0;
}
-int nvhost_module_prepare_poweroff(struct device *dev)
+static int nvhost_module_prepare_poweroff(struct device *dev)
{
struct nvhost_device_data *pdata;
if (!pdata)
return -EINVAL;
+ devfreq_suspend_device(pdata->power_manager);
+
if (pdata->prepare_poweroff)
pdata->prepare_poweroff(to_platform_device(dev));
return 0;
}
-int nvhost_module_finalize_poweron(struct device *dev)
+static int nvhost_module_finalize_poweron(struct device *dev)
{
struct nvhost_device_data *pdata;
+ int ret = 0;
pdata = dev_get_drvdata(dev);
if (!pdata)
return -EINVAL;
if (pdata->finalize_poweron)
- pdata->finalize_poweron(to_platform_device(dev));
+ ret = pdata->finalize_poweron(to_platform_device(dev));
- return 0;
+ devfreq_resume_device(pdata->power_manager);
+
+ return ret;
}
#endif
/* public host1x power management APIs */
bool nvhost_module_powered_ext(struct platform_device *dev)
{
- struct platform_device *pdev;
-
- if (!nvhost_get_parent(dev)) {
- dev_err(&dev->dev, "Module powered called with wrong dev\n");
- return 0;
- }
-
- /* get the parent */
- pdev = to_platform_device(dev->dev.parent);
-
- return nvhost_module_powered(pdev);
+ if (dev->dev.parent && dev->dev.parent != &platform_bus)
+ dev = to_platform_device(dev->dev.parent);
+ return nvhost_module_powered(dev);
}
+EXPORT_SYMBOL(nvhost_module_powered_ext);
-void nvhost_module_busy_ext(struct platform_device *dev)
+int nvhost_module_busy_ext(struct platform_device *dev)
{
- struct platform_device *pdev;
-
- if (!nvhost_get_parent(dev)) {
- dev_err(&dev->dev, "Module busy called with wrong dev\n");
- return;
- }
-
- /* get the parent */
- pdev = to_platform_device(dev->dev.parent);
-
- nvhost_module_busy(pdev);
+ if (dev->dev.parent && dev->dev.parent != &platform_bus)
+ dev = to_platform_device(dev->dev.parent);
+ return nvhost_module_busy(dev);
}
EXPORT_SYMBOL(nvhost_module_busy_ext);
void nvhost_module_idle_ext(struct platform_device *dev)
{
- struct platform_device *pdev;
-
- if (!nvhost_get_parent(dev)) {
- dev_err(&dev->dev, "Module idle called with wrong dev\n");
- return;
- }
-
- /* get the parent */
- pdev = to_platform_device(dev->dev.parent);
-
- nvhost_module_idle(pdev);
+ if (dev->dev.parent && dev->dev.parent != &platform_bus)
+ dev = to_platform_device(dev->dev.parent);
+ nvhost_module_idle(dev);
}
EXPORT_SYMBOL(nvhost_module_idle_ext);