/*
*
* Copyright (C) 2010 Google, Inc.
- * Copyright (c) 2012 NVIDIA CORPORATION. All rights reserved.
*
* Author:
* Colin Cross <ccross@google.com>
*
- * Copyright (C) 2010-2011 NVIDIA Corporation
+ * Copyright (c) 2010-2013, NVIDIA CORPORATION. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
+#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/clk/tegra.h>
#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/tegra-soc.h>
+#include <trace/events/power.h>
+#include <linux/tegra-timer.h>
+
+#include <mach/edp.h>
#include "board.h"
#include "clock.h"
#include "dvfs.h"
+#include "iomap.h"
+#include "tegra_emc.h"
+#include "cpu-tegra.h"
/* Global data of Tegra CPU CAR ops */
struct tegra_cpu_car_ops *tegra_cpu_car_ops;
* clk_get_rate_all_locked.
*
* Within a single clock, no clock operation can call another clock operation
- * on itself, except for clk_get_rate_locked and clk_set_rate_locked. Any
- * clock operation can call any other clock operation on any of it's possible
- * parents.
+ * on itself, except for clk_xxx_locked. Any clock operation can call any other
+ * clock operation on any of it's possible parents.
*
* clk_set_cansleep is used to mark a clock as sleeping. It is called during
* dvfs (Dynamic Voltage and Frequency Scaling) init on any clock that has a
- * dvfs requirement. It can only be called on clocks that are the sole parent
- * of all of their child clocks, meaning the child clock can not be reparented
- * onto a different, possibly non-sleeping, clock. This is inherently true
- * of all leaf clocks in the clock tree
+ * dvfs requirement, and propagated to all possible children of sleeping clock.
*
* An additional mutex, clock_list_lock, is used to protect the list of all
* clocks.
static DEFINE_MUTEX(clock_list_lock);
static LIST_HEAD(clocks);
+static unsigned long osc_freq;
#ifndef CONFIG_COMMON_CLK
struct clk *tegra_get_clock_by_name(const char *name)
mutex_unlock(&clock_list_lock);
return ret;
}
+EXPORT_SYMBOL(tegra_get_clock_by_name);
+
+static void clk_stats_update(struct clk *c)
+{
+ u64 cur_jiffies = get_jiffies_64();
+
+ if (c->refcnt) {
+ c->stats.time_on = c->stats.time_on +
+ (jiffies64_to_cputime64(cur_jiffies) -
+ (c->stats.last_update));
+ }
+
+ c->stats.last_update = cur_jiffies;
+}
/* Must be called with clk_lock(c) held */
static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
return c->max_rate;
}
+unsigned long clk_get_min_rate(struct clk *c)
+{
+ return c->min_rate;
+}
+
+bool tegra_is_clk_initialized(struct clk *c)
+{
+ return c->state != UNINITIALIZED;
+}
+
/* Must be called with clk_lock(c) held */
unsigned long clk_get_rate_locked(struct clk *c)
{
static void __clk_set_cansleep(struct clk *c)
{
struct clk *child;
+ int i;
BUG_ON(mutex_is_locked(&c->mutex));
BUG_ON(spin_is_locked(&c->spinlock));
+ /* Make sure that all possible descendants of sleeping clock are
+ marked as sleeping (to eliminate "sleeping parent - non-sleeping
+ child" relationship */
list_for_each_entry(child, &clocks, node) {
- if (child->parent != c)
- continue;
-
- WARN(child->ops && child->ops->set_parent,
- "can't make child clock %s of %s "
- "sleepable if it's parent could change",
- child->name, c->name);
+ bool possible_parent = (child->parent == c);
+
+ if (!possible_parent && child->inputs) {
+ for (i = 0; child->inputs[i].input; i++) {
+ if ((child->inputs[i].input == c) &&
+ tegra_clk_is_parent_allowed(child, c)) {
+ possible_parent = true;
+ break;
+ }
+ }
+ }
- __clk_set_cansleep(child);
+ if (possible_parent)
+ __clk_set_cansleep(child);
}
c->cansleep = true;
else
c->state = ON;
}
+ c->stats.last_update = get_jiffies_64();
+
+#ifdef CONFIG_LOCKDEP
+ lockdep_set_class_and_name(&c->mutex, &c->lockdep_class, c->name);
+ lockdep_set_class_and_name(&c->spinlock, &c->lockdep_class, c->name);
+#endif
mutex_lock(&clock_list_lock);
list_add(&c->node, &clocks);
mutex_unlock(&clock_list_lock);
}
-int clk_enable(struct clk *c)
+static int clk_enable_locked(struct clk *c)
{
int ret = 0;
- unsigned long flags;
-
- clk_lock_save(c, &flags);
if (clk_is_auto_dvfs(c)) {
ret = tegra_dvfs_set_rate(c, clk_get_rate_locked(c));
if (ret)
- goto out;
+ return ret;
}
if (c->refcnt == 0) {
- if (c->parent) {
- ret = clk_enable(c->parent);
+ if (!(c->flags & BUS_RATE_LIMIT) && c->parent) {
+ ret = tegra_clk_prepare_enable(c->parent);
if (ret)
- goto out;
+ return ret;
}
if (c->ops && c->ops->enable) {
ret = c->ops->enable(c);
+ trace_clock_enable(c->name, 1, 0);
if (ret) {
if (c->parent)
- clk_disable(c->parent);
- goto out;
+ tegra_clk_disable_unprepare(c->parent);
+ return ret;
}
c->state = ON;
c->set = true;
}
+ clk_stats_update(c);
}
c->refcnt++;
-out:
- clk_unlock_restore(c, &flags);
+
return ret;
}
-EXPORT_SYMBOL(clk_enable);
-void clk_disable(struct clk *c)
+static void clk_disable_locked(struct clk *c)
{
- unsigned long flags;
-
- clk_lock_save(c, &flags);
-
if (c->refcnt == 0) {
WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
- clk_unlock_restore(c, &flags);
return;
}
if (c->refcnt == 1) {
- if (c->ops && c->ops->disable)
+ if (c->ops && c->ops->disable) {
+ trace_clock_disable(c->name, 0, 0);
c->ops->disable(c);
-
- if (c->parent)
- clk_disable(c->parent);
+ }
+ if (!(c->flags & BUS_RATE_LIMIT) && c->parent)
+ tegra_clk_disable_unprepare(c->parent);
c->state = OFF;
+ clk_stats_update(c);
}
c->refcnt--;
if (clk_is_auto_dvfs(c) && c->refcnt == 0)
tegra_dvfs_set_rate(c, 0);
+}
+
+#ifdef CONFIG_HAVE_CLK_PREPARE
+/*
+ * The clk_enable/clk_disable may be called in atomic context, so they must not
+ * hold mutex. On the other hand clk_prepare/clk_unprepare can hold a mutex,
+ * as these APIs are called only in non-atomic context. Since tegra clock have
+ * "cansleep" attributte that indicates if clock requires preparation, we can
+ * split the interfaces respectively: do all work on sleeping clocks only in
+ * clk_prepare/clk_unprepare, and do all work for non-sleeping clocks only in
+ * clk_enable/clk_disable APIs. Calling "empty" APIs on either type of clocks
+ * is allowed as well, and actually expected, since clients may not know the
+ * clock attributes. However, calling clk_enable on non-prepared sleeping clock
+ * would fail.
+ */
+int clk_prepare(struct clk *c)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ if (!clk_cansleep(c))
+ return 0;
+ clk_lock_save(c, &flags);
+ ret = clk_enable_locked(c);
clk_unlock_restore(c, &flags);
+ return ret;
}
-EXPORT_SYMBOL(clk_disable);
+EXPORT_SYMBOL(clk_prepare);
-int clk_set_parent(struct clk *c, struct clk *parent)
+int clk_enable(struct clk *c)
{
int ret = 0;
unsigned long flags;
- unsigned long new_rate;
- unsigned long old_rate;
+
+ if (clk_cansleep(c)) {
+ if (WARN_ON(c->refcnt == 0))
+ return -ENOSYS;
+ return 0;
+ }
+
+ clk_lock_save(c, &flags);
+ ret = clk_enable_locked(c);
+ clk_unlock_restore(c, &flags);
+ return ret;
+}
+EXPORT_SYMBOL(clk_enable);
+
/*
 * Release a prepare reference. Only sleeping clocks do real work here;
 * non-sleeping clocks are fully handled in clk_disable().
 */
void clk_unprepare(struct clk *c)
{
	unsigned long flags;

	if (!clk_cansleep(c))
		return;

	clk_lock_save(c, &flags);
	clk_disable_locked(c);
	clk_unlock_restore(c, &flags);
}
EXPORT_SYMBOL(clk_unprepare);
+
/*
 * Atomic-context disable. Sleeping clocks are a no-op here - their
 * refcount is dropped in clk_unprepare() instead.
 */
void clk_disable(struct clk *c)
{
	unsigned long flags;

	if (clk_cansleep(c))
		return;

	clk_lock_save(c, &flags);
	clk_disable_locked(c);
	clk_unlock_restore(c, &flags);
}
EXPORT_SYMBOL(clk_disable);
+#else
/* Enable path when CONFIG_HAVE_CLK_PREPARE is not set: no prepare split */
int clk_enable(struct clk *c)
{
	unsigned long flags;
	int ret;

	clk_lock_save(c, &flags);
	ret = clk_enable_locked(c);
	clk_unlock_restore(c, &flags);
	return ret;
}
EXPORT_SYMBOL(clk_enable);
+
/* Disable path when CONFIG_HAVE_CLK_PREPARE is not set */
void clk_disable(struct clk *c)
{
	unsigned long flags;

	clk_lock_save(c, &flags);
	clk_disable_locked(c);
	clk_unlock_restore(c, &flags);
}
EXPORT_SYMBOL(clk_disable);
+#endif
+
+int clk_rate_change_notify(struct clk *c, unsigned long rate)
+{
+ if (!c->rate_change_nh)
+ return -ENOSYS;
+ return raw_notifier_call_chain(c->rate_change_nh, rate, NULL);
+}
+
+int clk_set_parent_locked(struct clk *c, struct clk *parent)
+{
+ int ret = 0;
+ unsigned long new_rate;
+ unsigned long old_rate;
+ bool disable = false;
if (!c->ops || !c->ops->set_parent) {
ret = -ENOSYS;
goto out;
}
+ if (!tegra_clk_is_parent_allowed(c, parent)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
new_rate = clk_predict_rate_from_parent(c, parent);
old_rate = clk_get_rate_locked(c);
- if (new_rate > clk_get_max_rate(c)) {
+ if ((new_rate > clk_get_max_rate(c)) &&
+ (!parent->ops || !parent->ops->shared_bus_update)) {
pr_err("Failed to set parent %s for %s (violates clock limit"
" %lu)\n", parent->name, c->name, clk_get_max_rate(c));
#endif
}
+ /* The new clock control register setting does not take effect if
+ * clock is disabled. Later, when the clock is enabled it would run
+ * for several cycles on the old parent, which may hang h/w if the
+ * parent is already disabled. To guarantee h/w switch to the new
+ * setting enable clock while setting parent.
+ */
+ if ((c->refcnt == 0) && (c->flags & MUX)) {
+ pr_debug("Setting parent of clock %s with refcnt 0\n", c->name);
+ ret = clk_enable_locked(c);
+ if (ret)
+ goto out;
+ disable = true;
+ }
+
if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
(!c->parent || new_rate > old_rate)) {
ret = tegra_dvfs_set_rate(c, new_rate);
if (ret)
goto out;
+ trace_clock_set_parent(c->name, parent->name);
+
if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
new_rate < old_rate)
ret = tegra_dvfs_set_rate(c, new_rate);
+ if (new_rate != old_rate)
+ clk_rate_change_notify(c, new_rate);
+
out:
+ if (disable)
+ clk_disable_locked(c);
+ return ret;
+}
+
+
/* Public reparent entry point: take the clock lock and delegate */
int clk_set_parent(struct clk *c, struct clk *parent)
{
	unsigned long flags;
	int ret;

	clk_lock_save(c, &flags);
	ret = clk_set_parent_locked(c, parent);
	clk_unlock_restore(c, &flags);

	return ret;
}
EXPORT_SYMBOL(clk_set_parent);
{
int ret = 0;
unsigned long old_rate, max_rate;
- long new_rate;
+ unsigned long new_rate;
+ bool disable = false;
+
+ if (!c->ops || !c->ops->set_rate)
+ return -ENOSYS;
old_rate = clk_get_rate_locked(c);
if (c->ops && c->ops->round_rate) {
new_rate = c->ops->round_rate(c, rate);
- if (new_rate < 0) {
+ if (IS_ERR_VALUE(new_rate)) {
ret = new_rate;
return ret;
}
rate = new_rate;
}
+ /* The new clock control register setting does not take effect if
+ * clock is disabled. Later, when the clock is enabled it would run
+ * for several cycles on the old rate, which may over-clock module
+ * at given voltage. To guarantee h/w switch to the new setting
+ * enable clock while setting rate.
+ */
+ if ((c->refcnt == 0) && (c->flags & (DIV_U71 | DIV_U16)) &&
+ clk_is_auto_dvfs(c)) {
+ pr_debug("Setting rate of clock %s with refcnt 0\n", c->name);
+ ret = clk_enable_locked(c);
+ if (ret)
+ goto out;
+ disable = true;
+ }
+
if (clk_is_auto_dvfs(c) && rate > old_rate && c->refcnt > 0) {
ret = tegra_dvfs_set_rate(c, rate);
if (ret)
- return ret;
+ goto out;
}
+ trace_clock_set_rate(c->name, rate, 0);
ret = c->ops->set_rate(c, rate);
if (ret)
- return ret;
+ goto out;
if (clk_is_auto_dvfs(c) && rate < old_rate && c->refcnt > 0)
ret = tegra_dvfs_set_rate(c, rate);
+ if (rate != old_rate)
+ clk_rate_change_notify(c, rate);
+
+out:
+ if (disable)
+ clk_disable_locked(c);
return ret;
}
if (!c->ops || !c->ops->set_rate)
return -ENOSYS;
+ trace_clock_set_start(c->name, rate, raw_smp_processor_id());
+
clk_lock_save(c, &flags);
ret = clk_set_rate_locked(c, rate);
clk_unlock_restore(c, &flags);
+ trace_clock_set_done(c->name, rate, raw_smp_processor_id());
+
return ret;
}
EXPORT_SYMBOL(clk_set_rate);
return rate;
}
-long clk_round_rate(struct clk *c, unsigned long rate)
+long clk_round_rate_locked(struct clk *c, unsigned long rate)
{
- unsigned long flags, max_rate;
+ unsigned long max_rate;
long ret;
- clk_lock_save(c, &flags);
-
if (!c->ops || !c->ops->round_rate) {
ret = -ENOSYS;
goto out;
ret = c->ops->round_rate(c, rate);
out:
+ return ret;
+}
+
/* Public round-rate entry point: take the clock lock and delegate */
long clk_round_rate(struct clk *c, unsigned long rate)
{
	unsigned long flags;
	long rounded;

	clk_lock_save(c, &flags);
	rounded = clk_round_rate_locked(c, rate);
	clk_unlock_restore(c, &flags);
	return rounded;
}
return 0;
}
+#ifdef CONFIG_TEGRA_PRE_SILICON_SUPPORT
+static int tegra_clk_platform_mask(void)
+{
+ int mask;
+
+ mask = tegra_platform_is_silicon() ? TEGRA_CLK_INIT_PLATFORM_SI : 0;
+ mask |= tegra_platform_is_qt() ? TEGRA_CLK_INIT_PLATFORM_QT : 0;
+ mask |= tegra_platform_is_fpga() ? TEGRA_CLK_INIT_PLATFORM_FPGA : 0;
+ mask |= tegra_platform_is_linsim() ? TEGRA_CLK_INIT_PLATFORM_LINSIM : 0;
+ mask |= tegra_cpu_is_asim() ? TEGRA_CLK_INIT_CPU_ASIM : 0;
+
+ return mask;
+}
+#endif
+
static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
{
struct clk *c;
int ret = 0;
+ /* Skip if clock not enabled for this platform */
+#ifdef CONFIG_TEGRA_PRE_SILICON_SUPPORT
+ if (table->platform != TEGRA_CLK_INIT_PLATFORM_ALL) {
+ if ((table->platform & tegra_clk_platform_mask()) == 0)
+ return 0;
+ }
+#endif
+
c = tegra_get_clock_by_name(table->name);
if (!c) {
return -ENODEV;
}
+ if (table->rate && c->parent && c->parent->ops &&
+ c->parent->ops->shared_bus_update) {
+ c->u.shared_bus_user.rate = table->rate;
+ if (!table->enabled)
+ return 0;
+ }
+
+ if (table->enabled) {
+ ret = tegra_clk_prepare_enable(c);
+ if (ret) {
+ pr_warning("Unable to enable clock %s: %d\n",
+ table->name, ret);
+ return -EINVAL;
+ }
+ }
+
if (table->parent) {
p = tegra_get_clock_by_name(table->parent);
if (!p) {
}
}
- if (table->enabled) {
- ret = clk_enable(c);
- if (ret) {
- pr_warning("Unable to enable clock %s: %d\n",
- table->name, ret);
- return -EINVAL;
+ return 0;
+}
+
+/*
+ * If table refer pll directly it can be scaled only if all its children are OFF
+ */
+static bool tegra_can_scale_pll_direct(struct clk *pll)
+{
+ bool can_scale = true;
+ struct clk *c;
+
+ mutex_lock(&clock_list_lock);
+
+ list_for_each_entry(c, &clocks, node) {
+ if ((clk_get_parent(c) == pll) && (c->state == ON)) {
+ WARN(1, "tegra: failed initialize %s: in use by %s\n",
+ pll->name, c->name);
+ can_scale = false;
+ break;
}
}
+ mutex_unlock(&clock_list_lock);
+ return can_scale;
+}
- return 0;
+/*
+ * If table entry refer pll as cbus parent it can be scaled as long as all its
+ * children are cbus users (that will be switched to cbus backup during scaling)
+ */
+static bool tegra_can_scale_pll_cbus(struct clk *pll)
+{
+ bool can_scale = true;
+ struct clk *c;
+
+ mutex_lock(&clock_list_lock);
+
+ list_for_each_entry(c, &clocks, node) {
+ if ((clk_get_parent(c) == pll) &&
+ !(c->flags & PERIPH_ON_CBUS)) {
+ WARN(1, "tegra: failed initialize %s: in use by %s\n",
+ pll->name, c->name);
+ can_scale = false;
+ break;
+ }
+ }
+ mutex_unlock(&clock_list_lock);
+ return can_scale;
+}
+
+static int tegra_clk_init_cbus_pll_one(struct tegra_clk_init_table *table)
+{
+ bool can_scale = true;
+ struct clk *pll;
+ struct clk *c = tegra_get_clock_by_name(table->name);
+ if (!c)
+ return tegra_clk_init_one_from_table(table);
+
+ if (c->flags & PERIPH_ON_CBUS) {
+ /* table entry refer pllc/c2/c3 indirectly as cbus parent */
+ pll = clk_get_parent(c);
+ can_scale = tegra_can_scale_pll_cbus(pll);
+ } else if (c->state == ON) {
+ /* table entry refer pllc/c2/c3 directly, and it is ON */
+ pll = c;
+ can_scale = tegra_can_scale_pll_direct(pll);
+ }
+
+ if (can_scale)
+ return tegra_clk_init_one_from_table(table);
+ return -EBUSY;
+}
+
+void tegra_clk_init_cbus_plls_from_table(struct tegra_clk_init_table *table)
+{
+ for (; table->name; table++)
+ tegra_clk_init_cbus_pll_one(table);
}
void tegra_clk_init_from_table(struct tegra_clk_init_table *table)
}
EXPORT_SYMBOL(tegra_periph_reset_assert);
-/* Several extended clock configuration bits (e.g., clock routing, clock
- * phase control) are included in PLL and peripheral clock source
- * registers. */
-int tegra_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
+int tegra_is_clk_enabled(struct clk *c)
+{
+ return c->refcnt;
+}
+EXPORT_SYMBOL(tegra_is_clk_enabled);
+
+int tegra_clk_shared_bus_update(struct clk *c)
{
int ret = 0;
unsigned long flags;
- spin_lock_irqsave(&c->spinlock, flags);
-
- if (!c->ops || !c->ops->clk_cfg_ex) {
- ret = -ENOSYS;
- goto out;
- }
- ret = c->ops->clk_cfg_ex(c, p, setting);
+ clk_lock_save(c, &flags);
-out:
- spin_unlock_irqrestore(&c->spinlock, flags);
+ if (c->ops && c->ops->shared_bus_update)
+ ret = c->ops->shared_bus_update(c);
+ clk_unlock_restore(c, &flags);
return ret;
}
-static bool tegra_keep_boot_clocks = false;
-static int __init tegra_keep_boot_clocks_setup(char *__unused)
+/* dvfs initialization may lower default maximum rate */
+void tegra_init_max_rate(struct clk *c, unsigned long max_rate)
{
- tegra_keep_boot_clocks = true;
- return 1;
+ struct clk *shared_bus_user;
+
+ if (c->max_rate <= max_rate)
+ return;
+
+ /* skip message if shared bus user */
+ if (!c->parent || !c->parent->ops || !c->parent->ops->shared_bus_update)
+ pr_info("Lowering %s maximum rate from %lu to %lu\n",
+ c->name, c->max_rate, max_rate);
+
+ c->max_rate = max_rate;
+ list_for_each_entry(shared_bus_user,
+ &c->shared_bus_list, u.shared_bus_user.node) {
+ if (shared_bus_user->u.shared_bus_user.rate > max_rate)
+ shared_bus_user->u.shared_bus_user.rate = max_rate;
+ tegra_init_max_rate(shared_bus_user, max_rate);
+ }
+}
+
+/* Use boot rate as emc monitor output until actual monitoring starts */
+void __init tegra_clk_preset_emc_monitor(unsigned long rate)
+{
+ struct clk *c = tegra_get_clock_by_name("mon.emc");
+
+ if (c) {
+ c->u.shared_bus_user.rate = rate;
+ clk_enable(c);
+ }
}
-__setup("tegra_keep_boot_clocks", tegra_keep_boot_clocks_setup);
/*
- * Bootloader may not match kernel restrictions on CPU clock sources.
- * Make sure CPU clock is sourced from either main or backup parent.
+ * Set osc (safe) rate. Called only for peripherals left after boot under reset
+ * (peripherals that are taken out of reset by boot-loader must be at safe rate
+ * already - that will be checked by tegra_clk_verify_rates()).
*/
-static int tegra_sync_cpu_clock(void)
+void tegra_periph_clk_safe_rate_init(struct clk *c)
{
int ret;
- unsigned long rate;
- struct clk *c = tegra_get_clock_by_name("cpu");
+ unsigned long rate = tegra_clk_measure_input_freq();
- BUG_ON(!c);
- rate = clk_get_rate(c);
- ret = clk_set_rate(c, rate);
- if (ret)
- pr_err("%s: Failed to sync CPU at rate %lu\n", __func__, rate);
- else
- pr_info("CPU rate: %lu MHz\n", clk_get_rate(c) / 1000000);
- return ret;
+ if (c->boot_rate || (clk_get_rate(c->parent) <= rate))
+ return;
+
+ if (c->ops && c->ops->set_rate && (c->flags & PERIPH_DIV)) {
+ ret = c->ops->set_rate(c, rate);
+ if (ret)
+ pr_err("%s: failed to init %s rate %lu\n",
+ __func__, c->name, rate);
+ }
+}
+
+static void __init tegra_clk_verify_rates(void)
+{
+ struct clk *c;
+ unsigned long rate;
+
+ mutex_lock(&clock_list_lock);
+
+ list_for_each_entry(c, &clocks, node) {
+ rate = clk_get_rate(c);
+ if (rate > clk_get_max_rate(c))
+ WARN(1, "tegra: %s boot rate %lu exceeds max rate %lu\n",
+ c->name, rate, clk_get_max_rate(c));
+ c->boot_rate = rate;
+ }
+ mutex_unlock(&clock_list_lock);
+}
+
+void __init tegra_common_init_clock(void)
+{
+#if defined(CONFIG_HAVE_ARM_TWD) || defined(CONFIG_ARM_ARCH_TIMER)
+ tegra_cpu_timer_init();
+#endif
+ tegra_clk_verify_rates();
+}
+
+void __init tegra_clk_verify_parents(void)
+{
+ struct clk *c;
+ struct clk *p;
+
+ mutex_lock(&clock_list_lock);
+
+ list_for_each_entry(c, &clocks, node) {
+ p = clk_get_parent(c);
+ if (!tegra_clk_is_parent_allowed(c, p))
+ WARN(1, "tegra: parent %s is not allowed for %s\n",
+ p->name, c->name);
+ }
+ mutex_unlock(&clock_list_lock);
+}
+
+static bool tegra_keep_boot_clocks = false;
+static int __init tegra_keep_boot_clocks_setup(char *__unused)
+{
+ tegra_keep_boot_clocks = true;
+ return 1;
}
-late_initcall(tegra_sync_cpu_clock);
+__setup("tegra_keep_boot_clocks", tegra_keep_boot_clocks_setup);
/*
* Iterate through all clocks, disabling any for which the refcount is 0
#endif
return 0;
}
-late_initcall(tegra_init_disable_boot_clocks);
+
+/* Get ready DVFS rails and DFLL clock source (if available) for CPU */
+static int __init tegra_dvfs_rail_start_scaling(void)
+{
+ int ret;
+ unsigned long flags, rate;
+ struct clk *c = tegra_get_clock_by_name("cpu");
+ struct clk *dfll_cpu = tegra_get_clock_by_name("dfll_cpu");
+ bool init_dfll_first = tegra_dvfs_is_dfll_bypass();
+
+ BUG_ON(!c);
+ clk_lock_save(c, &flags);
+
+ /*
+ * Initialize dfll first if it provides bypass to regulator for legacy
+ * dvfs; otherwise legacy dvfs controls cpu voltage independently, and
+ * initialized before dfll.
+ */
+ if (init_dfll_first) {
+ if (dfll_cpu && dfll_cpu->ops && dfll_cpu->ops->init)
+ dfll_cpu->ops->init(dfll_cpu);
+ }
+
+ ret = tegra_dvfs_rail_connect_regulators();
+ if (!ret && !init_dfll_first) {
+ if (dfll_cpu && dfll_cpu->ops && dfll_cpu->ops->init)
+ dfll_cpu->ops->init(dfll_cpu);
+ }
+
+ /*
+ * Bootloader may not match kernel restrictions on CPU clock sources.
+ * Make sure CPU clock is sourced from either main or backup parent.
+ */
+ rate = clk_get_rate_locked(c);
+ if (clk_set_rate_locked(c, rate))
+ pr_err("%s: Failed to sync CPU at rate %lu\n", __func__, rate);
+ else
+ pr_info("CPU rate: %lu MHz\n", clk_get_rate_locked(c)/1000000);
+
+ clk_unlock_restore(c, &flags);
+ return ret;
+}
+
+static int __init tegra_clk_late_init(void)
+{
+ tegra_init_disable_boot_clocks(); /* must before dvfs start */
+ if (!tegra_dvfs_rail_start_scaling()) /* CPU lock protected */
+ tegra_dvfs_rail_register_notifiers(); /* not under CPU lock */
+ tegra_update_cpu_edp_limits();
+ return 0;
+}
+late_initcall(tegra_clk_late_init);
+
+
+/* Several extended clock configuration bits (e.g., clock routing, clock
+ * phase control) are included in PLL and peripheral clock source
+ * registers. */
+int tegra_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ clk_lock_save(c, &flags);
+
+ if (!c->ops || !c->ops->clk_cfg_ex) {
+ ret = -ENOSYS;
+ goto out;
+ }
+ ret = c->ops->clk_cfg_ex(c, p, setting);
+
+out:
+ clk_unlock_restore(c, &flags);
+ return ret;
+}
+EXPORT_SYMBOL(tegra_clk_cfg_ex);
+
+int tegra_register_clk_rate_notifier(struct clk *c, struct notifier_block *nb)
+{
+ int ret;
+ unsigned long flags;
+
+ if (!c->rate_change_nh)
+ return -ENOSYS;
+
+ clk_lock_save(c, &flags);
+ ret = raw_notifier_chain_register(c->rate_change_nh, nb);
+ clk_unlock_restore(c, &flags);
+ return ret;
+}
+
+void tegra_unregister_clk_rate_notifier(
+ struct clk *c, struct notifier_block *nb)
+{
+ unsigned long flags;
+
+ if (!c->rate_change_nh)
+ return;
+
+ clk_lock_save(c, &flags);
+ raw_notifier_chain_unregister(c->rate_change_nh, nb);
+ clk_unlock_restore(c, &flags);
+}
+
+int tegra_clk_register_export_ops(struct clk *c,
+ struct tegra_clk_export_ops *ops)
+{
+ unsigned long flags;
+
+ clk_lock_save(c, &flags);
+ c->u.export_clk.ops = ops;
+ if (c->ops && c->ops->init)
+ c->ops->init(c);
+ clk_unlock_restore(c, &flags);
+
+ return 0;
+}
+
+#define OSC_FREQ_DET 0x58
+#define OSC_FREQ_DET_TRIG BIT(31)
+
+#define OSC_FREQ_DET_STATUS 0x5C
+#define OSC_FREQ_DET_BUSY BIT(31)
+#define OSC_FREQ_DET_CNT_MASK 0xFFFF
+
+unsigned long tegra_clk_measure_input_freq(void)
+{
+ u32 clock_autodetect;
+ void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
+
+ if (osc_freq)
+ return osc_freq;
+
+ writel(OSC_FREQ_DET_TRIG | 1,
+ (void *)((uintptr_t)clk_base + OSC_FREQ_DET));
+ do {} while (readl(
+ (void *)((uintptr_t)clk_base + OSC_FREQ_DET_STATUS))
+ & OSC_FREQ_DET_BUSY);
+
+ clock_autodetect = readl(
+ (void *)((uintptr_t)clk_base + OSC_FREQ_DET_STATUS));
+ if (clock_autodetect >= 732 - 3 && clock_autodetect <= 732 + 3) {
+ osc_freq = 12000000;
+ } else if (clock_autodetect >= 794 - 3 && clock_autodetect <= 794 + 3) {
+ osc_freq = 13000000;
+ } else if (clock_autodetect >= 1172 - 3 && clock_autodetect <= 1172 + 3) {
+ osc_freq = 19200000;
+ } else if (clock_autodetect >= 1587 - 3 && clock_autodetect <= 1587 + 3) {
+ osc_freq = 26000000;
+#ifndef CONFIG_ARCH_TEGRA_2x_SOC
+ } else if (clock_autodetect >= 1025 - 3 && clock_autodetect <= 1025 + 3) {
+ osc_freq = 16800000;
+ } else if (clock_autodetect >= 2344 - 3 && clock_autodetect <= 2344 + 3) {
+ osc_freq = 38400000;
+ } else if (clock_autodetect >= 2928 - 3 && clock_autodetect <= 2928 + 3) {
+ osc_freq = 48000000;
+ } else if (tegra_platform_is_qt()) {
+ if (clock_autodetect >= 2 && clock_autodetect <= 9)
+ osc_freq = 115200;
+ else if (clock_autodetect >= 13 && clock_autodetect <= 15)
+ osc_freq = 230400;
+#endif
+ } else {
+ pr_err("%s: Unexpected clock autodetect value %d", __func__, clock_autodetect);
+ }
+
+ BUG_ON(osc_freq == 0);
+
+ return osc_freq;
+}
#ifdef CONFIG_DEBUG_FS
{
seq_printf(s, "%*s %-*s%21s%d mV\n",
level * 3 + 1, "",
- 30 - level * 3, d->dvfs_rail->reg_id,
+ 35 - level * 3, d->dvfs_rail->reg_id,
"",
d->cur_millivolts);
}
}
}
- seq_printf(s, "%*s%c%c%-*s %-6s %-3d %-8s %-10lu\n",
+ seq_printf(s, "%*s%c%c%-*s%c %-6s %-3d %-8s %-10lu",
level * 3 + 1, "",
rate > max_rate ? '!' : ' ',
!c->set ? '*' : ' ',
- 30 - level * 3, c->name,
+ 35 - level * 3, c->name,
+ c->cansleep ? '$' : ' ',
state, c->refcnt, div, rate);
+ if (c->parent && !list_empty(&c->parent->shared_bus_list)) {
+ enum shared_bus_users_mode mode = c->u.shared_bus_user.mode;
+ unsigned long request = c->u.shared_bus_user.rate;
+ seq_printf(s, " (%lu", request);
+
+ switch (mode) {
+ case SHARED_BW:
+ seq_printf(s, " / %lu+)",
+ request / tegra_emc_bw_efficiency * 100);
+ break;
+ case SHARED_ISO_BW:
+ seq_printf(s, " / %lu / %lu+)",
+ request / tegra_emc_bw_efficiency * 100,
+ request / tegra_emc_iso_share * 100);
+ break;
+ case SHARED_CEILING_BUT_ISO:
+ case SHARED_CEILING:
+ seq_printf(s, "%s)", "^");
+ break;
+ default:
+ seq_printf(s, ")");
+ }
+ }
+ seq_printf(s, "\n");
if (c->dvfs)
dvfs_show_one(s, c->dvfs, level + 1);
static int clock_tree_show(struct seq_file *s, void *data)
{
struct clk *c;
- seq_printf(s, " clock state ref div rate\n");
- seq_printf(s, "--------------------------------------------------------------\n");
+ seq_printf(s, " clock state ref div rate (shared req / bw_margin / iso_margin)\n");
+ seq_printf(s, "-----------------------------------------------------------------------------------------------------------\n");
mutex_lock(&clock_list_lock);
-
- clk_lock_all();
-
+ if (!tegra_platform_is_fpga())
+ clk_lock_all();
list_for_each_entry(c, &clocks, node)
if (c->parent == NULL)
clock_tree_show_one(s, c, 0);
-
- clk_unlock_all();
-
+ if (!tegra_platform_is_fpga())
+ clk_unlock_all();
mutex_unlock(&clock_list_lock);
return 0;
}
.release = single_release,
};
+static void syncevent_one(struct clk *c)
+{
+ struct clk *child;
+
+ if (c->state == ON)
+ trace_clock_enable(c->name, 1, smp_processor_id());
+ else
+ trace_clock_disable(c->name, 0, smp_processor_id());
+
+ trace_clock_set_rate(c->name, clk_get_rate_all_locked(c),
+ smp_processor_id());
+
+ list_for_each_entry(child, &clocks, node) {
+ if (child->parent != c)
+ continue;
+
+ syncevent_one(child);
+ }
+}
+
+static int syncevent_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct clk *c;
+ char buffer[40];
+ int buf_size;
+
+ memset(buffer, 0, sizeof(buffer));
+ buf_size = min(count, (sizeof(buffer)-1));
+
+ if (copy_from_user(buffer, user_buf, buf_size))
+ return -EFAULT;
+
+ if (!strnicmp("all", buffer, 3)) {
+ mutex_lock(&clock_list_lock);
+
+ clk_lock_all();
+
+ list_for_each_entry(c, &clocks, node) {
+ if (c->parent == NULL)
+ syncevent_one(c);
+ }
+
+ clk_unlock_all();
+
+ mutex_unlock(&clock_list_lock);
+ }
+
+ return count;
+}
+
+static const struct file_operations syncevent_fops = {
+ .write = syncevent_write,
+};
+
static int possible_parents_show(struct seq_file *s, void *data)
{
struct clk *c = s->private;
int i;
+ bool first = true;
for (i = 0; c->inputs[i].input; i++) {
- char *first = (i == 0) ? "" : " ";
- seq_printf(s, "%s%s", first, c->inputs[i].input->name);
+ if (tegra_clk_is_parent_allowed(c, c->inputs[i].input)) {
+ seq_printf(s, "%s%s", first ? "" : " ",
+ c->inputs[i].input->name);
+ first = false;
+ }
}
seq_printf(s, "\n");
return 0;
return 0;
}
+static int max_get(void *data, u64 *val)
+{
+ struct clk *c = (struct clk *)data;
+ *val = (u64)clk_get_max_rate(c);
+ return 0;
+}
+
#ifdef CONFIG_TEGRA_CLOCK_DEBUG_WRITE
-static const mode_t parent_rate_mode = S_IRUGO | S_IWUGO;
+static const mode_t parent_rate_mode = S_IRUGO | S_IWUSR;
static ssize_t parent_write(struct file *file,
const char __user *userbuf, size_t count, loff_t *ppos)
struct clk *c = (struct clk *)data;
if (val)
- return clk_enable(c);
+ return tegra_clk_prepare_enable(c);
else {
- clk_disable(c);
+ tegra_clk_disable_unprepare(c);
return 0;
}
}
DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, state_set, "%llu\n");
+static int _max_set(struct clk *c, unsigned long val)
+{
+ int i;
+ bool found = false;
+ c->max_rate = val;
+
+ if (c->dvfs && c->dvfs->max_millivolts) {
+ /* Walk through dvfs freqs table and set freq of ith item to
+ * max_rate if found its dvfs voltage equals to max dvfs voltage
+ * otherwise set freq of last item to max_rate
+ */
+ for (i = 0; i < c->dvfs->num_freqs; i++) {
+ if (c->dvfs->millivolts[i] == c->dvfs->max_millivolts) {
+ c->dvfs->freqs[i] = c->max_rate;
+ found = true;
+ }
+ }
+ if (!found)
+ c->dvfs->freqs[i-1] = c->max_rate;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_TEGRA_CLOCK_DEBUG_FUNC
+int tegra_clk_set_max(struct clk *c, unsigned long rate)
+{
+ return _max_set(c, rate);
+}
+EXPORT_SYMBOL(tegra_clk_set_max);
+#endif
+
+static int max_set(void *data, u64 val)
+{
+ struct clk *c = (struct clk *)data;
+ return _max_set(c, (unsigned long)val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(max_fops, max_get, max_set, "%llu\n");
+
#else
static const mode_t parent_rate_mode = S_IRUGO;
DEFINE_SIMPLE_ATTRIBUTE(rate_fops, rate_get, NULL, "%llu\n");
DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, NULL, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(max_fops, max_get, NULL, "%llu\n");
#endif
+static int time_on_get(void *data, u64 *val)
+{
+ unsigned long flags;
+ struct clk *c = (struct clk *)data;
+
+ clk_lock_save(c, &flags);
+ clk_stats_update(c);
+ *val = cputime64_to_clock_t(c->stats.time_on);
+ clk_unlock_restore(c, &flags);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(time_on_fops, time_on_get, NULL, "%llu\n");
+
+static int possible_rates_show(struct seq_file *s, void *data)
+{
+ struct clk *c = s->private;
+ long rate = 0;
+
+ /* shared bus clock must round up, unless top of range reached */
+ while (rate <= c->max_rate) {
+ unsigned long rounded_rate = c->ops->round_rate(c, rate);
+ if (IS_ERR_VALUE(rounded_rate) || (rounded_rate <= rate))
+ break;
+
+ rate = rounded_rate + 2000; /* 2kHz resolution */
+ seq_printf(s, "%ld ", rounded_rate / 1000);
+ }
+ seq_printf(s, "(kHz)\n");
+ return 0;
+}
+
+static int possible_rates_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, possible_rates_show, inode->i_private);
+}
+
+static const struct file_operations possible_rates_fops = {
+ .open = possible_rates_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int use_alt_freq_get(void *data, u64 *val)
+{
+ struct clk *c = (struct clk *)data;
+ *val = c->dvfs->use_alt_freqs;
+ return 0;
+}
+static int use_alt_freq_set(void *data, u64 val)
+{
+ struct clk *c = (struct clk *)data;
+ return tegra_dvfs_use_alt_freqs_on_clk(c, val);
+}
+DEFINE_SIMPLE_ATTRIBUTE(use_alt_freq_fops,
+ use_alt_freq_get, use_alt_freq_set, "%llu\n");
+
+static ssize_t fmax_at_vmin_write(struct file *file,
+ const char __user *userbuf, size_t count, loff_t *ppos)
+{
+ struct clk *c = file->f_path.dentry->d_inode->i_private;
+ unsigned long f_max;
+ int v_min;
+ char buf[32];
+
+ if (sizeof(buf) <= count)
+ return -EINVAL;
+
+ if (copy_from_user(buf, userbuf, count))
+ return -EFAULT;
+
+ /* terminate buffer and trim - white spaces may be appended
+ * at the end when invoked from shell command line */
+ buf[count] = '\0';
+ strim(buf);
+
+ if (sscanf(buf, "%lu_at_%d", &f_max, &v_min) != 2)
+ return -EINVAL;
+
+ tegra_dvfs_set_fmax_at_vmin(c, f_max, v_min);
+
+ return count;
+}
+
+static const struct file_operations fmax_at_vmin_fops = {
+ .write = fmax_at_vmin_write,
+};
+
static int clk_debugfs_register_one(struct clk *c)
{
struct dentry *d;
if (!d)
goto err_out;
- d = debugfs_create_u32("max", S_IRUGO, c->dent, (u32 *)&c->max_rate);
+ d = debugfs_create_x32("shared_bus_flags", S_IRUGO, c->dent,
+ (u32 *)&c->shared_bus_flags);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_file(
+ "max", parent_rate_mode, c->dent, c, &max_fops);
+ if (!d)
+ goto err_out;
+
+ d = debugfs_create_u32("min", S_IRUGO, c->dent, (u32 *)&c->min_rate);
if (!d)
goto err_out;
if (!d)
goto err_out;
+ d = debugfs_create_file(
+ "time_on", S_IRUGO, c->dent, c, &time_on_fops);
+ if (!d)
+ goto err_out;
+
if (c->inputs) {
d = debugfs_create_file("possible_parents", S_IRUGO, c->dent,
c, &possible_parents_fops);
goto err_out;
}
+ /* show possible rates only of the top-most shared buses */
+ if ((c->ops && c->ops->round_rate && c->ops->shared_bus_update) &&
+ !(c->parent && c->parent->ops && c->parent->ops->round_rate &&
+ c->parent->ops->shared_bus_update)) {
+ d = debugfs_create_file("possible_rates", S_IRUGO, c->dent,
+ c, &possible_rates_fops);
+ if (!d)
+ goto err_out;
+ }
+
+ if (c->dvfs) {
+ d = debugfs_create_file("use_alt_freq", S_IRUGO | S_IWUSR,
+ c->dent, c, &use_alt_freq_fops);
+ if (!d)
+ goto err_out;
+ }
+
+ if (c->dvfs && c->dvfs->can_override) {
+ d = debugfs_create_file("fmax_at_vmin", S_IWUSR, c->dent,
+ c, &fmax_at_vmin_fops);
+ if (!d)
+ goto err_out;
+ }
+
return 0;
err_out:
if (!d)
goto err_out;
- if (dvfs_debugfs_init(clk_debugfs_root))
+ d = debugfs_create_file("syncevents", S_IRUGO|S_IWUSR, clk_debugfs_root, NULL,
+ &syncevent_fops);
+
+ if (!d || dvfs_debugfs_init(clk_debugfs_root))
goto err_out;
list_for_each_entry(c, &clocks, node) {