*
* CPU complex suspend & resume functions for Tegra SoCs
*
- * Copyright (c) 2009-2012, NVIDIA Corporation.
+ * Copyright (c) 2009-2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
-#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/cpu_pm.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/tegra_audio.h>
#include <trace/events/power.h>
+#include <trace/events/nvsecurity.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include "pm.h"
#include "pm-irq.h"
#include "reset.h"
+#include "pmc.h"
#include "sleep.h"
#include "timer.h"
#include "dvfs.h"
#include "cpu-tegra.h"
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+#include "tegra14_scratch.h"
+#endif
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/nvpower.h>
struct suspend_context {
/*
u8 uart[5];
struct tegra_twd_context twd;
-#ifdef CONFIG_ARM_ARCH_TIMER
- struct arch_timer_context arch_timer;
-#endif
};
+#define PMC_CTRL 0x0
#ifdef CONFIG_PM_SLEEP
phys_addr_t tegra_pgd_phys; /* pgd used by hotplug & LP2 bootup */
static cpumask_t tegra_in_lp2;
static cpumask_t *iram_cpu_lp2_mask;
static unsigned long *iram_cpu_lp1_mask;
+static unsigned long *iram_mc_clk_mask;
static u8 *iram_save;
static unsigned long iram_save_size;
static void __iomem *iram_code = IO_ADDRESS(TEGRA_IRAM_CODE_AREA);
static void __iomem *clk_rst = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+static void __iomem *tert_ictlr = IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE);
+#endif
+static void __iomem *tmrus_reg_base = IO_ADDRESS(TEGRA_TMR1_BASE);
static int tegra_last_pclk;
+static u64 resume_time;
+static u64 resume_entry_time;
+static u64 suspend_time;
+static u64 suspend_entry_time;
+#endif
+
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+static void update_pmc_registers(unsigned long rate);
#endif
struct suspend_context tegra_sctx;
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_SE) && defined(CONFIG_ARCH_TEGRA_14x_SOC)
+extern struct device *get_se_device(void);
+extern int se_suspend(struct device *dev, bool pooling);
+extern struct device *get_smmu_device(void);
+extern int tegra_smmu_resume(struct device *dev);
+extern int tegra_smmu_suspend(struct device *dev);
+#endif
#define TEGRA_POWER_PWRREQ_POLARITY (1 << 8) /* core power request polarity */
#define TEGRA_POWER_PWRREQ_OE (1 << 9) /* core power request enable */
#define TEGRA_POWER_CPU_PWRREQ_OE (1 << 16) /* CPU power request enable */
#define TEGRA_POWER_CPUPWRGOOD_EN (1 << 19) /* CPU power good enable */
+#define TEGRA_DPAD_ORIDE_SYS_CLK_REQ (1 << 21)
+
#define PMC_CTRL 0x0
#define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
#define PMC_WAKE_MASK 0xc
#define PMC_DPAD_ORIDE 0x1C
#define PMC_WAKE_DELAY 0xe0
#define PMC_DPD_SAMPLE 0x20
-#define PMC_IO_DPD_REQ_0 0x1b8
-#define PMC_IO_DPD2_REQ_0 0X1C0
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC) || defined(CONFIG_ARCH_TEGRA_12x_SOC)
+#define PMC_DPD_ENABLE 0x24
+#endif
+#define PMC_IO_DPD_REQ 0x1B8
+#define PMC_IO_DPD2_REQ 0x1C0
+
#define PMC_WAKE_STATUS 0x14
#define PMC_SW_WAKE_STATUS 0x18
static struct clk *tegra_dfll;
#endif
static struct clk *tegra_pclk;
-static const struct tegra_suspend_platform_data *pdata;
+static struct tegra_suspend_platform_data *pdata;
static enum tegra_suspend_mode current_suspend_mode = TEGRA_SUSPEND_NONE;
#if defined(CONFIG_TEGRA_CLUSTER_CONTROL) && INSTRUMENT_CLUSTER_SWITCH
-enum tegra_cluster_switch_time_id {
- tegra_cluster_switch_time_id_start = 0,
- tegra_cluster_switch_time_id_prolog,
- tegra_cluster_switch_time_id_switch,
- tegra_cluster_switch_time_id_epilog,
- tegra_cluster_switch_time_id_max
-};
-
static unsigned long
tegra_cluster_switch_times[tegra_cluster_switch_time_id_max];
-#define tegra_cluster_switch_time(flags, id) \
- do { \
- barrier(); \
- if (flags & TEGRA_POWER_CLUSTER_MASK) { \
- void __iomem *timer_us = \
- IO_ADDRESS(TEGRA_TMRUS_BASE); \
- if (id < tegra_cluster_switch_time_id_max) \
- tegra_cluster_switch_times[id] = \
- readl(timer_us); \
- wmb(); \
- } \
- barrier(); \
- } while(0)
-#else
-#define tegra_cluster_switch_time(flags, id) do {} while(0)
+struct tegra_cluster_switch_time_stats {
+ unsigned long sum;
+ unsigned long avg;
+ unsigned long exp_avg;
+ unsigned long max;
+ int cnt;
+};
+
+static struct tegra_cluster_switch_time_stats lp2g_stats;
+static struct tegra_cluster_switch_time_stats g2lp_stats;
+
+/*
+ * tegra_cluster_switch_time - record a cluster-switch phase timestamp
+ * and accumulate switch-duration statistics.
+ *
+ * @flags: power flags for the transition; only calls with a bit of
+ *         TEGRA_POWER_CLUSTER_MASK set are recorded.
+ * @id:    tegra_cluster_switch_time_id_* phase marker; out-of-range
+ *         ids are ignored.
+ *
+ * On the final marker (tegra_cluster_switch_time_id_end) the elapsed
+ * time since the start marker is folded into the per-direction stats
+ * bucket (LP->G vs. G->LP).
+ */
+void tegra_cluster_switch_time(unsigned int flags, int id)
+{
+	unsigned long t;
+	struct tegra_cluster_switch_time_stats *stats;
+
+	if (!(flags & TEGRA_POWER_CLUSTER_MASK) ||
+	    (id >= tegra_cluster_switch_time_id_max))
+		return;
+
+	/* Raw microsecond timestamp; wmb() orders it before later phases. */
+	tegra_cluster_switch_times[id] = tegra_read_usec_raw();
+	wmb();
+	if (id != tegra_cluster_switch_time_id_end)
+		return;
+
+	/* Direction of the switch selects the stats bucket. */
+	stats = flags & TEGRA_POWER_CLUSTER_G ? &lp2g_stats : &g2lp_stats;
+
+	t = tegra_cluster_switch_times[tegra_cluster_switch_time_id_end] -
+		tegra_cluster_switch_times[tegra_cluster_switch_time_id_start];
+	if (stats->max < t)
+		stats->max = t;
+
+	stats->sum += t;
+	stats->cnt++;
+	if (stats->cnt < CLUSTER_SWITCH_AVG_SAMPLES)
+		return;
+
+	/* NOTE(review): "avg" holds the whole window *sum*, not sum/cnt;
+	 * the exponential average below appears to assume
+	 * CLUSTER_SWITCH_AVG_SAMPLES == 1 << CLUSTER_SWITCH_TIME_AVG_SHIFT
+	 * so the scaling cancels - confirm against those definitions. */
+	stats->avg = stats->sum;
+	stats->cnt = stats->sum = 0;
+	if (!stats->exp_avg) {
+		stats->exp_avg = stats->avg;	/* 1st window sample */
+		return;
+	}
+	stats->exp_avg = (stats->exp_avg * (CLUSTER_SWITCH_AVG_SAMPLES - 1) +
+			stats->avg) >> CLUSTER_SWITCH_TIME_AVG_SHIFT;
+}
#endif
#ifdef CONFIG_PM_SLEEP
[TEGRA_SUSPEND_LP0] = "lp0",
};
+/*
+ * Compute the resume duration in microseconds: called once resume is
+ * complete, subtracts the entry timestamp (resume_entry_time) from the
+ * current value of the 1 MHz TMR counter.
+ */
+void tegra_log_resume_time(void)
+{
+	u64 resume_end_time = readl(tmrus_reg_base + TIMERUS_CNTR_1US);
+
+	/* The counter read is 32-bit; if it wrapped since entry, borrow
+	 * one period so the subtraction stays positive. */
+	if (resume_entry_time > resume_end_time)
+		resume_end_time |= 1ull<<32;
+	resume_time = resume_end_time - resume_entry_time;
+}
+
+/* Capture the suspend-entry timestamp from the 1 MHz TMR counter. */
+void tegra_log_suspend_time(void)
+{
+	suspend_entry_time = readl(tmrus_reg_base + TIMERUS_CNTR_1US);
+}
+
+/*
+ * Compute the suspend duration in microseconds from the entry
+ * timestamp recorded by tegra_log_suspend_time().
+ */
+static void tegra_get_suspend_time(void)
+{
+	u64 suspend_end_time;
+	suspend_end_time = readl(tmrus_reg_base + TIMERUS_CNTR_1US);
+
+	/* Handle 32-bit counter wrap-around, as in tegra_log_resume_time(). */
+	if (suspend_entry_time > suspend_end_time)
+		suspend_end_time |= 1ull<<32;
+	suspend_time = suspend_end_time - suspend_entry_time;
+}
+
unsigned long tegra_cpu_power_good_time(void)
{
if (WARN_ON_ONCE(!pdata))
return pdata->cpu_lp2_min_residency;
}
+/*
+ * Minimum idle residency required to make MC clock stop worthwhile.
+ * Presumably microseconds, matching the other residency helpers here -
+ * TODO confirm units against callers.
+ */
+unsigned long tegra_mc_clk_stop_min_residency(void)
+{
+	return 20000;
+}
+
#ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE
-unsigned long tegra_min_residency_noncpu(void)
+#define TEGRA_MIN_RESIDENCY_VMIN_FMIN 2000
+#define TEGRA_MIN_RESIDENCY_NCPU_SLOW 2000
+#define TEGRA_MIN_RESIDENCY_NCPU_FAST 13000
+#define TEGRA_MIN_RESIDENCY_CRAIL 20000
+
+/*
+ * Minimum residency for the Vmin/Fmin state: platform-data override
+ * when set, otherwise the built-in default.
+ */
+unsigned long tegra_min_residency_vmin_fmin(void)
+{
+	return pdata && pdata->min_residency_vmin_fmin
+		? pdata->min_residency_vmin_fmin
+		: TEGRA_MIN_RESIDENCY_VMIN_FMIN;
+}
+
+/*
+ * Minimum non-CPU partition residency for the current cluster
+ * (slow/LP vs. fast/G): platform-data override when set, otherwise
+ * built-in defaults.
+ * NOTE(review): empty parameter list should be (void) in C - the
+ * declaration style differs from the sibling helpers here.
+ */
+unsigned long tegra_min_residency_ncpu()
+{
+	if (is_lp_cluster()) {
+		return pdata && pdata->min_residency_ncpu_slow
+			? pdata->min_residency_ncpu_slow
+			: TEGRA_MIN_RESIDENCY_NCPU_SLOW;
	} else
+		return pdata && pdata->min_residency_ncpu_fast
+			? pdata->min_residency_ncpu_fast
+			: TEGRA_MIN_RESIDENCY_NCPU_FAST;
}
+/*
+ * Minimum residency to justify CPU rail (CRAIL) power-gating:
+ * platform-data override when set, otherwise the built-in default.
+ */
unsigned long tegra_min_residency_crail(void)
{
-	return pdata->min_residency_crail;
+	return pdata && pdata->min_residency_crail
+		? pdata->min_residency_crail
+		: TEGRA_MIN_RESIDENCY_CRAIL;
+}
+
+/* True when platform data allows starting the CRAIL power-up early. */
+bool tegra_crail_can_start_early(void)
+{
+	return pdata && pdata->crail_up_early;
+}
#endif
+/*
+ * Park the CPU DFLL for suspend.
+ * @flags: power flags of the pending transition; the open-loop switch
+ *         is skipped during cluster switches (CLUSTER bits set).
+ */
-static void suspend_cpu_dfll_mode(void)
+static void suspend_cpu_dfll_mode(unsigned int flags)
{
#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
	/* If DFLL is used as CPU clock source go to open loop mode */
-	if (!is_lp_cluster() && tegra_dfll &&
-	    tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
-		tegra_clk_cfg_ex(tegra_dfll, TEGRA_CLK_DFLL_LOCK, 0);
+	if (!(flags & TEGRA_POWER_CLUSTER_MASK)) {
+		if (!is_lp_cluster() && tegra_dfll &&
+		    tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
+			tegra_clk_cfg_ex(tegra_dfll, TEGRA_CLK_DFLL_LOCK, 0);
+	}
+
+	/* Suspend dfll bypass (safe rail down) on LP or if DFLL is Not used */
+	if (pdata && pdata->suspend_dfll_bypass &&
+	    (!tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail) || is_lp_cluster()))
+		pdata->suspend_dfll_bypass();
#endif
}
+/*
+ * Restore CPU DFLL state on resume; inverse of suspend_cpu_dfll_mode().
+ * @flags: power flags of the completed transition; the closed-loop
+ *         restore is skipped during cluster switches (CLUSTER bits set).
+ */
-static void resume_cpu_dfll_mode(void)
+static void resume_cpu_dfll_mode(unsigned int flags)
{
#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
+	/* If DFLL is Not used and resume on G restore bypass mode */
+	if (pdata && pdata->resume_dfll_bypass && !is_lp_cluster() &&
+	    !tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
+		pdata->resume_dfll_bypass();
+
	/* If DFLL is used as CPU clock source restore closed loop mode */
-	if (!is_lp_cluster() && tegra_dfll &&
-	    tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
-		tegra_clk_cfg_ex(tegra_dfll, TEGRA_CLK_DFLL_LOCK, 1);
+	if (!(flags & TEGRA_POWER_CLUSTER_MASK)) {
+		if (!is_lp_cluster() && tegra_dfll &&
+		    tegra_dvfs_rail_is_dfll_mode(tegra_cpu_rail))
+			tegra_clk_cfg_ex(tegra_dfll, TEGRA_CLK_DFLL_LOCK, 1);
+	}
#endif
}
identity_mapping_add(tegra_pgd, IO_IRAM_VIRT,
IO_IRAM_VIRT + SECTION_SIZE, 0);
+#if defined(CONFIG_ARM_LPAE)
+ tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK);
+#else
/* inner/outer write-back/write-allocate, sharable */
tegra_pgd_phys = (virt_to_phys(tegra_pgd) & PAGE_MASK) | 0x4A;
+#endif
return 0;
}
udelay(130);
}
+#if !defined(CONFIG_OF) || !defined(CONFIG_COMMON_CLK)
static void set_power_timers(unsigned long us_on, unsigned long us_off,
long rate)
{
tegra_last_pclk = pclk;
last_us_off = us_off;
}
+#endif
+
+/*
+ * Clamp the CPU power-good/power-off timers so they never exceed the
+ * given limits, by programming them at the minimum pclk rate.
+ * NOTE(review): set_power_timers() is only defined above when
+ * !(CONFIG_OF && CONFIG_COMMON_CLK), yet this caller uses the
+ * 3-argument form unconditionally - confirm an alternate definition
+ * exists for OF/common-clk builds.
+ */
+void tegra_limit_cpu_power_timers(unsigned long us_on, unsigned long us_off)
+{
+	/* make sure power timers would not exceed specified limits */
+	set_power_timers(us_on, us_off, clk_get_min_rate(tegra_pclk));
+}
+
+void (*tegra_tear_down_cpu)(void);
/*
* restore_cpu_complex
*/
static void restore_cpu_complex(u32 mode)
{
- int cpu = smp_processor_id();
+ int cpu = cpu_logical_map(smp_processor_id());
unsigned int reg;
#if defined(CONFIG_ARCH_TEGRA_2x_SOC) || defined(CONFIG_ARCH_TEGRA_3x_SOC)
unsigned int policy;
#endif
- BUG_ON(cpu != 0);
-
-#ifdef CONFIG_SMP
- cpu = cpu_logical_map(cpu);
-#endif
-
/*
* On Tegra11x PLLX and CPU burst policy is either preserved across LP2,
* or restored by common clock suspend/resume procedures. Hence, we don't
idle or system suspend, the local timer was shut down and
timekeeping switched over to the global system timer. In this
case keep local timer disabled, and restore only periodic load. */
+#ifdef CONFIG_HAVE_ARM_TWD
if (!(mode & (TEGRA_POWER_CLUSTER_MASK |
TEGRA_POWER_CLUSTER_IMMEDIATE))) {
-#ifdef CONFIG_ARM_ARCH_TIMER
- tegra_sctx.arch_timer.cntp_ctl = 0;
-#endif
-#ifdef CONFIG_HAVE_ARM_TWD
tegra_sctx.twd.twd_ctrl = 0;
-#endif
}
-#ifdef CONFIG_ARM_ARCH_TIMER
- arch_timer_resume(&tegra_sctx.arch_timer);
-#endif
-#ifdef CONFIG_HAVE_ARM_TWD
tegra_twd_resume(&tegra_sctx.twd);
#endif
}
*/
static void suspend_cpu_complex(u32 mode)
{
- int cpu = smp_processor_id();
+ int cpu = cpu_logical_map(smp_processor_id());
unsigned int reg;
int i;
BUG_ON(cpu != 0);
-#ifdef CONFIG_SMP
- cpu = cpu_logical_map(cpu);
-#endif
/* switch coresite to clk_m, save off original source */
tegra_sctx.clk_csite_src = readl(clk_rst + CLK_RESET_SOURCE_CSITE);
writel(3<<30, clk_rst + CLK_RESET_SOURCE_CSITE);
#ifdef CONFIG_HAVE_ARM_TWD
tegra_twd_suspend(&tegra_sctx.twd);
#endif
-#ifdef CONFIG_ARM_ARCH_TIMER
- arch_timer_suspend(&tegra_sctx.arch_timer);
-#endif
reg = readl(FLOW_CTRL_CPU_CSR(cpu));
reg &= ~FLOW_CTRL_CSR_WFE_BITMAP; /* clear wfe bitmap */
tegra_gic_cpu_disable(true);
}
-void tegra_clear_cpu_in_lp2(int cpu)
+void tegra_clear_cpu_in_pd(int cpu)
{
spin_lock(&tegra_lp2_lock);
BUG_ON(!cpumask_test_cpu(cpu, &tegra_in_lp2));
spin_unlock(&tegra_lp2_lock);
}
-bool tegra_set_cpu_in_lp2(int cpu)
+bool tegra_set_cpu_in_pd(int cpu)
{
bool last_cpu = false;
static void tegra_sleep_core(enum tegra_suspend_mode mode,
unsigned long v2p)
{
-#ifdef CONFIG_TRUSTED_FOUNDATIONS
+#ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
+ outer_flush_range(__pa(&tegra_resume_timestamps_start),
+ __pa(&tegra_resume_timestamps_end));
+
if (mode == TEGRA_SUSPEND_LP0) {
+ trace_smc_sleep_core(NVSEC_SMC_START);
+
tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE3,
virt_to_phys(tegra_resume));
} else {
+ trace_smc_sleep_core(NVSEC_SMC_START);
+
tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE6,
(TEGRA_RESET_HANDLER_BASE +
tegra_cpu_reset_handler_offset));
}
+
+ trace_smc_sleep_core(NVSEC_SMC_DONE);
#endif
+ tegra_get_suspend_time();
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
cpu_suspend(v2p, tegra2_sleep_core_finish);
#else
+/* Power-gate the CPU via cpu_suspend(); the secure-kernel SMC path
+ * that used to live here moved into the reset handler flow. */
static inline void tegra_sleep_cpu(unsigned long v2p)
{
-#ifdef CONFIG_TRUSTED_FOUNDATIONS
-	tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE4,
+	cpu_suspend(v2p, tegra_sleep_cpu_finish);
+}
+
+/*
+ * Enter the CPU power-down state that also stops the MC clock.
+ * With a secure kernel, flush the resume-timestamp area to L2 and
+ * notify the secure monitor via SMC before suspending through
+ * tegra3_stop_mc_clk_finish().
+ */
+static inline void tegra_stop_mc_clk(unsigned long v2p)
+{
+#ifdef CONFIG_TEGRA_USE_SECURE_KERNEL
+	outer_flush_range(__pa(&tegra_resume_timestamps_start),
+			  __pa(&tegra_resume_timestamps_end));
+	trace_smc_sleep_core(NVSEC_SMC_START);
+
+	tegra_generic_smc(0xFFFFFFFC, 0xFFFFFFE5,
		(TEGRA_RESET_HANDLER_BASE +
		tegra_cpu_reset_handler_offset));
+
+	trace_smc_sleep_core(NVSEC_SMC_DONE);
#endif
-	cpu_suspend(v2p, tegra_sleep_cpu_finish);
+	cpu_suspend(v2p, tegra3_stop_mc_clk_finish);
}
-unsigned int tegra_idle_lp2_last(unsigned int sleep_time, unsigned int flags)
+unsigned int tegra_idle_power_down_last(unsigned int sleep_time,
+ unsigned int flags)
{
u32 reg;
unsigned int remain;
-#ifndef CONFIG_ARCH_TEGRA_11x_SOC
- pgd_t *pgd;
-#endif
/* Only the last cpu down does the final suspend steps */
reg = readl(pmc + PMC_CTRL);
reg &= ~TEGRA_POWER_EFFECT_LP0;
writel(reg, pmc + PMC_CTRL);
- tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_start);
-
/*
* We can use clk_get_rate_all_locked() here, because all other cpus
* are in LP2 state and irqs are disabled
*/
+ suspend_cpu_dfll_mode(flags);
if (flags & TEGRA_POWER_CLUSTER_MASK) {
- trace_cpu_cluster(POWER_CPU_CLUSTER_START);
+ if (is_idle_task(current))
+ trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_START);
+ else
+ trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_START);
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+ set_power_timers(pdata->cpu_timer, 2);
+#else
set_power_timers(pdata->cpu_timer, 2,
clk_get_rate_all_locked(tegra_pclk));
+#endif
if (flags & TEGRA_POWER_CLUSTER_G) {
/*
* To reduce the vdd_cpu up latency when LP->G
* transition. Before the transition, enable
* the vdd_cpu rail.
*/
- if (is_lp_cluster()) {
+ if (!tegra_crail_can_start_early() && is_lp_cluster()) {
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
reg = readl(FLOW_CTRL_CPU_PWR_CSR);
reg |= FLOW_CTRL_CPU_PWR_CSR_RAIL_ENABLE;
}
tegra_cluster_switch_prolog(flags);
} else {
- suspend_cpu_dfll_mode();
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+ set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer);
+#else
set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer,
clk_get_rate_all_locked(tegra_pclk));
+#endif
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
reg = readl(FLOW_CTRL_CPU_CSR(0));
reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
}
if (sleep_time)
- tegra_lp2_set_trigger(sleep_time);
+ tegra_pd_set_trigger(sleep_time);
cpu_cluster_pm_enter();
suspend_cpu_complex(flags);
tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_prolog);
-#ifndef CONFIG_ARCH_TEGRA_11x_SOC
+#if defined(CONFIG_CACHE_L2X0)
+#if defined(CONFIG_TEGRA_USE_SECURE_KERNEL)
flush_cache_all();
- /*
- * No need to flush complete L2. Cleaning kernel and IO mappings
- * is enough for the LP code sequence that has L2 disabled but
- * MMU on.
- */
- pgd = cpu_get_pgd();
- outer_clean_range(__pa(pgd + USER_PTRS_PER_PGD),
- __pa(pgd + PTRS_PER_PGD));
outer_disable();
+#elif !defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ tegra_resume_l2_init = 1;
+ __cpuc_flush_dcache_area(&tegra_resume_l2_init, sizeof(unsigned long));
+ outer_flush_range(__pa(&tegra_resume_l2_init),
+ __pa(&tegra_resume_l2_init) + sizeof(unsigned long));
+#endif
+#endif
+
+	/* T148: check mem_req and mem_req_soon only when entering the
+	 * MC clock stop state.
+ */
+ if (flags & TEGRA_POWER_STOP_MC_CLK) {
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ u32 val;
+
+	/* If mem_req or mem_req_soon is asserted, or a voice call is
+	 * active, skip SDRAM self-refresh and only do CPU
+	 * power-gating.
+	 */
+ val = readl(pmc + PMC_IPC_STS);
+ if ((val & (PMC_IPC_STS_MEM_REQ | PMC_IPC_STS_MEM_REQ_SOON)) ||
+ tegra_is_voice_call_active()) {
+
+ /* Reset LP1 and MC clock mask if we skipping SDRAM
+ * self-refresh.
+ */
+ *iram_cpu_lp1_mask = 0;
+ *iram_mc_clk_mask = 0;
+ writel(0, pmc + PMC_SCRATCH41);
+
+ tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
+ } else {
+ /* Clear mem_sts since SDRAM will not be accessible
+ * to BBC in this state.
+ */
+ val = PMC_IPC_CLR_MEM_STS;
+ writel(val, pmc + PMC_IPC_CLR);
+
+ tegra_stop_mc_clk(PHYS_OFFSET - PAGE_OFFSET);
+ }
+#else
+ /* If it is not T148 then we do not have to
+ * check mem_req and mem_req_soon.
+ */
+ tegra_stop_mc_clk(PHYS_OFFSET - PAGE_OFFSET);
#endif
- tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
+ } else {
+ tegra_sleep_cpu(PHYS_OFFSET - PAGE_OFFSET);
+ }
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ tegra_init_cache(true);
+#elif defined(CONFIG_TEGRA_USE_SECURE_KERNEL)
tegra_init_cache(false);
+#endif
+
+#if defined(CONFIG_TRUSTED_FOUNDATIONS)
+#ifndef CONFIG_ARCH_TEGRA_11x_SOC
+ trace_smc_wake(tegra_resume_smc_entry_time, NVSEC_SMC_START);
+ trace_smc_wake(tegra_resume_smc_exit_time, NVSEC_SMC_DONE);
+#endif
+#endif
+
tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_switch);
restore_cpu_complex(flags);
cpu_cluster_pm_exit();
- remain = tegra_lp2_timer_remain();
+ remain = tegra_pd_timer_remain();
if (sleep_time)
- tegra_lp2_set_trigger(0);
+ tegra_pd_set_trigger(0);
if (flags & TEGRA_POWER_CLUSTER_MASK) {
tegra_cluster_switch_epilog(flags);
if (is_idle_task(current))
- trace_cpu_cluster_rcuidle(POWER_CPU_CLUSTER_DONE);
+ trace_nvcpu_cluster_rcuidle(NVPOWER_CPU_CLUSTER_DONE);
else
- trace_cpu_cluster(POWER_CPU_CLUSTER_DONE);
- } else {
- resume_cpu_dfll_mode();
+ trace_nvcpu_cluster(NVPOWER_CPU_CLUSTER_DONE);
}
+ resume_cpu_dfll_mode(flags);
tegra_cluster_switch_time(flags, tegra_cluster_switch_time_id_epilog);
#if INSTRUMENT_CLUSTER_SWITCH
if (flags & TEGRA_POWER_CLUSTER_MASK) {
- pr_err("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
+ pr_debug("%s: prolog %lu us, switch %lu us, epilog %lu us, total %lu us\n",
is_lp_cluster() ? "G=>LP" : "LP=>G",
tegra_cluster_switch_times[tegra_cluster_switch_time_id_prolog] -
tegra_cluster_switch_times[tegra_cluster_switch_time_id_start],
return remain;
}
+/*
+ * Prepare IRAM for the MC-clock-stop path: save the current IRAM
+ * contents, install the SDRAM shutdown/reset code, raise the LP1 and
+ * MC-clock IRAM masks, and point PMC_SCRATCH41 at tegra_resume so the
+ * wake path re-enters the kernel.
+ */
+void tegra_mc_clk_prepare(void)
+{
+	/* copy the reset vector and SDRAM shutdown code into IRAM */
+	memcpy(iram_save, iram_code, iram_save_size);
+	memcpy(iram_code, tegra_iram_start(), iram_save_size);
+
+	*iram_cpu_lp1_mask = 1;
+	*iram_mc_clk_mask = 1;
+
+	/* wmb() makes sure the scratch write is posted before sleeping */
+	__raw_writel(virt_to_phys(tegra_resume), pmc + PMC_SCRATCH41);
+	wmb();
+}
+
+/* Undo tegra_mc_clk_prepare(): restore IRAM and clear the wake hints. */
+void tegra_mc_clk_finish(void)
+{
+	/* restore IRAM */
+	memcpy(iram_code, iram_save, iram_save_size);
+	*iram_cpu_lp1_mask = 0;
+	*iram_mc_clk_mask = 0;
+	writel(0, pmc + PMC_SCRATCH41);
+}
+
+#ifdef CONFIG_TEGRA_LP1_LOW_COREVOLTAGE
+/* Nonzero when the currently configured suspend mode is LP1. */
+int tegra_is_lp1_suspend_mode(void)
+{
+	return (current_suspend_mode == TEGRA_SUSPEND_LP1);
+}
+#endif
+
static int tegra_common_suspend(void)
{
void __iomem *mc = IO_ADDRESS(TEGRA_MC_BASE);
tegra_sctx.mc[1] = readl(mc + MC_SECURITY_SIZE);
tegra_sctx.mc[2] = readl(mc + MC_SECURITY_CFG2);
+#ifdef CONFIG_TEGRA_LP1_LOW_COREVOLTAGE
+ if (pdata && pdata->lp1_lowvolt_support) {
+ u32 lp1_core_lowvolt =
+ (tegra_is_voice_call_active() ||
+ tegra_dvfs_rail_get_thermal_floor(tegra_core_rail)) ?
+ pdata->lp1_core_volt_low_cold << 8 :
+ pdata->lp1_core_volt_low << 8;
+
+ lp1_core_lowvolt |= pdata->core_reg_addr;
+ memcpy(tegra_lp1_register_core_lowvolt(), &lp1_core_lowvolt, 4);
+ }
+#endif
+
/* copy the reset vector and SDRAM shutdown code into IRAM */
memcpy(iram_save, iram_code, iram_save_size);
memcpy(iram_code, tegra_iram_start(), iram_save_size);
void __iomem *emc = IO_ADDRESS(TEGRA_EMC_BASE);
#endif
- /* Clear DPD sample */
- writel(0x0, pmc + PMC_DPD_SAMPLE);
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC) || defined(CONFIG_ARCH_TEGRA_12x_SOC)
+ /* Clear DPD Enable */
+ writel(0x0, pmc + PMC_DPD_ENABLE);
+#endif
writel(tegra_sctx.mc[0], mc + MC_SECURITY_START);
writel(tegra_sctx.mc[1], mc + MC_SECURITY_SIZE);
#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/* trigger emc mode write */
writel(EMC_MRW_DEV_NONE, emc + EMC_MRW_0);
-#endif
/* clear scratch registers shared by suspend and the reset pen */
writel(0x0, pmc + PMC_SCRATCH39);
+#endif
writel(0x0, pmc + PMC_SCRATCH41);
/* restore IRAM */
reg &= ~TEGRA_POWER_CPU_PWRREQ_OE;
}
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
/*
* LP0 boots through the AVP, which then resumes the AVP to
* the address in scratch 39, and the cpu to the address in
* scratch 41 to tegra_resume
*/
writel(0x0, pmc + PMC_SCRATCH39);
+#endif
/* Enable DPD sample to trigger sampling pads data and direction
* in which pad will be driven during lp0 mode*/
writel(0x1, pmc + PMC_DPD_SAMPLE);
#if !defined(CONFIG_ARCH_TEGRA_3x_SOC) && !defined(CONFIG_ARCH_TEGRA_2x_SOC)
- writel(0x800fffff, pmc + PMC_IO_DPD_REQ_0);
- writel(0x80001fff, pmc + PMC_IO_DPD2_REQ_0);
+#if defined(CONFIG_ARCH_TEGRA_11x_SOC) || defined(CONFIG_ARCH_TEGRA_12x_SOC)
+ writel(0x800fdfff, pmc + PMC_IO_DPD_REQ);
+#else
+ writel(0x800fffff, pmc + PMC_IO_DPD_REQ);
#endif
+ writel(0x80001fff, pmc + PMC_IO_DPD2_REQ);
+#endif
+
#ifdef CONFIG_ARCH_TEGRA_11x_SOC
/* this is needed only for T11x, not for other chips */
reg &= ~TEGRA_POWER_CPUPWRGOOD_EN;
BUG();
}
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+ set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer);
+#else
set_power_timers(pdata->cpu_timer, pdata->cpu_off_timer, rate);
+#endif
pmc_32kwritel(reg, PMC_CTRL);
}
[TEGRA_SUSPEND_LP0] = "LP0",
};
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_SE) && defined(CONFIG_ARCH_TEGRA_14x_SOC)
+/*
+ * Save the Security Engine (SE) context before suspend.
+ *
+ * The SMMU is resumed first so the SE suspend path can run, then the
+ * SE is suspended, and finally the SMMU itself is suspended.
+ *
+ * Returns 0 on success or a negative error code. The missing-device
+ * paths previously fell through with ret == 0, silently reporting
+ * success on failure; they now return -ENODEV, and all failure
+ * messages are logged at error level.
+ */
+static int save_se_context(void)
+{
+	struct device *smmu_dev, *se_dev;
+	int ret = 0;
+
+	smmu_dev = get_smmu_device();
+	if (!smmu_dev) {
+		pr_err("Failed to get smmu device\n");
+		ret = -ENODEV;
+		goto save_fail;
+	}
+
+	se_dev = get_se_device();
+	if (!se_dev) {
+		pr_err("Failed to get SE device \n");
+		ret = -ENODEV;
+		goto save_fail;
+	}
+
+	/* smmu resume needs to be called
+	 * for se_suspend() operation */
+	ret = tegra_smmu_resume(smmu_dev);
+	if (ret) {
+		pr_err("Failed to resume smmu device\n");
+		goto save_fail;
+	}
+
+	ret = se_suspend(se_dev, true);
+	if (ret) {
+		pr_err("Failed to suspend SE device\n");
+		goto save_fail;
+	}
+
+	ret = tegra_smmu_suspend(smmu_dev);
+	if (ret) {
+		pr_err("Failed to suspend smmu device\n");
+		goto save_fail;
+	}
+
+save_fail:
+	return ret;
+}
+#endif
+
static int tegra_suspend_enter(suspend_state_t state)
{
- int ret;
+ int ret = 0;
ktime_t delta;
struct timespec ts_entry, ts_exit;
goto abort_suspend;
}
+#if defined(CONFIG_CRYPTO_DEV_TEGRA_SE) && defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ ret = save_se_context();
+ if (ret) {
+ pr_info("Failed to save SE context\n");
+ goto abort_suspend;
+ }
+#endif
+
read_persistent_clock(&ts_exit);
if (timespec_compare(&ts_exit, &ts_entry) > 0) {
for (partid = 0; partid < TEGRA_NUM_POWERGATE; partid++)
if ((1 << partid) & pwrgate_partid_mask)
if (tegra_powergate_is_powered(partid))
- pr_warning("partition %s is left on before suspend\n",
+ pr_debug("partition %s is left on before suspend\n",
tegra_powergate_get_name(partid));
return;
}
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+/* This is the opposite of the LP1BB related PMC setup that occurs
+ * during suspend: disable wake detect, drop BB_MEM_REQ from the wake
+ * mask, clear its latched status, and stop the tertiary interrupt
+ * controller from forwarding pmc_wake as an interrupt.
+ */
+static void tegra_disable_lp1bb_interrupt(void)
+{
+	unsigned reg;
+	/* mem_req = 0 was set as an interrupt during LP1BB entry.
+	 * It has to be disabled now
+	 */
+	reg = readl(pmc + PMC_CTRL2);
+	reg &= ~(PMC_CTRL2_WAKE_DET_EN);
+	pmc_32kwritel(reg, PMC_CTRL2);
+
+	/* Program mem_req NOT to be a wake event */
+	reg = readl(pmc + PMC_WAKE2_MASK);
+	reg &= ~(PMC_WAKE2_BB_MEM_REQ);
+	pmc_32kwritel(reg, PMC_WAKE2_MASK);
+
+	/* Clear the latched BB_MEM_REQ wake status (presumably
+	 * write-1-to-clear - confirm against the PMC TRM). */
+	reg = PMC_WAKE2_BB_MEM_REQ;
+	pmc_32kwritel(reg, PMC_WAKE2_STATUS);
+
+	/* Set up the LIC to NOT accept pmc_wake events as interrupts */
+	reg = TRI_ICTLR_PMC_WAKE_INT;
+	writel(reg, tert_ictlr + TRI_ICTLR_CPU_IER_CLR);
+}
+#endif
+
+/* Gate or un-gate the given power partition around suspend:
+ * turn_off == true power-gates it, false un-gates it. */
+static void tegra_suspend_powergate_control(int partid, bool turn_off)
+{
+	if (turn_off) {
+		tegra_powergate_partition(partid);
+		return;
+	}
+	tegra_unpowergate_partition(partid);
+}
+
int tegra_suspend_dram(enum tegra_suspend_mode mode, unsigned int flags)
{
int err = 0;
u32 scratch37 = 0xDEADBEEF;
u32 reg;
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ u32 enter_state = 0;
+#endif
+ bool tegra_suspend_vde_powergated = false;
+
if (WARN_ON(mode <= TEGRA_SUSPEND_NONE ||
mode >= TEGRA_MAX_SUSPEND_MODE)) {
err = -ENXIO;
goto fail;
}
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ update_pmc_registers(tegra_lp1bb_emc_min_rate_get());
+#endif
+
if (tegra_is_voice_call_active()) {
/* backup the current value of scratch37 */
scratch37 = readl(pmc + PMC_SCRATCH37);
if ((mode == TEGRA_SUSPEND_LP0) || (mode == TEGRA_SUSPEND_LP1))
tegra_suspend_check_pwr_stats();
+ /* turn off VDE partition in LP1 */
+ if (mode == TEGRA_SUSPEND_LP1 &&
+ tegra_powergate_is_powered(TEGRA_POWERGATE_VDEC)) {
+ pr_info("turning off partition %s in LP1\n",
+ tegra_powergate_get_name(TEGRA_POWERGATE_VDEC));
+ tegra_suspend_powergate_control(TEGRA_POWERGATE_VDEC, true);
+ tegra_suspend_vde_powergated = true;
+ }
+
tegra_common_suspend();
tegra_pm_set(mode);
local_fiq_disable();
- trace_cpu_suspend(CPU_SUSPEND_START);
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ tegra_smp_save_power_mask();
+#endif
+
+ trace_cpu_suspend(CPU_SUSPEND_START, tegra_rtc_read_ms());
if (mode == TEGRA_SUSPEND_LP0) {
#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
tegra_lp0_suspend_mc();
tegra_cpu_reset_handler_save();
tegra_tsc_wait_for_suspend();
- tegra_smp_clear_power_mask();
+ if (!tegra_cpu_is_asim())
+ tegra_smp_clear_power_mask();
}
- else if (mode == TEGRA_SUSPEND_LP1)
+
+#if !defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ if (mode == TEGRA_SUSPEND_LP1)
+#endif
*iram_cpu_lp1_mask = 1;
suspend_cpu_complex(flags);
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
- /* In case of LP0, program external power gating accordinly */
- if (mode == TEGRA_SUSPEND_LP0) {
+	/* In case of LP0/1, program external power gating accordingly */
+ if (mode == TEGRA_SUSPEND_LP0 || mode == TEGRA_SUSPEND_LP1) {
reg = readl(FLOW_CTRL_CPU_CSR(0));
if (is_lp_cluster())
reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU; /* Non CPU */
#endif
flush_cache_all();
- outer_flush_all();
outer_disable();
if (mode == TEGRA_SUSPEND_LP2)
else
tegra_sleep_core(mode, PHYS_OFFSET - PAGE_OFFSET);
+ resume_entry_time = 0;
+ if (mode != TEGRA_SUSPEND_LP0)
+ resume_entry_time = readl(tmrus_reg_base + TIMERUS_CNTR_1US);
+
tegra_init_cache(true);
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ reg = readl(pmc + PMC_LP_STATE_SCRATCH_REG);
+ enter_state = (reg >> PMC_LP_STATE_BIT_OFFSET) & PMC_LP_STATE_BIT_MASK;
+ /* If we actually had entered in either LP1 or LP1BB,
+ * restore power mask and disable mem_req interrupt PMC
+ */
+ if (enter_state) {
+ pr_info("Exited state is LP1/LP1BB\n");
+ tegra_disable_lp1bb_interrupt();
+ tegra_smp_restore_power_mask();
+ }
+#endif
+
+#if defined(CONFIG_TEGRA_USE_SECURE_KERNEL)
+#ifndef CONFIG_ARCH_TEGRA_11x_SOC
+ trace_smc_wake(tegra_resume_smc_entry_time, NVSEC_SMC_START);
+ trace_smc_wake(tegra_resume_smc_exit_time, NVSEC_SMC_DONE);
+#endif
+
if (mode == TEGRA_SUSPEND_LP0) {
+ trace_secureos_init(tegra_resume_entry_time,
+ NVSEC_SUSPEND_EXIT_DONE);
+ }
+#endif
+
+ if (mode == TEGRA_SUSPEND_LP0) {
+
+	/* CPUPWRGOOD_EN is not enabled in HW, so this is disabled; *
+	 * otherwise it causes issues in cluster switching after LP0 *
#ifdef CONFIG_ARCH_TEGRA_11x_SOC
reg = readl(pmc+PMC_CTRL);
reg |= TEGRA_POWER_CPUPWRGOOD_EN;
pmc_32kwritel(reg, PMC_CTRL);
#endif
+ */
+
tegra_tsc_resume();
tegra_cpu_reset_handler_restore();
tegra_lp0_resume_mc();
tegra_tsc_wait_for_resume();
- } else if (mode == TEGRA_SUSPEND_LP1)
+ }
+
+#if !defined(CONFIG_ARCH_TEGRA_14x_SOC)
+ if (mode == TEGRA_SUSPEND_LP1)
+#endif
*iram_cpu_lp1_mask = 0;
/* if scratch37 was clobbered during LP1, restore it */
if (pdata && pdata->board_resume)
pdata->board_resume(mode, TEGRA_RESUME_AFTER_CPU);
- trace_cpu_suspend(CPU_SUSPEND_DONE);
+ trace_cpu_suspend(CPU_SUSPEND_DONE, tegra_rtc_read_ms());
local_fiq_enable();
tegra_common_resume();
+ /* turn on VDE partition in LP1 */
+ if (mode == TEGRA_SUSPEND_LP1 && tegra_suspend_vde_powergated) {
+ pr_info("turning on partition %s in LP1\n",
+ tegra_powergate_get_name(TEGRA_POWERGATE_VDEC));
+ tegra_suspend_powergate_control(TEGRA_POWERGATE_VDEC, false);
+ }
+
fail:
return err;
}
static struct kobj_attribute suspend_mode_attribute =
__ATTR(mode, 0644, suspend_mode_show, suspend_mode_store);
+/*
+ * sysfs show for "resume_time": last measured resume duration in
+ * milliseconds (resume_time itself is kept in microseconds, taken
+ * from the 1 MHz TMR counter).
+ */
+static ssize_t suspend_resume_time_show(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buf)
+{
+	/* scnprintf bounds the write to the sysfs PAGE_SIZE buffer,
+	 * unlike the unbounded sprintf used previously. */
+	return scnprintf(buf, PAGE_SIZE, "%ums\n", ((u32)resume_time / 1000));
+}
+
+static struct kobj_attribute suspend_resume_time_attribute =
+	__ATTR(resume_time, 0444, suspend_resume_time_show, NULL);
+
+/*
+ * sysfs show for "suspend_time": last measured suspend duration in
+ * milliseconds (suspend_time itself is kept in microseconds, taken
+ * from the 1 MHz TMR counter).
+ */
+static ssize_t suspend_time_show(struct kobject *kobj,
+					struct kobj_attribute *attr,
+					char *buf)
+{
+	/* scnprintf bounds the write to the sysfs PAGE_SIZE buffer,
+	 * unlike the unbounded sprintf used previously. */
+	return scnprintf(buf, PAGE_SIZE, "%ums\n", ((u32)suspend_time / 1000));
+}
+
+static struct kobj_attribute suspend_time_attribute =
+	__ATTR(suspend_time, 0444, suspend_time_show, NULL);
+
static struct kobject *suspend_kobj;
static int tegra_pm_enter_suspend(void)
{
pr_info("Entering suspend state %s\n", lp_state[current_suspend_mode]);
- suspend_cpu_dfll_mode();
+ suspend_cpu_dfll_mode(0);
if (current_suspend_mode == TEGRA_SUSPEND_LP0)
tegra_lp0_cpu_mode(true);
return 0;
{
if (current_suspend_mode == TEGRA_SUSPEND_LP0)
tegra_lp0_cpu_mode(false);
- resume_cpu_dfll_mode();
+ resume_cpu_dfll_mode(0);
pr_info("Exited suspend state %s\n", lp_state[current_suspend_mode]);
}
+/* Syscore shutdown hook: park the CPU DFLL (open-loop/bypass) before
+ * the machine powers off, mirroring the suspend path with no flags. */
+static void tegra_pm_enter_shutdown(void)
+{
+	suspend_cpu_dfll_mode(0);
+	pr_info("Shutting down tegra ...\n");
+}
+
static struct syscore_ops tegra_pm_enter_syscore_ops = {
	.suspend = tegra_pm_enter_suspend,
	.resume = tegra_pm_enter_resume,
+	.shutdown = tegra_pm_enter_shutdown,	/* parks DFLL on power-off */
};
static __init int tegra_pm_enter_syscore_init(void)
{
u32 reg;
u32 mode;
+ struct pmc_pm_data *pm_dat;
+ bool is_board_pdata = true;
#ifdef CONFIG_ARCH_TEGRA_HAS_CL_DVFS
tegra_dfll = clk_get_sys(NULL, "dfll_cpu");
#endif
tegra_pclk = clk_get_sys(NULL, "pclk");
BUG_ON(IS_ERR(tegra_pclk));
- pdata = plat;
+
+ /* create the pdata from DT information */
+ pm_dat = tegra_get_pm_data();
+ if (pm_dat) {
+ pr_err("PMC dt information non-NULL %s\n", __func__);
+ is_board_pdata = false;
+ pdata = kzalloc(sizeof(struct tegra_suspend_platform_data),
+ GFP_KERNEL);
+ if (pm_dat->combined_req != plat->combined_req) {
+ pr_err("PMC DT attribute combined_req=%d, board value=%d\n",
+ pm_dat->combined_req, plat->combined_req);
+ pdata->combined_req = plat->combined_req;
+ } else {
+ pdata->combined_req = pm_dat->combined_req;
+ }
+ if (pm_dat->sysclkreq_high != plat->sysclkreq_high) {
+ pr_err("PMC DT attribute sysclkreq_high=%d, board value=%d\n",
+ pm_dat->sysclkreq_high, plat->sysclkreq_high);
+ pdata->sysclkreq_high = plat->sysclkreq_high;
+ } else {
+ pdata->sysclkreq_high = pm_dat->sysclkreq_high;
+ }
+ if (pm_dat->corereq_high != plat->corereq_high) {
+ pr_err("PMC DT attribute corereq_high=%d, board value=%d\n",
+ pm_dat->corereq_high, plat->corereq_high);
+ pdata->corereq_high = plat->corereq_high;
+ } else {
+ pdata->corereq_high = pm_dat->corereq_high;
+ }
+ if (pm_dat->cpu_off_time != plat->cpu_off_timer) {
+ pr_err("PMC DT attribute cpu_off_timer=%d, board value=%ld\n",
+ pm_dat->cpu_off_time, plat->cpu_off_timer);
+ pdata->cpu_off_timer = plat->cpu_off_timer;
+ } else {
+ pdata->cpu_off_timer = pm_dat->cpu_off_time;
+ }
+ if (pm_dat->cpu_good_time != plat->cpu_timer) {
+ pr_err("PMC DT attribute cpu_timer=%d, board value=%ld\n",
+ pm_dat->cpu_good_time, plat->cpu_timer);
+ pdata->cpu_timer = plat->cpu_timer;
+ } else {
+ pdata->cpu_timer = pm_dat->cpu_good_time;
+ }
+ if (pm_dat->suspend_mode != plat->suspend_mode) {
+ pr_err("PMC DT attribute suspend_mode=%d, board value=%d\n",
+ pm_dat->suspend_mode, plat->suspend_mode);
+ pdata->suspend_mode = plat->suspend_mode;
+ } else {
+ pdata->suspend_mode = pm_dat->suspend_mode;
+ }
+ /* FIXME: pmc_pm_data fields to be reused
+ * core_osc_time, core_pmu_time, core_off_time
+ * units of above fields is uSec while
+ * platform data values are in ticks
+ */
+ /* FIXME: pmc_pm_data unused by downstream code
+ * cpu_pwr_good_en, lp0_vec_size, lp0_vec_phy_addr
+ */
+ /* FIXME: add missing DT bindings taken from platform data */
+ pdata->core_timer = plat->core_timer;
+ pdata->core_off_timer = plat->core_off_timer;
+ pdata->board_suspend = plat->board_suspend;
+ pdata->board_resume = plat->board_resume;
+ pdata->sysclkreq_gpio = plat->sysclkreq_gpio;
+ pdata->cpu_lp2_min_residency = plat->cpu_lp2_min_residency;
+ pdata->cpu_resume_boost = plat->cpu_resume_boost;
+#ifdef CONFIG_TEGRA_LP1_LOW_COREVOLTAGE
+ pdata->lp1_lowvolt_support = plat->lp1_lowvolt_support;
+ pdata->i2c_base_addr = plat->i2c_base_addr;
+ pdata->pmuslave_addr = plat->pmuslave_addr;
+ pdata->core_reg_addr = plat->core_reg_addr;
+ pdata->lp1_core_volt_low_cold = plat->lp1_core_volt_low_cold;
+ pdata->lp1_core_volt_low = plat->lp1_core_volt_low;
+ pdata->lp1_core_volt_high = plat->lp1_core_volt_high;
+#endif
+#ifdef CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE
+ pdata->min_residency_vmin_fmin = plat->min_residency_vmin_fmin;
+ pdata->min_residency_ncpu_slow = plat->min_residency_ncpu_slow;
+ pdata->min_residency_ncpu_fast = plat->min_residency_ncpu_fast;
+ pdata->min_residency_crail = plat->min_residency_crail;
+#endif
+ pdata->min_residency_mc_clk = plat->min_residency_mc_clk;
+ pdata->usb_vbus_internal_wake = plat->usb_vbus_internal_wake;
+ pdata->usb_id_internal_wake = plat->usb_id_internal_wake;
+ } else {
+ pr_err("PMC board data used in %s\n", __func__);
+ pdata = plat;
+ }
(void)reg;
(void)mode;
plat->suspend_mode = TEGRA_SUSPEND_LP2;
}
-#ifdef CONFIG_TEGRA_LP1_950
+#ifdef CONFIG_TEGRA_LP1_LOW_COREVOLTAGE
if (pdata->lp1_lowvolt_support) {
u32 lp1_core_lowvolt, lp1_core_highvolt;
memcpy(tegra_lp1_register_pmuslave_addr(), &pdata->pmuslave_addr, 4);
/* Initialize scratch registers used for CPU LP2 synchronization */
writel(0, pmc + PMC_SCRATCH37);
writel(0, pmc + PMC_SCRATCH38);
+#ifdef CONFIG_ARCH_TEGRA_2x_SOC
writel(0, pmc + PMC_SCRATCH39);
+#endif
writel(0, pmc + PMC_SCRATCH41);
/* Always enable CPU power request; just normal polarity is supported */
reg |= TEGRA_POWER_PWRREQ_OE;
pmc_32kwritel(reg, PMC_CTRL);
+ if (pdata->sysclkreq_gpio) {
+ reg = readl(pmc + PMC_DPAD_ORIDE);
+ reg &= ~TEGRA_DPAD_ORIDE_SYS_CLK_REQ;
+ pmc_32kwritel(reg, PMC_DPAD_ORIDE);
+ }
+
if (pdata->suspend_mode == TEGRA_SUSPEND_LP0)
tegra_lp0_suspend_init();
&suspend_mode_attribute.attr))
pr_err("%s: sysfs_create_file suspend type failed!\n",
__func__);
+ if (sysfs_create_file(suspend_kobj, \
+ &suspend_resume_time_attribute.attr))
+ pr_err("%s: sysfs_create_file resume_time failed!\n",
+ __func__);
+ if (sysfs_create_file(suspend_kobj, \
+ &suspend_time_attribute.attr))
+ pr_err("%s: sysfs_create_file suspend_time failed!\n",
+ __func__);
}
iram_cpu_lp2_mask = tegra_cpu_lp2_mask;
iram_cpu_lp1_mask = tegra_cpu_lp1_mask;
+ iram_mc_clk_mask = tegra_mc_clk_mask;
/* clear io dpd settings before kernel */
tegra_bl_io_dpd_cleanup();
fail:
#endif
if (plat->suspend_mode == TEGRA_SUSPEND_NONE)
- tegra_lp2_in_idle(false);
+ tegra_pd_in_idle(false);
current_suspend_mode = plat->suspend_mode;
}
+/*
+ * Record the EMC rate floor and ceiling to be used during LP1BB.
+ * Guard against being called before pdata has been initialized,
+ * matching the check in tegra_lp1bb_suspend_mv_set(); the original
+ * code dereferenced pdata unconditionally.
+ */
+void tegra_lp1bb_suspend_emc_rate(unsigned long emc_min, unsigned long emc_max)
+{
+	if (WARN_ON_ONCE(!pdata))
+		return;
+
+	pdata->lp1bb_emc_rate_min = emc_min;
+	pdata->lp1bb_emc_rate_max = emc_max;
+}
+
+/*
+ * Record the minimum core voltage (mV) to be applied during LP1BB.
+ * Silently ignored (with a one-time warning) if pdata is not yet set.
+ */
+void tegra_lp1bb_suspend_mv_set(int mv)
+{
+	if (!WARN_ON_ONCE(!pdata))
+		pdata->lp1bb_core_volt_min = mv;
+}
+
+/*
+ * Return the minimum EMC rate to be used in LP1BB, falling back to
+ * 204 MHz when pdata is missing (warns once) or no rate has been
+ * configured yet.
+ */
+unsigned long tegra_lp1bb_emc_min_rate_get(void)
+{
+	unsigned long rate = 0;
+
+	if (!WARN_ON_ONCE(!pdata))
+		rate = pdata->lp1bb_emc_rate_min;
+
+	return rate ?: 204000000;
+}
+
unsigned long debug_uart_port_base = 0;
EXPORT_SYMBOL(debug_uart_port_base);
+/* Gate the debug console UART clock across suspend; no-op unless
+ * console suspend is enabled and the clock handle was resolved. */
void tegra_console_uart_suspend(void)
{
	if (console_suspend_enabled && debug_uart_clk)
-		clk_disable(debug_uart_clk);
+		tegra_clk_disable_unprepare(debug_uart_clk);
}
+/* Re-enable the debug console UART clock on resume; pairs with
+ * tegra_console_uart_suspend(). */
void tegra_console_uart_resume(void)
{
	if (console_suspend_enabled && debug_uart_clk)
-		clk_enable(debug_uart_clk);
+		tegra_clk_prepare_enable(debug_uart_clk);
}
static int tegra_debug_uart_syscore_init(void)
return 0;
}
arch_initcall(tegra_debug_uart_syscore_init);
+
+#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
+/* True when bit @bit_position of pmc_write_bitmap[@index] is set,
+ * i.e. the corresponding PMC scratch register should be copied. */
+static inline bool pmc_write_check(int index, int bit_position)
+{
+	return (pmc_write_bitmap[index] & (1 << bit_position)) != 0;
+}
+
+/*
+ * Copy the warmboot (WB0) parameter block for the selected instance
+ * from DRAM into the PMC scratch registers.  Only registers whose bit
+ * is set in pmc_write_bitmap[] are copied.
+ */
+static void update_pmc_registers(unsigned long rate)
+{
+	u32 i, j;
+	int instance = 1;
+
+	/* FIXME: convert rate to instance */
+
+	/* Based on index, we select that block of scratches */
+	u32 base2 = (tegra_wb0_params_address + (instance - 1) *
+		tegra_wb0_params_block_size);
+	void __iomem *base = ioremap(base2, tegra_wb0_params_block_size);
+
+	/* ioremap can fail; dereferencing a NULL mapping would oops */
+	if (!base) {
+		pr_err("%s: failed to map wb0 params block\n", __func__);
+		return;
+	}
+
+#define copy_dram_to_pmc(index, bit)	\
+	pmc_32kwritel(readl(base + PMC_REGISTER_OFFSET(index, bit)), \
+		PMC_REGISTER_OFFSET(index, bit) + PMC_SCRATCH0)
+
+
+	/* Iterate through the bitmap, and copy those registers
+	 * which are marked in the bitmap
+	 */
+	for (i = 0, j = 0; j < ARRAY_SIZE(pmc_write_bitmap);) {
+		if (pmc_write_bitmap[j] == 0) {
+			j++;
+			i = 0;
+			continue;
+		}
+
+		if (pmc_write_check(j, i))
+			copy_dram_to_pmc(j, i);
+
+		/*
+		 * ">=", not ">": valid bit positions are 0..31.  The
+		 * original ">" let i reach the word width, making
+		 * pmc_write_check() shift by 32 bits -- undefined
+		 * behaviour in C.
+		 */
+		if (++i >= (sizeof(pmc_write_bitmap[0]) * 8)) {
+			i = 0;
+			j++;
+		}
+	}
+
+#undef copy_dram_to_pmc
+	iounmap(base);
+}
+#endif
+
+#ifdef CONFIG_ARM_ARCH_TIMER
+
+static u32 tsc_suspend_start;
+static u32 tsc_resume_start;
+
+#define pmc_writel(value, reg) \
+ writel(value, (uintptr_t)pmc + (reg))
+#define pmc_readl(reg) \
+ readl((uintptr_t)pmc + (reg))
+
+#define PMC_DPD_ENABLE 0x24
+#define PMC_DPD_ENABLE_TSC_MULT_ENABLE (1 << 1)
+
+#define PMC_TSC_MULT 0x2b4
+#define PMC_TSC_MULT_FREQ_STS (1 << 16)
+
+#define TSC_TIMEOUT_US 32
+
+/*
+ * Arm the PMC TSC multiplier on the way into suspend (presumably so
+ * the arch timer counter is compensated across deep sleep -- confirm
+ * against the PMC TRM).  BUG_ON catches unbalanced suspend/resume:
+ * the mult-enable bit must not already be set.  The current 1 us
+ * timer value is saved so tegra_tsc_wait_for_suspend() can bound its
+ * poll for the frequency-change status.
+ */
+void tegra_tsc_suspend(void)
+{
+	if (arch_timer_initialized) {
+		u32 reg = pmc_readl(PMC_DPD_ENABLE);
+		BUG_ON(reg & PMC_DPD_ENABLE_TSC_MULT_ENABLE);
+		reg |= PMC_DPD_ENABLE_TSC_MULT_ENABLE;
+		pmc_writel(reg, PMC_DPD_ENABLE);
+		tsc_suspend_start = timer_readl(TIMERUS_CNTR_1US);
+	}
+}
+
+/*
+ * Undo tegra_tsc_suspend(): clear the PMC TSC multiplier enable on
+ * the resume path.  BUG_ON catches unbalanced calls -- the bit must
+ * still be set from suspend.  The 1 us timer value is saved so
+ * tegra_tsc_wait_for_resume() can bound its poll.
+ */
+void tegra_tsc_resume(void)
+{
+	if (arch_timer_initialized) {
+		u32 reg = pmc_readl(PMC_DPD_ENABLE);
+		BUG_ON(!(reg & PMC_DPD_ENABLE_TSC_MULT_ENABLE));
+		reg &= ~PMC_DPD_ENABLE_TSC_MULT_ENABLE;
+		pmc_writel(reg, PMC_DPD_ENABLE);
+		tsc_resume_start = timer_readl(TIMERUS_CNTR_1US);
+	}
+}
+
+/*
+ * Busy-wait (at most TSC_TIMEOUT_US, measured on the 1 us timer from
+ * the timestamp taken in tegra_tsc_suspend()) for the PMC to report
+ * the TSC frequency change via PMC_TSC_MULT_FREQ_STS.  Falls through
+ * silently on timeout -- presumably safe to proceed; confirm against
+ * the PMC documentation.
+ */
+void tegra_tsc_wait_for_suspend(void)
+{
+	if (arch_timer_initialized) {
+		while ((timer_readl(TIMERUS_CNTR_1US) - tsc_suspend_start) <
+			TSC_TIMEOUT_US) {
+			if (pmc_readl(PMC_TSC_MULT) & PMC_TSC_MULT_FREQ_STS)
+				break;
+			cpu_relax();
+		}
+	}
+}
+
+/*
+ * Counterpart of tegra_tsc_wait_for_suspend(): busy-wait (bounded by
+ * TSC_TIMEOUT_US from the timestamp taken in tegra_tsc_resume()) for
+ * PMC_TSC_MULT_FREQ_STS to deassert, indicating the TSC is back on
+ * its normal frequency.  Times out silently.
+ */
+void tegra_tsc_wait_for_resume(void)
+{
+	if (arch_timer_initialized) {
+		while ((timer_readl(TIMERUS_CNTR_1US) - tsc_resume_start) <
+			TSC_TIMEOUT_US) {
+			if (!(pmc_readl(PMC_TSC_MULT) & PMC_TSC_MULT_FREQ_STS))
+				break;
+			cpu_relax();
+		}
+	}
+}
+#endif
+
+#if defined(CONFIG_DEBUG_FS) && INSTRUMENT_CLUSTER_SWITCH
+
+/*
+ * Print one direction's cluster-switch timing statistics to the
+ * seq_file: the windowed average, the exponential moving average
+ * (both stored pre-scaled by CLUSTER_SWITCH_TIME_AVG_SHIFT, hence the
+ * shift down here), and the maximum observed since boot.  Values are
+ * in microseconds per the caller's heading.
+ */
+static void cluster_switch_stats_show(
+	struct seq_file *s, struct tegra_cluster_switch_time_stats *stats)
+{
+	seq_printf(s, "%u-samples average: %lu\n",
+		CLUSTER_SWITCH_AVG_SAMPLES,
+		stats->avg >> CLUSTER_SWITCH_TIME_AVG_SHIFT);
+	seq_printf(s, "exponential average: %lu\n",
+		stats->exp_avg >> CLUSTER_SWITCH_TIME_AVG_SHIFT);
+	seq_printf(s, "maximum since boot: %lu\n\n", stats->max);
+}
+
+
+/* debugfs seq_file show: dump both G->LP and LP->G switch stats. */
+static int tegra_cluster_switch_stats_show(struct seq_file *s, void *data)
+{
+	/* seq_puts() is the preferred kernel idiom for constant strings
+	 * (no format parsing; checkpatch flags seq_printf here) */
+	seq_puts(s, "G=>LP cluster switch timing: (us)\n");
+	cluster_switch_stats_show(s, &g2lp_stats);
+	seq_puts(s, "LP=>G cluster switch timing: (us)\n");
+	cluster_switch_stats_show(s, &lp2g_stats);
+	return 0;
+}
+
+/* debugfs open: bind the stats dump through the single-shot seq_file
+ * helper. */
+static int tegra_cluster_switch_stats_open(
+	struct inode *inode, struct file *file)
+{
+	return single_open(file, tegra_cluster_switch_stats_show,
+		inode->i_private);
+}
+
+static const struct file_operations tegra_cluster_switch_stats_ops = {
+ .open = tegra_cluster_switch_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/*
+ * Create /sys/kernel/debug/tegra_pm_core/cluster_switch_stats.
+ * On file-creation failure the just-created directory is removed so a
+ * dangling empty debugfs directory is not leaked (the original code
+ * returned without cleanup).
+ */
+static int __init tegra_pm_core_debug_init(void)
+{
+	struct dentry *dir, *d;
+
+	dir = debugfs_create_dir("tegra_pm_core", NULL);
+	if (!dir)
+		return -ENOMEM;
+
+	d = debugfs_create_file("cluster_switch_stats", S_IRUGO, dir, NULL,
+		&tegra_cluster_switch_stats_ops);
+	if (!d) {
+		debugfs_remove_recursive(dir);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+late_initcall(tegra_pm_core_debug_init);
+#endif