unknown changes from android-tegra-nv-3.4
[linux-3.10.git] / arch / arm / mach-tegra / pm-t3.c
index 8cd3901..595e405 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Tegra3 SOC-specific power and cluster management
  *
- * Copyright (c) 2009-2011, NVIDIA Corporation.
+ * Copyright (c) 2009-2012, NVIDIA Corporation.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/clk.h>
-#include <linux/cpu_pm.h>
 #include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/clockchips.h>
+#include <linux/cpu_pm.h>
 
 #include <mach/gpio.h>
 #include <mach/irqs.h>
 
 #include "clock.h"
 #include "cpuidle.h"
-#include "flowctrl.h"
-#include "gpio-names.h"
 #include "iomap.h"
 #include "pm.h"
 #include "sleep.h"
 #include "tegra3_emc.h"
+#include "dvfs.h"
 
 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
 #define CAR_CCLK_BURST_POLICY \
@@ -196,13 +199,13 @@ void tegra_cluster_switch_prolog(unsigned int flags)
       /* Read the flow controller CSR register and clear the CPU switch
          and immediate flags. If an actual CPU switch is to be performed,
          re-write the CSR register with the desired values. */
-       reg = flowctrl_read_cpu_csr(0);
-       reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
-                FLOW_CTRL_CSR_SWITCH_CLUSTER);
+       reg = readl(FLOW_CTRL_CPU_CSR(0));
+       reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE |
+                FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER);
 
        /* Program flow controller for immediate wake if requested */
        if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
-               reg |= FLOW_CTRL_CSR_IMMEDIATE_WAKE;
+               reg |= FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE;
 
        /* Do nothing if no switch actions requested */
        if (!target_cluster)
@@ -218,12 +221,32 @@ void tegra_cluster_switch_prolog(unsigned int flags)
                        }
 
                        /* Set up the flow controller to switch CPUs. */
-                       reg |= FLOW_CTRL_CSR_SWITCH_CLUSTER;
+                       reg |= FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER;
                }
        }
 
 done:
-       flowctrl_write_cpu_csr(0, reg);
+       writel(reg, FLOW_CTRL_CPU_CSR(0));
+}
+
+
static void cluster_switch_epilog_actlr(void)
{
	u32 actlr;

	/* The TLB maintenance broadcast bit (FW) is stubbed out on the LP CPU
	   (reads as zero, writes ignored). Hence, it is not preserved across a
	   G=>LP=>G switch by the CPU save/restore code, although the SMP bit
	   is restored correctly. Synchronize these two bits here after an
	   LP=>G transition. Note that only the CPU0 core is powered on before
	   and after the switch. See also bug 807595. */

	/* Read ACTLR (Auxiliary Control Register, cp15 c1/c0/1). */
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* If the SMP bit (bit 6) survived the switch, re-set the FW bit
	   (bit 0) to match, then write ACTLR back. */
	if (actlr & (0x1 << 6)) {
		actlr |= 0x1;
		__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
	}
}
 
 static void cluster_switch_epilog_gic(void)
@@ -231,10 +254,6 @@ static void cluster_switch_epilog_gic(void)
        unsigned int max_irq, i;
        void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);
 
-       /* Nothing to do if currently running on the LP CPU. */
-       if (is_lp_cluster())
-               return;
-
       /* Reprogram the interrupt affinity because on the LP CPU,
          the interrupt distributor affinity registers are stubbed out
          by ARM (reads as zero, writes ignored). So when the LP CPU
@@ -246,8 +265,25 @@ static void cluster_switch_epilog_gic(void)
        max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
        max_irq = (max_irq + 1) * 32;
 
-       for (i = 32; i < max_irq; i += 4)
-               writel(0x01010101, gic_base + GIC_DIST_TARGET + i * 4 / 4);
+       for (i = 32; i < max_irq; i += 4) {
+               u32 val = 0x01010101;
+#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
+               unsigned int irq;
+               for (irq = i; irq < (i + 4); irq++) {
+                       struct cpumask mask;
+                       struct irq_desc *desc = irq_to_desc(irq);
+
+                       if (desc && desc->affinity_hint &&
+                           desc->irq_data.affinity) {
+                               if (cpumask_and(&mask, desc->affinity_hint,
+                                               desc->irq_data.affinity))
+                                       val |= (*cpumask_bits(&mask) & 0xff) <<
+                                               ((irq & 3) * 8);
+                       }
+               }
+#endif
+               writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
+       }
 }
 
 void tegra_cluster_switch_epilog(unsigned int flags)
@@ -257,13 +293,16 @@ void tegra_cluster_switch_epilog(unsigned int flags)
        /* Make sure the switch and immediate flags are cleared in
           the flow controller to prevent undesirable side-effects
           for future users of the flow controller. */
-       reg = flowctrl_read_cpu_csr(0);
-       reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
-                FLOW_CTRL_CSR_SWITCH_CLUSTER);
-       flowctrl_write_cpu_csr(0, reg);
-
-       /* Perform post-switch clean-up of the interrupt distributor */
-       cluster_switch_epilog_gic();
+       reg = readl(FLOW_CTRL_CPU_CSR(0));
+       reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE |
+                FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER);
+       writel(reg, FLOW_CTRL_CPU_CSR(0));
+
+       /* Perform post-switch LP=>G clean-up */
+       if (!is_lp_cluster()) {
+               cluster_switch_epilog_actlr();
+               cluster_switch_epilog_gic();
+       }
 
        #if DEBUG_CLUSTER_SWITCH
        {
@@ -305,38 +344,54 @@ int tegra_cluster_control(unsigned int us, unsigned int flags)
        if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
                us = 0;
 
-       if (current_cluster != target_cluster) {
-               if (target_cluster == TEGRA_POWER_CLUSTER_G) {
-                       s64 t = ktime_to_us(ktime_sub(ktime_get(), last_g2lp));
-                       s64 t_off = tegra_cpu_power_off_time();
-                       if (t_off > t)
-                               udelay((unsigned int)(t_off - t));
-               }
-               else
-                       last_g2lp = ktime_get();
-       }
-
        DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
                (flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
                is_lp_cluster() ? "LP" : "G",
                (target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
                (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
                (flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
-               us));
+               us));
 
        local_irq_save(irq_flags);
+
+       if (current_cluster != target_cluster && !timekeeping_suspended) {
+               ktime_t now = ktime_get();
+               if (target_cluster == TEGRA_POWER_CLUSTER_G) {
+                       s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
+                       s64 t_off = tegra_cpu_power_off_time();
+                       if (t_off > t)
+                               udelay((unsigned int)(t_off - t));
+
+                       tegra_dvfs_rail_on(tegra_cpu_rail, now);
+
+               } else {
+                       last_g2lp = now;
+                       tegra_dvfs_rail_off(tegra_cpu_rail, now);
+               }
+       }
+
        if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
                if (us)
                        tegra_lp2_set_trigger(us);
 
+               tegra_cluster_switch_prolog(flags);
                tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
+               tegra_cluster_switch_epilog(flags);
 
                if (us)
                        tegra_lp2_set_trigger(0);
        } else {
+               int cpu = 0;
+
                tegra_set_cpu_in_lp2(0);
                cpu_pm_enter();
+               if (!timekeeping_suspended)
+                       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
+                                          &cpu);
                tegra_idle_lp2_last(0, flags);
+               if (!timekeeping_suspended)
+                       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
+                                          &cpu);
                cpu_pm_exit();
                tegra_clear_cpu_in_lp2(0);
        }
@@ -349,21 +404,17 @@ int tegra_cluster_control(unsigned int us, unsigned int flags)
 #endif
 
 #ifdef CONFIG_PM_SLEEP
-static u32 mc_reserved_rsv;
-static u32 mc_emem_arb_override;
 
void tegra_lp0_suspend_mc(void)
{
	/* Intentionally empty: since the memory frequency after LP0 is
	   restored to the boot rate, the MC timing is saved during init,
	   not on entry to LP0. The hook is kept so the LP0 suspend path
	   does not need to change if per-entry save becomes necessary. */
}
 
void tegra_lp0_resume_mc(void)
{
	/* Restore the memory controller timing that was captured at init
	   time (counterpart of the empty tegra_lp0_suspend_mc hook). */
	tegra_mc_timing_restore();
}
 
 void tegra_lp0_cpu_mode(bool enter)
@@ -378,6 +429,101 @@ void tegra_lp0_cpu_mode(bool enter)
                flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
                flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
                tegra_cluster_control(0, flags);
+               pr_info("Tegra: switched to %s cluster\n", enter ? "LP" : "G");
        }
 }
 #endif
+
+#define IO_DPD_INFO(_name, _index, _bit) \
+       { \
+               .name = _name, \
+               .io_dpd_reg_index = _index, \
+               .io_dpd_bit = _bit, \
+       }
+
+/* PMC IO DPD register offsets */
+#define APBDEV_PMC_IO_DPD_REQ_0                0x1b8
+#define APBDEV_PMC_IO_DPD_STATUS_0     0x1bc
+#define APBDEV_PMC_SEL_DPD_TIM_0       0x1c8
+#define APBDEV_DPD_ENABLE_LSB          30
+#define APBDEV_DPD2_ENABLE_LSB         5
+#define PMC_DPD_SAMPLE                 0x20
+
/* Table of pads supporting IO deep power down (DPD). Currently empty:
   the SD DPD entries were removed, so tegra_io_dpd_get() never finds a
   match and DPD is effectively disabled for all devices. */
struct tegra_io_dpd tegra_list_io_dpd[] = {
};
+
+struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
+{
+       int i;
+       const char *name = dev ? dev_name(dev) : NULL;
+       if (name) {
+               for (i = 0; i < (sizeof(tegra_list_io_dpd) /
+                       sizeof(struct tegra_io_dpd)); i++) {
+                       if (!(strncmp(tegra_list_io_dpd[i].name, name,
+                               strlen(name)))) {
+                               return &tegra_list_io_dpd[i];
+                       }
+               }
+       }
+       dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
+               ((name) ? name : "NULL"));
+       return NULL;
+}
+EXPORT_SYMBOL(tegra_io_dpd_get);
+
+static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
+static DEFINE_SPINLOCK(tegra_io_dpd_lock);
+
+void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
+{
+       unsigned int enable_mask;
+       unsigned int dpd_status;
+       unsigned int dpd_enable_lsb;
+
+       if ((!hnd))
+               return;
+       spin_lock(&tegra_io_dpd_lock);
+       dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
+                                               APBDEV_DPD_ENABLE_LSB;
+       writel(0x1, pmc + PMC_DPD_SAMPLE);
+       writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
+       enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
+       writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
+                                       hnd->io_dpd_reg_index * 8));
+       udelay(1);
+       dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
+                                       hnd->io_dpd_reg_index * 8));
+       if (!(dpd_status & (1 << hnd->io_dpd_bit)))
+               pr_info("Error: dpd%d enable failed, status=%#x\n",
+               (hnd->io_dpd_reg_index + 1), dpd_status);
+       /* Sample register must be reset before next sample operation */
+       writel(0x0, pmc + PMC_DPD_SAMPLE);
+       spin_unlock(&tegra_io_dpd_lock);
+       return;
+}
+EXPORT_SYMBOL(tegra_io_dpd_enable);
+
+void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
+{
+       unsigned int enable_mask;
+       unsigned int dpd_status;
+       unsigned int dpd_enable_lsb;
+
+       if ((!hnd))
+               return;
+       spin_lock(&tegra_io_dpd_lock);
+       dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
+                                               APBDEV_DPD_ENABLE_LSB;
+       enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
+       writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
+                                       hnd->io_dpd_reg_index * 8));
+       dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
+                                       hnd->io_dpd_reg_index * 8));
+       if (dpd_status & (1 << hnd->io_dpd_bit))
+               pr_info("Error: dpd%d disable failed, status=%#x\n",
+               (hnd->io_dpd_reg_index + 1), dpd_status);
+       spin_unlock(&tegra_io_dpd_lock);
+       return;
+}
+EXPORT_SYMBOL(tegra_io_dpd_disable);