arm: tegra: API for cluster switching
[linux-3.10.git] / arch/arm/mach-tegra/pm-t3.c
1 /*
2  * arch/arm/mach-tegra/pm-t3.c
3  *
4  * Tegra3 SOC-specific power and cluster management
5  *
6  * Copyright (c) 2009-2012, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/io.h>
22 #include <linux/smp.h>
23 #include <linux/interrupt.h>
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/device.h>
28 #include <linux/module.h>
29 #include <linux/clockchips.h>
30 #include <linux/cpu_pm.h>
31
32 #include <mach/gpio.h>
33 #include <mach/irqs.h>
34 #include <mach/io_dpd.h>
35
36 #include <asm/smp_plat.h>
37 #include <asm/cputype.h>
38 #include <asm/hardware/gic.h>
39
40 #include <trace/events/power.h>
41
42 #include "clock.h"
43 #include "cpuidle.h"
44 #include "iomap.h"
45 #include "pm.h"
46 #include "sleep.h"
47 #include "tegra3_emc.h"
48 #include "dvfs.h"
49
50 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
51 #define CAR_CCLK_BURST_POLICY \
52         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)
53
54 #define CAR_SUPER_CCLK_DIVIDER \
55         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)
56
57 #define CAR_CCLKG_BURST_POLICY \
58         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)
59
60 #define CAR_SUPER_CCLKG_DIVIDER \
61         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)
62
63 #define CAR_CCLKLP_BURST_POLICY \
64         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
65 #define PLLX_DIV2_BYPASS_LP     (1<<16)
66
67 #define CAR_SUPER_CCLKLP_DIVIDER \
68         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)
69
70 #define CAR_BOND_OUT_V \
71         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
72 #define CAR_BOND_OUT_V_CPU_G    (1<<0)
73 #define CAR_BOND_OUT_V_CPU_LP   (1<<1)
74
75 #define CAR_CLK_ENB_V_SET \
76         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
77 #define CAR_CLK_ENB_V_CPU_G     (1<<0)
78 #define CAR_CLK_ENB_V_CPU_LP    (1<<1)
79
80 #define CAR_RST_CPUG_CMPLX_SET \
81         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)
82
83 #define CAR_RST_CPUG_CMPLX_CLR \
84         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)
85
86 #define CAR_RST_CPULP_CMPLX_SET \
87         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)
88
89 #define CAR_RST_CPULP_CMPLX_CLR \
90         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)
91
92 #define CAR_CLK_CPUG_CMPLX_SET \
93         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)
94
95 #define CAR_CLK_CPUG_CMPLX_CLR \
96         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)
97
98 #define CAR_CLK_CPULP_CMPLX_SET \
99         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)
100
101 #define CAR_CLK_CPULP_CMPLX_CLR \
102         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)
103
104 #define CPU_CLOCK(cpu)  (0x1<<(8+(cpu)))
105 #define CPU_RESET(cpu)  (0x1111ul<<(cpu))
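/*
 * CPU_CLOCK(cpu) is the per-CPU clock-stop bit (bits 8..11) used with the
 * CLK_CPU*_CMPLX set/clear registers below.  CPU_RESET(cpu) sets bit <cpu>
 * in each of the four 4-bit fields of the RST_CPU*_CMPLX registers, so a
 * single write asserts (or deasserts) every reset line belonging to that
 * core.
 */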
106
107 #define PLLX_FO_G (1<<28)
108 #define PLLX_FO_LP (1<<29)
109
110 #define CLK_RST_CONTROLLER_PLLX_MISC_0 \
111         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0xE4)
112
113 static int cluster_switch_prolog_clock(unsigned int flags)
114 {
115         u32 reg;
116         u32 CclkBurstPolicy;
117         u32 SuperCclkDivider;
118
119         /* Read the bond out register containing the G and LP CPUs. */
120         reg = readl(CAR_BOND_OUT_V);
121
122         /* Sync G-PLLX divider bypass with LP (no effect on G; just prevents
123            LP settings being overwritten by save/restore code). */
124         CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
125         CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
126         writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
127
128         /* Switching to G? */
129         if (flags & TEGRA_POWER_CLUSTER_G) {
130                 /* Fail if the G CPU complex is bonded out (not present) */
131                 if (reg & CAR_BOND_OUT_V_CPU_G)
132                         return -ENXIO;
133
134                 /* Keep the G CPU clock policy set by the upper layer, with the
135                    exception of the transition via LP1 */
136                 if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
137                         /* In LP1 power mode come up on CLKM (oscillator) */
138                         CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
139                         CclkBurstPolicy &= ~0xF;
140                         SuperCclkDivider = 0;
141
142                         writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
143                         writel(SuperCclkDivider, CAR_SUPER_CCLKG_DIVIDER);
144                 }
145
146 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
147                 /* Hold G CPUs 1-3 in reset after the switch */
148                 reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
149                 writel(reg, CAR_RST_CPUG_CMPLX_SET);
150
151                 /* Take G CPU 0 out of reset after the switch */
152                 reg = CPU_RESET(0);
153                 writel(reg, CAR_RST_CPUG_CMPLX_CLR);
154
155                 /* Disable the clocks on G CPUs 1-3 after the switch */
156                 reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
157                 writel(reg, CAR_CLK_CPUG_CMPLX_SET);
158
159                 /* Enable the clock on G CPU 0 after the switch */
160                 reg = CPU_CLOCK(0);
161                 writel(reg, CAR_CLK_CPUG_CMPLX_CLR);
162
163                 /* Enable the G CPU complex clock after the switch */
164                 reg = CAR_CLK_ENB_V_CPU_G;
165                 writel(reg, CAR_CLK_ENB_V_SET);
166 #endif
167         }
168         /* Switching to LP? */
169         else if (flags & TEGRA_POWER_CLUSTER_LP) {
170                 /* Fail if the LP CPU is bonded out (not present) */
171                 if (reg & CAR_BOND_OUT_V_CPU_LP)
172                         return -ENXIO;
173
174                 /* Keep LP CPU clock policy set by upper layer, with the
175                    exception of the transition via LP1 */
176                 if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
177                         /* In LP1 power mode come up on CLKM (oscillator) */
178                         CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
179                         CclkBurstPolicy &= ~0xF;
180                         SuperCclkDivider = 0;
181
182                         writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
183                         writel(SuperCclkDivider, CAR_SUPER_CCLKLP_DIVIDER);
184                 }
185
186 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
187                 /* Take the LP CPU out of reset after the switch */
188                 reg = CPU_RESET(0);
189                 writel(reg, CAR_RST_CPULP_CMPLX_CLR);
190
191                 /* Enable the clock on the LP CPU after the switch */
192                 reg = CPU_CLOCK(0);
193                 writel(reg, CAR_CLK_CPULP_CMPLX_CLR);
194
195                 /* Enable the LP CPU complex clock after the switch */
196                 reg = CAR_CLK_ENB_V_CPU_LP;
197                 writel(reg, CAR_CLK_ENB_V_SET);
198 #endif
199         }
200
201         return 0;
202 }
203
204 static inline void enable_pllx_cluster_port(void)
205 {
206         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
207         val &= (is_lp_cluster()?(~PLLX_FO_G):(~PLLX_FO_LP));
208         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
209 }
210
211 static inline void disable_pllx_cluster_port(void)
212 {
213         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
214         val |= (is_lp_cluster()?PLLX_FO_G:PLLX_FO_LP);
215         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
216 }
217
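/*
 * tegra_cluster_switch_prolog() only arms the switch: it programs the flow
 * controller CSR and pre-configures the target cluster's clocks and resets.
 * The switch itself takes effect when this CPU subsequently enters a flow
 * controller managed power-down state (see tegra_cluster_control() below).
 */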
218 void tegra_cluster_switch_prolog(unsigned int flags)
219 {
220         unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
221         unsigned int current_cluster = is_lp_cluster()
222                                         ? TEGRA_POWER_CLUSTER_LP
223                                         : TEGRA_POWER_CLUSTER_G;
224         u32 reg;
225         u32 cpu;
226
227         cpu = cpu_logical_map(smp_processor_id());
228
229         /* Read the flow controller CSR register and clear the CPU switch
230            and immediate flags. If an actual CPU switch is to be performed,
231            re-write the CSR register with the desired values. */
232         reg = readl(FLOW_CTRL_CPU_CSR(cpu));
233         reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
234                  FLOW_CTRL_CSR_SWITCH_CLUSTER);
235
236         /* Program flow controller for immediate wake if requested */
237         if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
238                 reg |= FLOW_CTRL_CSR_IMMEDIATE_WAKE;
239
240         /* Do nothing if no switch actions requested */
241         if (!target_cluster)
242                 goto done;
243
244 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
245         reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
246         if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
247             ((flags & TEGRA_POWER_CLUSTER_PART_NONCPU) == 0) &&
248             (current_cluster == TEGRA_POWER_CLUSTER_LP))
249                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
250         else if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL)
251                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
252
253         if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
254                 reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
255 #endif
256
257         if ((current_cluster != target_cluster) ||
258                 (flags & TEGRA_POWER_CLUSTER_FORCE)) {
259                 if (current_cluster != target_cluster) {
260                         /* Set up the clocks for the target CPU. */
261                         if (cluster_switch_prolog_clock(flags)) {
262                                 /* The target CPU does not exist */
263                                 goto done;
264                         }
265
266                         /* Set up the flow controller to switch CPUs. */
267                         reg |= FLOW_CTRL_CSR_SWITCH_CLUSTER;
268
269                         /* Enable target port of PLL_X */
270                         enable_pllx_cluster_port();
271                 }
272         }
273
274 done:
275         writel(reg, FLOW_CTRL_CPU_CSR(cpu));
276 }
277
278
279 static void cluster_switch_epilog_actlr(void)
280 {
281         u32 actlr;
282
283         /*
284          * This is only needed for Cortex-A9, for Cortex-A15, do nothing!
285          * This is only needed for Cortex-A9; on Cortex-A15 there is nothing to do.
286          * TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
287          * as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
288          * switch by CPU save/restore code, but SMP bit is restored correctly.
289          * Synchronize these two bits here after LP=>G transition. Note that
290          * only CPU0 core is powered on before and after the switch. See also
291          * bug 807595.
292         */
293         if (((read_cpuid_id() >> 4) & 0xFFF) == 0xC0F) /* Cortex-A15 */
294                 return;
295
296         __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
297
298         if (actlr & (0x1 << 6)) {       /* ACTLR.SMP is set */
299                 actlr |= 0x1;           /* set ACTLR.FW to match */
300                 __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
301         }
302 }
303
304 static void cluster_switch_epilog_gic(void)
305 {
306         unsigned int max_irq, i;
307         void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);
308
309         /* Reprogram the interrupt affinity because, on the LP CPU,
310            the interrupt distributor affinity registers are stubbed out
311            by ARM (reads as zero, writes ignored). So when the LP CPU
312            context save code runs, the affinity registers will read
313            as all zero. This causes all interrupts to be effectively
314            disabled when back on the G CPU because they aren't routable
315            to any CPU. See bug 667720 for details. */
316
317         max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
318         max_irq = (max_irq + 1) * 32;
319
320         for (i = 32; i < max_irq; i += 4) {
321                 u32 val = 0x01010101;   /* route this word's 4 irqs to CPU0 */
322 #ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
323                 unsigned int irq;
324                 for (irq = i; irq < (i + 4); irq++) {
325                         struct cpumask mask;
326                         struct irq_desc *desc = irq_to_desc(irq);
327
328                         if (desc && desc->affinity_hint) {
329                                 if (cpumask_and(&mask, desc->affinity_hint,
330                                                 desc->irq_data.affinity))
331                                         val |= (*cpumask_bits(&mask) & 0xff) <<
332                                                 ((irq & 3) * 8);
333                         }
334                 }
335 #endif
336                 writel(val, gic_base + GIC_DIST_TARGET + i);
337         }
338 }
339
340 void tegra_cluster_switch_epilog(unsigned int flags)
341 {
342         u32 reg;
343         u32 cpu;
344
345         cpu = cpu_logical_map(smp_processor_id());
346
347         /* Make sure the switch and immediate flags are cleared in
348            the flow controller to prevent undesirable side-effects
349            for future users of the flow controller. */
350         reg = readl(FLOW_CTRL_CPU_CSR(cpu));
351         reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
352                  FLOW_CTRL_CSR_SWITCH_CLUSTER);
353 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
354         reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
355 #endif
356         writel(reg, FLOW_CTRL_CPU_CSR(cpu));
357
358         /* Perform post-switch LP=>G clean-up */
359         if (!is_lp_cluster()) {
360                 cluster_switch_epilog_actlr();
361                 cluster_switch_epilog_gic();
362         }
363
364         /* Disable unused port of PLL_X */
365         disable_pllx_cluster_port();
366
367         #if DEBUG_CLUSTER_SWITCH
368         {
369                 /* FIXME: clock functions below are taking mutex */
370                 struct clk *c = tegra_get_clock_by_name(
371                         is_lp_cluster() ? "cpu_lp" : "cpu_g");
372                 DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
373                         is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
374         }
375         #endif
376 }
377
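/*
 * tegra_cluster_control() - switch the (single online) CPU to another cluster.
 * @us:    expected time in microseconds, used as the power-down trigger when
 *         the switch goes through LP1; forced to zero for immediate requests.
 * @flags: TEGRA_POWER_CLUSTER_* target selection plus modifier flags.
 *
 * Must be called with exactly one CPU online.  Returns 0 on success,
 * -EINVAL/-EBUSY/-EEXIST/-EPERM otherwise.
 */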
378 int tegra_cluster_control(unsigned int us, unsigned int flags)
379 {
380         static ktime_t last_g2lp;
381
382         unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
383         unsigned int current_cluster = is_lp_cluster()
384                                         ? TEGRA_POWER_CLUSTER_LP
385                                         : TEGRA_POWER_CLUSTER_G;
386         unsigned long irq_flags;
387
388         if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
389                 return -EINVAL;
390
391         if (num_online_cpus() > 1)
392                 return -EBUSY;
393
394         if ((current_cluster == target_cluster)
395         && !(flags & TEGRA_POWER_CLUSTER_FORCE))
396                 return -EEXIST;
397
398         if (target_cluster == TEGRA_POWER_CLUSTER_G)
399                 if (!is_g_cluster_present())
400                         return -EPERM;
401
402         trace_power_start(POWER_PSTATE, target_cluster, 0);
403
404         if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
405                 us = 0;
406
407         DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
408                 (flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
409                 is_lp_cluster() ? "LP" : "G",
410                 (target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
411                 (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
412                 (flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
413                 us));
414
415         local_irq_save(irq_flags);
416
417         if (current_cluster != target_cluster && !timekeeping_suspended) {
418                 ktime_t now = ktime_get();
419                 if (target_cluster == TEGRA_POWER_CLUSTER_G) {
420                         s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
421                         s64 t_off = tegra_cpu_power_off_time();
422 #if defined(CONFIG_ARCH_TEGRA_11x_SOC)
423                         /* u32 reg; */
424 #endif
425                         if (t_off > t)
426                                 udelay((unsigned int)(t_off - t));
427
428                         tegra_dvfs_rail_on(tegra_cpu_rail, now);
429 #if defined(CONFIG_ARCH_TEGRA_11x_SOC)
430                         /*
431                          * comment out RAM repair as this seems impacting
432                          * cluster switch
433                          */
434                         /* enable RAM repair by flow controller */
435                         /*
436                         reg = readl(FLOW_CTRL_RAM_REPAIR);
437                         reg &= ~FLOW_CTRL_RAM_REPAIR_BYPASS_EN;
438                         writel(reg, FLOW_CTRL_RAM_REPAIR);
439                         */
440 #endif
441
442                 } else {
443 #ifdef CONFIG_TEGRA_VIRTUAL_CPUID
444                         u32 cpu;
445
446                         cpu = cpu_logical_map(smp_processor_id());
447                         writel(cpu, FLOW_CTRL_MPID);
448 #endif
449                         last_g2lp = now;
450                         tegra_dvfs_rail_off(tegra_cpu_rail, now);
451                 }
452         }
453
454         if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
455                 if (us)
456                         tegra_pd_set_trigger(us);
457
458                 tegra_cluster_switch_prolog(flags);
459                 tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
460                 tegra_cluster_switch_epilog(flags);
461
462                 if (us)
463                         tegra_pd_set_trigger(0);
464         } else {
465                 int cpu;
466
467                 cpu = cpu_logical_map(smp_processor_id());
468
469                 tegra_set_cpu_in_pd(cpu);
470                 cpu_pm_enter();
471                 if (!timekeeping_suspended)
472                         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
473                                            &cpu);
474                 tegra_idle_power_down_last(0, flags);
475                 if (!timekeeping_suspended)
476                         clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
477                                            &cpu);
478                 cpu_pm_exit();
479                 tegra_clear_cpu_in_pd(cpu);
480         }
481         local_irq_restore(irq_flags);
482
483         DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));
484
485         return 0;
486 }
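A minimal sketch of how a caller (e.g. cpuidle or cpufreq glue) might request a switch through this API; the wrapper below is hypothetical, and only the flag constants and tegra_cluster_control() itself come from this code:

static int example_request_lp_switch(unsigned int idle_us)
{
        unsigned int flags = TEGRA_POWER_CLUSTER_LP |
                             TEGRA_POWER_CLUSTER_IMMEDIATE;

        /* tegra_cluster_control() rejects the call with -EBUSY if more than
         * one CPU is online, and with -EEXIST if we are already running on
         * the LP cluster (and no force flag is set). */
        return tegra_cluster_control(idle_us, flags);
}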
487
488 int tegra_switch_to_lp_cluster(void)
489 {
490         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
491         struct clk *cpu_lp_clk = tegra_get_clock_by_name("cpu_lp");
492         unsigned long rate = clk_get_rate(cpu_clk);
493         int e;
494
495         if (is_lp_cluster())
496                 return 0;
497
498         /* Cap the clock rate at the LP CPU's maximum before switching */
499
500         if (rate > cpu_lp_clk->max_rate) {
501                 e = clk_set_rate(cpu_clk, cpu_lp_clk->max_rate);
502                 if (e) {
503                         pr_err("cluster_switch: failed to set clock rate (%d)\n", e);
504                         return e;
505                 }
506         }
507
508         e = clk_set_parent(cpu_clk, cpu_lp_clk);
509         if (e) {
510                 pr_err("cluster switching request failed (%d)\n", e);
511                 return e;
512         }
513         return e;
514 }
515
516 int tegra_switch_to_g_cluster(void)
517 {
518         struct clk *cpu_clk = tegra_get_clock_by_name("cpu");
519         struct clk *cpu_g_clk = tegra_get_clock_by_name("cpu_g");
520         int e;
521
522         if (!is_lp_cluster())
523                 return 0;
524
525         e = clk_set_parent(cpu_clk, cpu_g_clk);
526         if (e) {
527                 pr_err("cluster switching request failed (%d)\n", e);
528                 return e;
529         }
530
531         /* Raise the clock back to the G cluster CPU's maximum rate */
532
533         e = clk_set_rate(cpu_clk, cpu_g_clk->max_rate);
534         if (e) {
535                 pr_err("cluster_switch: failed to raise the clock rate (%d)\n", e);
536                 return e;
537         }
538         return e;
539 }
540
541 #endif
542
543 #ifdef CONFIG_PM_SLEEP
544
545 void tegra_lp0_suspend_mc(void)
546 {
547         /* Since the memory frequency after LP0 is restored to the boot
548            rate, MC timing is saved during init, not on entry to LP0.
549            Keep this hook just in case, anyway. */
550 }
551
552 void tegra_lp0_resume_mc(void)
553 {
554         tegra_mc_timing_restore();
555 }
556
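/*
 * tegra_lp0_cpu_mode() is called on both edges of LP0: with enter == true it
 * records whether we were running on the G cluster and, if so, drops to LP
 * for the duration of LP0; with enter == false it switches back to G after
 * resume.  The cclk_lp clock is kept enabled only while the LP cluster is in
 * use.
 */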
557 void tegra_lp0_cpu_mode(bool enter)
558 {
559         static struct clk *cclk_lp;
560         static bool entered_on_g = false;
561         unsigned int flags;
562
563         if (!cclk_lp)
564                 cclk_lp = tegra_get_clock_by_name("cclk_lp");
565
566         if (enter)
567                 entered_on_g = !is_lp_cluster();
568
569         if (entered_on_g) {
570                 if (enter)
571                         tegra_clk_prepare_enable(cclk_lp);
572
573                 flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
574                 flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
575 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
576                 flags |= TEGRA_POWER_CLUSTER_PART_DEFAULT;
577 #endif
578                 if (!tegra_cluster_control(0, flags)) {
579                         if (!enter)
580                                 tegra_clk_disable_unprepare(cclk_lp);
581                         pr_info("Tegra: switched to %s cluster\n",
582                                 enter ? "LP" : "G");
583                 }
584         }
585 }
586
587 #define IO_DPD_INFO(_name, _index, _bit) \
588         { \
589                 .name = _name, \
590                 .io_dpd_reg_index = _index, \
591                 .io_dpd_bit = _bit, \
592         }
593
594 /* PMC IO DPD register offsets */
595 #define APBDEV_PMC_IO_DPD_REQ_0         0x1b8
596 #define APBDEV_PMC_IO_DPD_STATUS_0      0x1bc
597 #define APBDEV_PMC_SEL_DPD_TIM_0        0x1c8
598 #define APBDEV_DPD_ENABLE_LSB           30
599 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
600 #define APBDEV_DPD2_ENABLE_LSB          5
601 #else
602 #define APBDEV_DPD2_ENABLE_LSB          30
603 #endif
604 #define PMC_DPD_SAMPLE                  0x20
605
606 static struct tegra_io_dpd tegra_list_io_dpd[] = {
607 #if defined(CONFIG_ARCH_TEGRA_3x_SOC) && defined(CONFIG_TEGRA_IO_DPD)
608         /* sd dpd bits in dpd2 register */
609         IO_DPD_INFO("sdhci-tegra.0",    1,      1), /* SDMMC1 */
610 #endif
611         IO_DPD_INFO("sdhci-tegra.2",    1,      2), /* SDMMC3 */
612         IO_DPD_INFO("sdhci-tegra.3",    1,      3), /* SDMMC4 */
613 };
614 #endif
615
616 /* clean up the bootloader's IO DPD settings during kernel init */
617 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
618
619 #ifdef CONFIG_PM_SLEEP
620 struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
621 {
622 #ifdef CONFIG_TEGRA_IO_DPD
623         int i;
624         const char *name = dev ? dev_name(dev) : NULL;
625         if (name) {
626                 for (i = 0; i < ARRAY_SIZE(tegra_list_io_dpd); i++) {
627                         if (!(strncmp(tegra_list_io_dpd[i].name, name,
628                                 strlen(name)))) {
629                                 return &tegra_list_io_dpd[i];
630                         }
631                 }
632         }
633         dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
634                 ((name) ? name : "NULL"));
635 #endif
636         return NULL;
637 }
638
639 static DEFINE_SPINLOCK(tegra_io_dpd_lock);
640
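/*
 * The PMC DPD request registers take a 2-bit request code in the upper bits
 * (at APBDEV_DPD*_ENABLE_LSB) together with a per-pad bit mask: code 2
 * requests DPD entry (tegra_io_dpd_enable()) and code 1 (IO_DPD_CODE_OFF)
 * requests DPD exit (tegra_io_dpd_disable() and the bootloader cleanup).
 */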
641 void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
642 {
643         unsigned int enable_mask;
644         unsigned int dpd_status;
645         unsigned int dpd_enable_lsb;
646
647         if (!hnd) {
648                 pr_warn("SD IO DPD handle NULL in %s\n", __func__);
649                 return;
650         }
651         spin_lock(&tegra_io_dpd_lock);
652         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
653                                                 APBDEV_DPD_ENABLE_LSB;
654         writel(0x1, pmc + PMC_DPD_SAMPLE);
655         writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
656         enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
657         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
658                                         hnd->io_dpd_reg_index * 8));
659         udelay(1);
660         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
661                                         hnd->io_dpd_reg_index * 8));
662         if (!(dpd_status & (1 << hnd->io_dpd_bit))) {
663 #if !defined(CONFIG_TEGRA_FPGA_PLATFORM)
664                 pr_err("Error: dpd%d enable failed, status=%#x\n",
665                        (hnd->io_dpd_reg_index + 1), dpd_status);
666 #endif
667         }
668         /* Sample register must be reset before next sample operation */
669         writel(0x0, pmc + PMC_DPD_SAMPLE);
670         spin_unlock(&tegra_io_dpd_lock);
671         return;
672 }
673
674 void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
675 {
676         unsigned int enable_mask;
677         unsigned int dpd_status;
678         unsigned int dpd_enable_lsb;
679
680         if (!hnd) {
681                 pr_warn("SD IO DPD handle NULL in %s\n", __func__);
682                 return;
683         }
684         spin_lock(&tegra_io_dpd_lock);
685         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
686                                                 APBDEV_DPD_ENABLE_LSB;
687         enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
688         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
689                                         hnd->io_dpd_reg_index * 8));
690         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
691                                         hnd->io_dpd_reg_index * 8));
692         if (dpd_status & (1 << hnd->io_dpd_bit)) {
693 #if !defined(CONFIG_TEGRA_FPGA_PLATFORM)
694                 pr_err("Error: dpd%d disable failed, status=%#x\n",
695                        (hnd->io_dpd_reg_index + 1), dpd_status);
696 #endif
697         }
698         spin_unlock(&tegra_io_dpd_lock);
699         return;
700 }
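A minimal sketch of how an SD host controller driver might pair these calls in its suspend/resume path; the callbacks below are hypothetical, and tegra_io_dpd_get() simply matches the device name against tegra_list_io_dpd[]:

static int example_sdhci_suspend(struct device *dev)
{
        struct tegra_io_dpd *dpd = tegra_io_dpd_get(dev);

        if (dpd)
                tegra_io_dpd_enable(dpd);       /* put the pads into deep power down */
        return 0;
}

static int example_sdhci_resume(struct device *dev)
{
        struct tegra_io_dpd *dpd = tegra_io_dpd_get(dev);

        if (dpd)
                tegra_io_dpd_disable(dpd);      /* release the pads from deep power down */
        return 0;
}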
701
702 static void tegra_io_dpd_delayed_disable(struct work_struct *work)
703 {
704         struct tegra_io_dpd *hnd = container_of(
705                 to_delayed_work(work), struct tegra_io_dpd, delay_dpd);
706         tegra_io_dpd_disable(hnd);
707         hnd->need_delay_dpd = 0;
708 }
709
710 int tegra_io_dpd_init(void)
711 {
712         int i;
713         for (i = 0;
714                 i < (sizeof(tegra_list_io_dpd) / sizeof(struct tegra_io_dpd));
715                 i++) {
716                         INIT_DELAYED_WORK(&(tegra_list_io_dpd[i].delay_dpd),
717                                 tegra_io_dpd_delayed_disable);
718                         mutex_init(&(tegra_list_io_dpd[i].delay_lock));
719                         tegra_list_io_dpd[i].need_delay_dpd = 0;
720         }
721         return 0;
722 }
723
724 #else
725
726 int tegra_io_dpd_init(void)
727 {
728         return 0;
729 }
730
731 void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
732 {
733 }
734
735 void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
736 {
737 }
738
739 struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
740 {
741         return NULL;
742 }
743
744 #endif
745
746 EXPORT_SYMBOL(tegra_io_dpd_get);
747 EXPORT_SYMBOL(tegra_io_dpd_enable);
748 EXPORT_SYMBOL(tegra_io_dpd_disable);
749 EXPORT_SYMBOL(tegra_io_dpd_init);
750
751 struct io_dpd_reg_info {
752         u32 req_reg_off;
753         u8 dpd_code_lsb;
754 };
755
756 static struct io_dpd_reg_info t3_io_dpd_req_regs[] = {
757         {0x1b8, 30},
758         {0x1c0, 5},
759 };
760
761 /* io dpd off request code */
762 #define IO_DPD_CODE_OFF         1
763
764 /* cleans io dpd settings from bootloader during kernel init */
765 void tegra_bl_io_dpd_cleanup(void)
766 {
767         int i;
768         unsigned int dpd_mask;
769         unsigned int dpd_status;
770
771         pr_info("Clear bootloader IO dpd settings\n");
772         /* clear all dpd requests from bootloader */
773         for (i = 0; i < ARRAY_SIZE(t3_io_dpd_req_regs); i++) {
774                 dpd_mask = ((1 << t3_io_dpd_req_regs[i].dpd_code_lsb) - 1);
775                 dpd_mask |= (IO_DPD_CODE_OFF <<
776                         t3_io_dpd_req_regs[i].dpd_code_lsb);
777                 writel(dpd_mask, pmc + t3_io_dpd_req_regs[i].req_reg_off);
778                 /* dpd status register is next to req reg in tegra3 */
779                 dpd_status = readl(pmc +
780                         (t3_io_dpd_req_regs[i].req_reg_off + 4));
781         }
782         return;
783 }
784 EXPORT_SYMBOL(tegra_bl_io_dpd_cleanup);
785