ARM: tegra11x: CPUID virtualization support
[linux-3.10.git] / arch / arm / mach-tegra / pm-t3.c
1 /*
2  * arch/arm/mach-tegra/pm-t3.c
3  *
4  * Tegra3 SOC-specific power and cluster management
5  *
6  * Copyright (c) 2009-2012, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/io.h>
22 #include <linux/smp.h>
23 #include <linux/interrupt.h>
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/device.h>
28 #include <linux/module.h>
29 #include <linux/clockchips.h>
30 #include <linux/cpu_pm.h>
31
32 #include <mach/gpio.h>
33 #include <mach/irqs.h>
34 #include <mach/io_dpd.h>
35
36 #include <asm/smp_plat.h>
37 #include <asm/cputype.h>
38 #include <asm/hardware/gic.h>
39
40 #include <trace/events/power.h>
41
42 #include "clock.h"
43 #include "cpuidle.h"
44 #include "iomap.h"
45 #include "pm.h"
46 #include "sleep.h"
47 #include "tegra3_emc.h"
48 #include "dvfs.h"
49
50 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
51 #define CAR_CCLK_BURST_POLICY \
52         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)
53
54 #define CAR_SUPER_CCLK_DIVIDER \
55         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)
56
57 #define CAR_CCLKG_BURST_POLICY \
58         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)
59
60 #define CAR_SUPER_CCLKG_DIVIDER \
61         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)
62
63 #define CAR_CCLKLP_BURST_POLICY \
64         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
65 #define PLLX_DIV2_BYPASS_LP     (1<<16)
66
67 #define CAR_SUPER_CCLKLP_DIVIDER \
68         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)
69
70 #define CAR_BOND_OUT_V \
71         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
72 #define CAR_BOND_OUT_V_CPU_G    (1<<0)
73 #define CAR_BOND_OUT_V_CPU_LP   (1<<1)
74
75 #define CAR_CLK_ENB_V_SET \
76         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
77 #define CAR_CLK_ENB_V_CPU_G     (1<<0)
78 #define CAR_CLK_ENB_V_CPU_LP    (1<<1)
79
80 #define CAR_RST_CPUG_CMPLX_SET \
81         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)
82
83 #define CAR_RST_CPUG_CMPLX_CLR \
84         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)
85
86 #define CAR_RST_CPULP_CMPLX_SET \
87         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)
88
89 #define CAR_RST_CPULP_CMPLX_CLR \
90         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)
91
92 #define CAR_CLK_CPUG_CMPLX_SET \
93         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)
94
95 #define CAR_CLK_CPUG_CMPLX_CLR \
96         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)
97
98 #define CAR_CLK_CPULP_CMPLX_SET \
99         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)
100
101 #define CAR_CLK_CPULP_CMPLX_CLR \
102         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)
103
104 #define CPU_CLOCK(cpu)  (0x1<<(8+cpu))
105 #define CPU_RESET(cpu)  (0x1111ul<<(cpu))
106
107 #define PLLX_FO_G (1<<28)
108 #define PLLX_FO_LP (1<<29)
109
110 #define CLK_RST_CONTROLLER_PLLX_MISC_0 \
111         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0xE4)
112
113 static int cluster_switch_prolog_clock(unsigned int flags)
114 {
115         u32 reg;
116         u32 CclkBurstPolicy;
117         u32 SuperCclkDivier;
118
119         /* Read the bond out register containing the G and LP CPUs. */
120         reg = readl(CAR_BOND_OUT_V);
121
122         /* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent
123            LP settings overwrite by save/restore code */
124         CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
125         CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
126         writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
127
128         /* Switching to G? */
129         if (flags & TEGRA_POWER_CLUSTER_G) {
130                 /* Do the G CPUs exist? */
131                 if (reg & CAR_BOND_OUT_V_CPU_G)
132                         return -ENXIO;
133
134                 /* Keep G CPU clock policy set by upper laayer, with the
135                    exception of the transition via LP1 */
136                 if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
137                         /* In LP1 power mode come up on CLKM (oscillator) */
138                         CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
139                         CclkBurstPolicy &= ~0xF;
140                         SuperCclkDivier = 0;
141
142                         writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
143                         writel(SuperCclkDivier, CAR_SUPER_CCLKG_DIVIDER);
144                 }
145
146 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
147                 /* Hold G CPUs 1-3 in reset after the switch */
148                 reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
149                 writel(reg, CAR_RST_CPUG_CMPLX_SET);
150
151                 /* Take G CPU 0 out of reset after the switch */
152                 reg = CPU_RESET(0);
153                 writel(reg, CAR_RST_CPUG_CMPLX_CLR);
154
155                 /* Disable the clocks on G CPUs 1-3 after the switch */
156                 reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
157                 writel(reg, CAR_CLK_CPUG_CMPLX_SET);
158
159                 /* Enable the clock on G CPU 0 after the switch */
160                 reg = CPU_CLOCK(0);
161                 writel(reg, CAR_CLK_CPUG_CMPLX_CLR);
162
163                 /* Enable the G CPU complex clock after the switch */
164                 reg = CAR_CLK_ENB_V_CPU_G;
165                 writel(reg, CAR_CLK_ENB_V_SET);
166 #endif
167         }
168         /* Switching to LP? */
169         else if (flags & TEGRA_POWER_CLUSTER_LP) {
170                 /* Does the LP CPU exist? */
171                 if (reg & CAR_BOND_OUT_V_CPU_LP)
172                         return -ENXIO;
173
174                 /* Keep LP CPU clock policy set by upper layer, with the
175                    exception of the transition via LP1 */
176                 if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
177                         /* In LP1 power mode come up on CLKM (oscillator) */
178                         CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
179                         CclkBurstPolicy &= ~0xF;
180                         SuperCclkDivier = 0;
181
182                         writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
183                         writel(SuperCclkDivier, CAR_SUPER_CCLKLP_DIVIDER);
184                 }
185
186 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
187                 /* Take the LP CPU ut of reset after the switch */
188                 reg = CPU_RESET(0);
189                 writel(reg, CAR_RST_CPULP_CMPLX_CLR);
190
191                 /* Enable the clock on the LP CPU after the switch */
192                 reg = CPU_CLOCK(0);
193                 writel(reg, CAR_CLK_CPULP_CMPLX_CLR);
194
195                 /* Enable the LP CPU complex clock after the switch */
196                 reg = CAR_CLK_ENB_V_CPU_LP;
197                 writel(reg, CAR_CLK_ENB_V_SET);
198 #endif
199         }
200
201         return 0;
202 }
203
204 static inline void enable_pllx_cluster_port(void)
205 {
206         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
207         val &= (is_lp_cluster()?(~PLLX_FO_G):(~PLLX_FO_LP));
208         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
209 }
210
211 static inline void disable_pllx_cluster_port(void)
212 {
213         u32 val = readl(CLK_RST_CONTROLLER_PLLX_MISC_0);
214         val |= (is_lp_cluster()?PLLX_FO_G:PLLX_FO_LP);
215         writel(val, CLK_RST_CONTROLLER_PLLX_MISC_0);
216 }
217
/*
 * Arm the flow controller (and CPU clocks) for a cluster switch.
 *
 * @flags: TEGRA_POWER_CLUSTER_* request flags. The cluster bits select
 *         the target; TEGRA_POWER_CLUSTER_IMMEDIATE requests immediate
 *         wake; TEGRA_POWER_CLUSTER_FORCE re-arms even when already on
 *         the target cluster.
 *
 * Only programs registers; the hardware performs the actual switch when
 * this CPU next powers down.
 */
void tegra_cluster_switch_prolog(unsigned int flags)
{
	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Read the flow controller CSR register and clear the CPU switch
	   and immediate flags. If an actual CPU switch is to be performed,
	   re-write the CSR register with the desired values. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);

	/* Program flow controller for immediate wake if requested */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		reg |= FLOW_CTRL_CSR_IMMEDIATE_WAKE;

	/* Do nothing if no switch actions requested */
	if (!target_cluster)
		goto done;

#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	/* Select which power partitions (non-CPU, CPU rail) the flow
	   controller may gate, per the partitioning flags. */
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
	if ((flags & TEGRA_POWER_CLUSTER_PART_CRAIL) &&
	    ((flags & TEGRA_POWER_CLUSTER_PART_NONCPU) == 0) &&
	    (current_cluster == TEGRA_POWER_CLUSTER_LP))
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
	else if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL)
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;

	if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
#endif

	if ((current_cluster != target_cluster) ||
		(flags & TEGRA_POWER_CLUSTER_FORCE)) {
		if (current_cluster != target_cluster) {
			/* Set up the clocks for the target CPU. */
			if (cluster_switch_prolog_clock(flags)) {
				/* The target CPU does not exist */
				goto done;
			}

			/* Set up the flow controller to switch CPUs. */
			reg |= FLOW_CTRL_CSR_SWITCH_CLUSTER;

			/* Enable target port of PLL_X */
			enable_pllx_cluster_port();
		}
	}

done:
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));
}
277
278
/* Re-synchronize ACTLR bits on the G CPU after an LP=>G switch. */
static void cluster_switch_epilog_actlr(void)
{
	u32 actlr;

	/*
	 * This is only needed for Cortex-A9, for Cortex-A15, do nothing!
	 *
	 * TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
	 * as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
	 * switch by CPU save/restore code, but SMP bit is restored correctly.
	 * Synchronize these two bits here after LP=>G transition. Note that
	 * only CPU0 core is powered on before and after the switch. See also
	 * bug 807595.
	 */

	/* MIDR primary part number 0xC0F identifies Cortex-A15: bail out. */
	if (((read_cpuid_id() >> 4) & 0xFFF) == 0xC0F)
		return;

	/* Read the Auxiliary Control Register (ACTLR). */
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* If the SMP bit (bit 6) survived the switch, set the FW bit
	   (bit 0) back as well so TLB maintenance is broadcast again. */
	if (actlr & (0x1 << 6)) {
		actlr |= 0x1;
		__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
	}
}
303
/* Restore GIC distributor interrupt affinity after an LP=>G switch. */
static void cluster_switch_epilog_gic(void)
{
	unsigned int max_irq, i;
	void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);

	/* Reprogram the interrupt affinity because on the LP CPU,
	   the interrupt distributor affinity registers are stubbed out
	   by ARM (reads as zero, writes ignored). So when the LP CPU
	   context save code runs, the affinity registers will read
	   as all zero. This causes all interrupts to be effectively
	   disabled when back on the G CPU because they aren't routable
	   to any CPU. See bug 667720 for details. */

	/* GIC_DIST_CTR[4:0] encodes the number of 32-IRQ groups - 1. */
	max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
	max_irq = (max_irq + 1) * 32;

	/* Skip the first 32 (private) IRQs; each ITARGETSR register holds
	   one affinity byte for each of 4 IRQs. */
	for (i = 32; i < max_irq; i += 4) {
		/* Default: route all 4 IRQs to CPU 0. */
		u32 val = 0x01010101;
#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
		unsigned int irq;
		for (irq = i; irq < (i + 4); irq++) {
			struct cpumask mask;
			struct irq_desc *desc = irq_to_desc(irq);

			/* Honor a driver's affinity hint where it overlaps
			   the currently configured affinity. */
			if (desc && desc->affinity_hint) {
				if (cpumask_and(&mask, desc->affinity_hint,
						desc->irq_data.affinity))
					val |= (*cpumask_bits(&mask) & 0xff) <<
						((irq & 3) * 8);
			}
		}
#endif
		/* Byte offset of the ITARGETSR covering IRQs i..i+3 equals
		   i itself (i * 4 / 4 == i; kept as written). */
		writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
	}
}
339
/*
 * Clean up after a cluster switch (or an aborted one).
 *
 * @flags: the TEGRA_POWER_CLUSTER_* flags the prolog was armed with
 *         (currently unused here except for symmetry with the prolog).
 *
 * Clears the switch/wake bits in the flow controller CSR, restores
 * G-cluster state stubbed out by the LP CPU (ACTLR, GIC affinity), and
 * powers down the now-unused PLL_X output port.
 */
void tegra_cluster_switch_epilog(unsigned int flags)
{
	u32 reg;
	u32 cpu;

	cpu = cpu_logical_map(smp_processor_id());

	/* Make sure the switch and immediate flags are cleared in
	   the flow controller to prevent undesirable side-effects
	   for future users of the flow controller. */
	reg = readl(FLOW_CTRL_CPU_CSR(cpu));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
#endif
	writel(reg, FLOW_CTRL_CPU_CSR(cpu));

	/* Perform post-switch LP=>G clean-up */
	if (!is_lp_cluster()) {
		cluster_switch_epilog_actlr();
		cluster_switch_epilog_gic();
	}

	/* Disable unused port of PLL_X */
	disable_pllx_cluster_port();

	#if DEBUG_CLUSTER_SWITCH
	{
		/* FIXME: clock functions below are taking mutex */
		struct clk *c = tegra_get_clock_by_name(
			is_lp_cluster() ? "cpu_lp" : "cpu_g");
		DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
			is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
	}
	#endif
}
377
/*
 * Request and perform a switch between the G and LP CPU clusters.
 *
 * @us:    wake-up time in microseconds for the LP2 trigger (0 = none);
 *         forced to 0 when TEGRA_POWER_CLUSTER_IMMEDIATE is set.
 * @flags: TEGRA_POWER_CLUSTER_* selection/force/immediate flags, plus
 *         TEGRA_POWER_SDRAM_SELFREFRESH to switch via LP1 instead of LP2.
 *
 * Returns 0 on success; -EINVAL for an invalid target, -EBUSY when more
 * than one CPU is online, -EEXIST when already on the target cluster
 * without FORCE, -EPERM when the G cluster is absent.
 *
 * The switch itself happens while the CPU transits LP1 (SDRAM
 * self-refresh path) or LP2, with interrupts disabled.
 */
int tegra_cluster_control(unsigned int us, unsigned int flags)
{
	/* Timestamp of the last G=>LP transition, used to enforce the
	   minimum power-off interval before switching back to G. */
	static ktime_t last_g2lp;

	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	unsigned long irq_flags;

	if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
		return -EINVAL;

	/* The switch sequence assumes this is the only online CPU. */
	if (num_online_cpus() > 1)
		return -EBUSY;

	if ((current_cluster == target_cluster)
	&& !(flags & TEGRA_POWER_CLUSTER_FORCE))
		return -EEXIST;

	if (target_cluster == TEGRA_POWER_CLUSTER_G)
		if (!is_g_cluster_present())
			return -EPERM;

	trace_power_start(POWER_PSTATE, target_cluster, 0);

	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		us = 0;

	DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
		(flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
		is_lp_cluster() ? "LP" : "G",
		(target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
		(flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
		(flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
		us));

	local_irq_save(irq_flags);

	/* Rail bookkeeping around the switch (skipped while timekeeping
	   is suspended, since ktime_get() is unusable then). */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		ktime_t now = ktime_get();
		if (target_cluster == TEGRA_POWER_CLUSTER_G) {
			/* Ensure the minimum power-off interval has elapsed
			   since the last G=>LP switch before powering the
			   G cluster back up. */
			s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
			s64 t_off = tegra_cpu_power_off_time();
#if defined(CONFIG_ARCH_TEGRA_11x_SOC)
			/* u32 reg; */
#endif
			if (t_off > t)
				udelay((unsigned int)(t_off - t));

			tegra_dvfs_rail_on(tegra_cpu_rail, now);
#if defined(CONFIG_ARCH_TEGRA_11x_SOC)
			/*
			 * comment out RAM repair as this seems impacting
			 * cluster switch
			 */
			/* enable RAM repair by flow controller */
			/*
			reg = readl(FLOW_CTRL_RAM_REPAIR);
			reg &= ~FLOW_CTRL_RAM_REPAIR_BYPASS_EN;
			writel(reg, FLOW_CTRL_RAM_REPAIR);
			*/
#endif

		} else {
#ifdef CONFIG_TEGRA_VIRTUAL_CPUID
			/* Publish this CPU's logical ID to the flow
			   controller for CPUID virtualization. */
			u32 cpu;

			cpu = cpu_logical_map(smp_processor_id());
			writel(cpu, FLOW_CTRL_MPID);
#endif
			last_g2lp = now;
			tegra_dvfs_rail_off(tegra_cpu_rail, now);
		}
	}

	if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
		/* LP1 path: switch while SDRAM is in self-refresh. */
		if (us)
			tegra_lp2_set_trigger(us);

		tegra_cluster_switch_prolog(flags);
		tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
		tegra_cluster_switch_epilog(flags);

		if (us)
			tegra_lp2_set_trigger(0);
	} else {
		/* LP2 path: switch via the deepest CPU idle state. */
		int cpu;

		cpu = cpu_logical_map(smp_processor_id());

		tegra_set_cpu_in_lp2(cpu);
		cpu_pm_enter();
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					   &cpu);
		tegra_idle_lp2_last(0, flags);
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
					   &cpu);
		cpu_pm_exit();
		tegra_clear_cpu_in_lp2(cpu);
	}
	local_irq_restore(irq_flags);

	DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));

	return 0;
}
487 #endif
488
489 #ifdef CONFIG_PM_SLEEP
490
/* LP0 entry hook for the memory controller (intentionally empty). */
void tegra_lp0_suspend_mc(void)
{
	/* Since memory frequency after LP0 is restored to boot rate
	   mc timing is saved during init, not on entry to LP0. Keep
	   this hook just in case, anyway */
}
497
/* LP0 exit hook: restore the memory controller timing saved at init. */
void tegra_lp0_resume_mc(void)
{
	tegra_mc_timing_restore();
}
502
/*
 * Move to the LP cluster around LP0 (deep sleep) and back.
 *
 * @enter: true when entering LP0, false when resuming.
 *
 * If LP0 was entered while running on the G cluster, switch to LP
 * before suspend and back to G on resume. The LP cluster clock is
 * enabled before switching to LP and released after switching back.
 */
void tegra_lp0_cpu_mode(bool enter)
{
	/* Looked up once and cached across suspend/resume cycles. */
	static struct clk *cclk_lp;
	/* Remembers whether LP0 was entered from the G cluster. */
	static bool entered_on_g = false;
	unsigned int flags;

	if (!cclk_lp)
		cclk_lp = tegra_get_clock_by_name("cclk_lp");

	if (enter)
		entered_on_g = !is_lp_cluster();

	if (entered_on_g) {
		if (enter)
			clk_enable(cclk_lp);

		flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
		flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
		flags |= TEGRA_POWER_CLUSTER_PART_DEFAULT;
#endif
		/* Only drop the LP clock reference once we are back on G. */
		if (!tegra_cluster_control(0, flags)) {
			if (!enter)
				clk_disable(cclk_lp);
			pr_info("Tegra: switched to %s cluster\n",
				enter ? "LP" : "G");
		}
	}
}
532
533 #define IO_DPD_INFO(_name, _index, _bit) \
534         { \
535                 .name = _name, \
536                 .io_dpd_reg_index = _index, \
537                 .io_dpd_bit = _bit, \
538         }
539
540 /* PMC IO DPD register offsets */
541 #define APBDEV_PMC_IO_DPD_REQ_0         0x1b8
542 #define APBDEV_PMC_IO_DPD_STATUS_0      0x1bc
543 #define APBDEV_PMC_SEL_DPD_TIM_0        0x1c8
544 #define APBDEV_DPD_ENABLE_LSB           30
545 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
546 #define APBDEV_DPD2_ENABLE_LSB          5
547 #else
548 #define APBDEV_DPD2_ENABLE_LSB          30
549 #endif
550 #define PMC_DPD_SAMPLE                  0x20
551
/* Table of IO pads supporting deep power down, keyed by device name.
   All entries live in the DPD2 request register (reg_index 1). */
static struct tegra_io_dpd tegra_list_io_dpd[] = {
#if defined(CONFIG_ARCH_TEGRA_3x_SOC) && defined(CONFIG_TEGRA_IO_DPD)
	/* sd dpd bits in dpd2 register */
	IO_DPD_INFO("sdhci-tegra.0",	1,	1), /* SDMMC1 */
#endif
	IO_DPD_INFO("sdhci-tegra.2",	1,	2), /* SDMMC3 */
	IO_DPD_INFO("sdhci-tegra.3",	1,	3), /* SDMMC4 */
};
560 #endif
561
562 /* we want to cleanup bootloader io dpd setting in kernel */
563 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
564
565 #ifdef CONFIG_PM_SLEEP
566 struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
567 {
568 #ifdef CONFIG_TEGRA_IO_DPD
569         int i;
570         const char *name = dev ? dev_name(dev) : NULL;
571         if (name) {
572                 for (i = 0; i < ARRAY_SIZE(tegra_list_io_dpd); i++) {
573                         if (!(strncmp(tegra_list_io_dpd[i].name, name,
574                                 strlen(name)))) {
575                                 return &tegra_list_io_dpd[i];
576                         }
577                 }
578         }
579         dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
580                 ((name) ? name : "NULL"));
581 #endif
582         return NULL;
583 }
584
585 static DEFINE_SPINLOCK(tegra_io_dpd_lock);
586
/*
 * Put a device's IO pads into deep power down.
 *
 * @hnd: DPD descriptor from tegra_io_dpd_get(); NULL is rejected with
 *       a warning.
 *
 * Programs the PMC sample/timer registers, issues the DPD request, then
 * reads back the status register to confirm the pads actually entered
 * DPD. Serialized by tegra_io_dpd_lock.
 */
void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
	unsigned int enable_mask;
	unsigned int dpd_status;
	unsigned int dpd_enable_lsb;

	if ((!hnd)) {
		pr_warn("SD IO DPD handle NULL in %s\n", __func__);
		return;
	}
	spin_lock(&tegra_io_dpd_lock);
	/* DPD and DPD2 request registers place the enable code at
	   different bit positions. */
	dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
						APBDEV_DPD_ENABLE_LSB;
	writel(0x1, pmc + PMC_DPD_SAMPLE);
	writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
	/* Code 2 in the enable field requests DPD ON for the selected pad
	   (presumably per the PMC register spec — compare code 1 = OFF in
	   tegra_io_dpd_disable()). */
	enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
	writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
					hnd->io_dpd_reg_index * 8));
	udelay(1);
	dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
					hnd->io_dpd_reg_index * 8));
	if (!(dpd_status & (1 << hnd->io_dpd_bit))) {
#if !defined(CONFIG_TEGRA_FPGA_PLATFORM)
		pr_info("Error: dpd%d enable failed, status=%#x\n",
		(hnd->io_dpd_reg_index + 1), dpd_status);
#endif
	}
	/* Sample register must be reset before next sample operation */
	writel(0x0, pmc + PMC_DPD_SAMPLE);
	spin_unlock(&tegra_io_dpd_lock);
	return;
}
619
/*
 * Take a device's IO pads out of deep power down.
 *
 * @hnd: DPD descriptor from tegra_io_dpd_get(); NULL is rejected with
 *       a warning.
 *
 * Issues the DPD OFF request and reads back the status register to
 * confirm the pads left DPD. Serialized by tegra_io_dpd_lock.
 */
void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
	unsigned int enable_mask;
	unsigned int dpd_status;
	unsigned int dpd_enable_lsb;

	if ((!hnd)) {
		pr_warn("SD IO DPD handle NULL in %s\n", __func__);
		return;
	}
	spin_lock(&tegra_io_dpd_lock);
	/* DPD and DPD2 request registers place the enable code at
	   different bit positions. */
	dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
						APBDEV_DPD_ENABLE_LSB;
	/* Code 1 (IO_DPD_CODE_OFF) in the enable field requests DPD OFF. */
	enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
	writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
					hnd->io_dpd_reg_index * 8));
	dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
					hnd->io_dpd_reg_index * 8));
	if (dpd_status & (1 << hnd->io_dpd_bit)) {
#if !defined(CONFIG_TEGRA_FPGA_PLATFORM)
		pr_info("Error: dpd%d disable failed, status=%#x\n",
		(hnd->io_dpd_reg_index + 1), dpd_status);
#endif
	}
	spin_unlock(&tegra_io_dpd_lock);
	return;
}
647
/* Delayed-work callback: disable IO DPD for the owning descriptor and
   clear its pending-delay flag. */
static void tegra_io_dpd_delayed_disable(struct work_struct *work)
{
	struct tegra_io_dpd *hnd = container_of(
		to_delayed_work(work), struct tegra_io_dpd, delay_dpd);
	tegra_io_dpd_disable(hnd);
	hnd->need_delay_dpd = 0;
}
655
656 int tegra_io_dpd_init(void)
657 {
658         int i;
659         for (i = 0;
660                 i < (sizeof(tegra_list_io_dpd) / sizeof(struct tegra_io_dpd));
661                 i++) {
662                         INIT_DELAYED_WORK(&(tegra_list_io_dpd[i].delay_dpd),
663                                 tegra_io_dpd_delayed_disable);
664                         mutex_init(&(tegra_list_io_dpd[i].delay_lock));
665                         tegra_list_io_dpd[i].need_delay_dpd = 0;
666         }
667         return 0;
668 }
669
670 #else
671
/* !CONFIG_PM_SLEEP stub: nothing to initialize. */
int tegra_io_dpd_init(void)
{
	return 0;
}
676
/* !CONFIG_PM_SLEEP stub: IO DPD enable is a no-op. */
void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
}
680
/* !CONFIG_PM_SLEEP stub: IO DPD disable is a no-op. */
void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
}
684
/* !CONFIG_PM_SLEEP stub: no device has DPD support. */
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
	return NULL;
}
689
690 #endif
691
692 EXPORT_SYMBOL(tegra_io_dpd_get);
693 EXPORT_SYMBOL(tegra_io_dpd_enable);
694 EXPORT_SYMBOL(tegra_io_dpd_disable);
695 EXPORT_SYMBOL(tegra_io_dpd_init);
696
/* Location of one PMC IO DPD request register and the bit position of
   its 2-bit request-code field. */
struct io_dpd_reg_info {
	u32 req_reg_off;	/* byte offset of the request register in PMC */
	u8 dpd_code_lsb;	/* LSB of the request-code field */
};
701
/* Tegra3 IO DPD request registers (DPD_REQ at 0x1b8, DPD2_REQ at 0x1c0)
   with their request-code field positions. */
static struct io_dpd_reg_info t3_io_dpd_req_regs[] = {
	{0x1b8, 30},
	{0x1c0, 5},
};
706
707 /* io dpd off request code */
708 #define IO_DPD_CODE_OFF         1
709
710 /* cleans io dpd settings from bootloader during kernel init */
711 void tegra_bl_io_dpd_cleanup()
712 {
713         int i;
714         unsigned int dpd_mask;
715         unsigned int dpd_status;
716
717         pr_info("Clear bootloader IO dpd settings\n");
718         /* clear all dpd requests from bootloader */
719         for (i = 0; i < ARRAY_SIZE(t3_io_dpd_req_regs); i++) {
720                 dpd_mask = ((1 << t3_io_dpd_req_regs[i].dpd_code_lsb) - 1);
721                 dpd_mask |= (IO_DPD_CODE_OFF <<
722                         t3_io_dpd_req_regs[i].dpd_code_lsb);
723                 writel(dpd_mask, pmc + t3_io_dpd_req_regs[i].req_reg_off);
724                 /* dpd status register is next to req reg in tegra3 */
725                 dpd_status = readl(pmc +
726                         (t3_io_dpd_req_regs[i].req_reg_off + 4));
727         }
728         return;
729 }
730 EXPORT_SYMBOL(tegra_bl_io_dpd_cleanup);
731