c0116b10002cb5e685b8be3c87367af2cc3f230e
[linux-2.6.git] / arch / arm / mach-tegra / pm-t3.c
1 /*
2  * arch/arm/mach-tegra/pm-t3.c
3  *
4  * Tegra3 SOC-specific power and cluster management
5  *
6  * Copyright (c) 2009-2012, NVIDIA CORPORATION.  All rights reserved.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
12  *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/init.h>
21 #include <linux/io.h>
22 #include <linux/smp.h>
23 #include <linux/interrupt.h>
24 #include <linux/clk.h>
25 #include <linux/delay.h>
26 #include <linux/irq.h>
27 #include <linux/device.h>
28 #include <linux/module.h>
29 #include <linux/clockchips.h>
30 #include <linux/cpu_pm.h>
31
32 #include <mach/gpio.h>
33 #include <mach/iomap.h>
34 #include <mach/irqs.h>
35
36 #include <asm/cputype.h>
37 #include <asm/hardware/gic.h>
38
39 #include <trace/events/power.h>
40
41 #include "clock.h"
42 #include "cpuidle.h"
43 #include "pm.h"
44 #include "sleep.h"
45 #include "tegra3_emc.h"
46 #include "dvfs.h"
47
48 #ifdef CONFIG_TEGRA_CLUSTER_CONTROL
49 #define CAR_CCLK_BURST_POLICY \
50         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)
51
52 #define CAR_SUPER_CCLK_DIVIDER \
53         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)
54
55 #define CAR_CCLKG_BURST_POLICY \
56         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)
57
58 #define CAR_SUPER_CCLKG_DIVIDER \
59         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)
60
61 #define CAR_CCLKLP_BURST_POLICY \
62         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
63 #define PLLX_DIV2_BYPASS_LP     (1<<16)
64
65 #define CAR_SUPER_CCLKLP_DIVIDER \
66         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)
67
68 #define CAR_BOND_OUT_V \
69         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
70 #define CAR_BOND_OUT_V_CPU_G    (1<<0)
71 #define CAR_BOND_OUT_V_CPU_LP   (1<<1)
72
73 #define CAR_CLK_ENB_V_SET \
74         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
75 #define CAR_CLK_ENB_V_CPU_G     (1<<0)
76 #define CAR_CLK_ENB_V_CPU_LP    (1<<1)
77
78 #define CAR_RST_CPUG_CMPLX_SET \
79         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)
80
81 #define CAR_RST_CPUG_CMPLX_CLR \
82         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)
83
84 #define CAR_RST_CPULP_CMPLX_SET \
85         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)
86
87 #define CAR_RST_CPULP_CMPLX_CLR \
88         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)
89
90 #define CAR_CLK_CPUG_CMPLX_SET \
91         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)
92
93 #define CAR_CLK_CPUG_CMPLX_CLR \
94         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)
95
96 #define CAR_CLK_CPULP_CMPLX_SET \
97         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)
98
99 #define CAR_CLK_CPULP_CMPLX_CLR \
100         (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)
101
102 #define CPU_CLOCK(cpu)  (0x1<<(8+cpu))
103 #define CPU_RESET(cpu)  (0x1111ul<<(cpu))
104
105 static int cluster_switch_prolog_clock(unsigned int flags)
106 {
107         u32 reg;
108         u32 CclkBurstPolicy;
109         u32 SuperCclkDivier;
110
111         /* Read the bond out register containing the G and LP CPUs. */
112         reg = readl(CAR_BOND_OUT_V);
113
114         /* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent
115            LP settings overwrite by save/restore code */
116         CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
117         CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
118         writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
119
120         /* Switching to G? */
121         if (flags & TEGRA_POWER_CLUSTER_G) {
122                 /* Do the G CPUs exist? */
123                 if (reg & CAR_BOND_OUT_V_CPU_G)
124                         return -ENXIO;
125
126                 /* Keep G CPU clock policy set by upper laayer, with the
127                    exception of the transition via LP1 */
128                 if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
129                         /* In LP1 power mode come up on CLKM (oscillator) */
130                         CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
131                         CclkBurstPolicy &= ~0xF;
132                         SuperCclkDivier = 0;
133
134                         writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
135                         writel(SuperCclkDivier, CAR_SUPER_CCLKG_DIVIDER);
136                 }
137
138                 /* Hold G CPUs 1-3 in reset after the switch */
139                 reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
140                 writel(reg, CAR_RST_CPUG_CMPLX_SET);
141
142                 /* Take G CPU 0 out of reset after the switch */
143                 reg = CPU_RESET(0);
144                 writel(reg, CAR_RST_CPUG_CMPLX_CLR);
145
146                 /* Disable the clocks on G CPUs 1-3 after the switch */
147                 reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
148                 writel(reg, CAR_CLK_CPUG_CMPLX_SET);
149
150                 /* Enable the clock on G CPU 0 after the switch */
151                 reg = CPU_CLOCK(0);
152                 writel(reg, CAR_CLK_CPUG_CMPLX_CLR);
153
154                 /* Enable the G CPU complex clock after the switch */
155                 reg = CAR_CLK_ENB_V_CPU_G;
156                 writel(reg, CAR_CLK_ENB_V_SET);
157         }
158         /* Switching to LP? */
159         else if (flags & TEGRA_POWER_CLUSTER_LP) {
160                 /* Does the LP CPU exist? */
161                 if (reg & CAR_BOND_OUT_V_CPU_LP)
162                         return -ENXIO;
163
164                 /* Keep LP CPU clock policy set by upper layer, with the
165                    exception of the transition via LP1 */
166                 if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
167                         /* In LP1 power mode come up on CLKM (oscillator) */
168                         CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
169                         CclkBurstPolicy &= ~0xF;
170                         SuperCclkDivier = 0;
171
172                         writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
173                         writel(SuperCclkDivier, CAR_SUPER_CCLKLP_DIVIDER);
174                 }
175
176                 /* Take the LP CPU ut of reset after the switch */
177                 reg = CPU_RESET(0);
178                 writel(reg, CAR_RST_CPULP_CMPLX_CLR);
179
180                 /* Enable the clock on the LP CPU after the switch */
181                 reg = CPU_CLOCK(0);
182                 writel(reg, CAR_CLK_CPULP_CMPLX_CLR);
183
184                 /* Enable the LP CPU complex clock after the switch */
185                 reg = CAR_CLK_ENB_V_CPU_LP;
186                 writel(reg, CAR_CLK_ENB_V_SET);
187         }
188
189         return 0;
190 }
191
/*
 * Prepare the flow controller (and, via cluster_switch_prolog_clock(),
 * the CAR) for a cluster switch requested by @flags. If @flags requests
 * no target cluster, this only clears any stale switch/immediate flags
 * left in the CPU0 CSR register.
 */
void tegra_cluster_switch_prolog(unsigned int flags)
{
	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	u32 reg;

	/* Read the flow controller CSR register and clear the CPU switch
	   and immediate flags. If an actual CPU switch is to be performed,
	   re-write the CSR register with the desired values. */
	reg = readl(FLOW_CTRL_CPU_CSR(0));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);

	/* Program flow controller for immediate wake if requested */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		reg |= FLOW_CTRL_CSR_IMMEDIATE_WAKE;

	/* Do nothing if no switch actions requested */
	if (!target_cluster)
		goto done;

#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	/* Select which extra partitions (C rail / non-CPU) the flow
	   controller may gate along with the CPU. */
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
	if (flags & TEGRA_POWER_CLUSTER_PART_CRAIL)
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_CRAIL;
	if (flags & TEGRA_POWER_CLUSTER_PART_NONCPU)
		reg |= FLOW_CTRL_CSR_ENABLE_EXT_NCPU;
#endif

	/* NOTE(review): when TEGRA_POWER_CLUSTER_FORCE is set but the target
	   equals the current cluster, the inner test below prevents any
	   switch action from being programmed, so the FORCE term in the
	   outer condition currently has no effect here - confirm intent. */
	if ((current_cluster != target_cluster) ||
		(flags & TEGRA_POWER_CLUSTER_FORCE)) {
		if (current_cluster != target_cluster) {
			/* Set up the clocks for the target CPU. */
			if (cluster_switch_prolog_clock(flags)) {
				/* The target CPU does not exist */
				goto done;
			}

			/* Set up the flow controller to switch CPUs. */
			reg |= FLOW_CTRL_CSR_SWITCH_CLUSTER;
		}
	}

done:
	writel(reg, FLOW_CTRL_CPU_CSR(0));
}
240
241
/*
 * Re-synchronize the ACTLR FW bit with the SMP bit after an LP=>G
 * switch. Only meaningful on Cortex-A9; a no-op on Cortex-A15.
 */
static void cluster_switch_epilog_actlr(void)
{
	u32 actlr;

	/*
	 * This is only needed for Cortex-A9, for Cortex-A15, do nothing!
	 *
	 * TLB maintenance broadcast bit (FW) is stubbed out on LP CPU (reads
	 * as zero, writes ignored). Hence, it is not preserved across G=>LP=>G
	 * switch by CPU save/restore code, but SMP bit is restored correctly.
	 * Synchronize these two bits here after LP=>G transition. Note that
	 * only CPU0 core is powered on before and after the switch. See also
	 * bug 807595.
	 */
	/* Primary part number 0xC0F identifies Cortex-A15: nothing to do. */
	if (((read_cpuid_id() >> 4) & 0xFFF) == 0xC0F)
		return;

	/* Read ACTLR (CP15 c1, c0, 1). */
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* If the SMP bit (bit 6) survived the switch, restore the FW bit
	   (bit 0) to match. */
	if (actlr & (0x1 << 6)) {
		actlr |= 0x1;
		__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
	}
}
266
267 static void cluster_switch_epilog_gic(void)
268 {
269         unsigned int max_irq, i;
270         void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);
271
272         /* Reprogram the interrupt affinity because the on the LP CPU,
273            the interrupt distributor affinity regsiters are stubbed out
274            by ARM (reads as zero, writes ignored). So when the LP CPU
275            context save code runs, the affinity registers will read
276            as all zero. This causes all interrupts to be effectively
277            disabled when back on the G CPU because they aren't routable
278            to any CPU. See bug 667720 for details. */
279
280         max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
281         max_irq = (max_irq + 1) * 32;
282
283         for (i = 32; i < max_irq; i += 4) {
284                 u32 val = 0x01010101;
285 #ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
286                 unsigned int irq;
287                 for (irq = i; irq < (i + 4); irq++) {
288                         struct cpumask mask;
289                         struct irq_desc *desc = irq_to_desc(irq);
290
291                         if (desc && desc->affinity_hint &&
292                             desc->irq_data.affinity) {
293                                 if (cpumask_and(&mask, desc->affinity_hint,
294                                                 desc->irq_data.affinity))
295                                         val |= (*cpumask_bits(&mask) & 0xff) <<
296                                                 ((irq & 3) * 8);
297                         }
298                 }
299 #endif
300                 writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
301         }
302 }
303
/*
 * Clean up after a cluster switch.
 *
 * @flags: switch flags (not read by this function).
 *
 * Clears the one-shot switch/immediate flags in the flow controller and,
 * when now running on the G cluster, restores the CPU and GIC state the
 * LP CPU's save/restore code could not preserve.
 */
void tegra_cluster_switch_epilog(unsigned int flags)
{
	u32 reg;

	/* Make sure the switch and immediate flags are cleared in
	   the flow controller to prevent undesirable side-effects
	   for future users of the flow controller. */
	reg = readl(FLOW_CTRL_CPU_CSR(0));
	reg &= ~(FLOW_CTRL_CSR_IMMEDIATE_WAKE |
		 FLOW_CTRL_CSR_SWITCH_CLUSTER);
#if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
	reg &= ~FLOW_CTRL_CSR_ENABLE_EXT_MASK;
#endif
	writel(reg, FLOW_CTRL_CPU_CSR(0));

	/* Perform post-switch LP=>G clean-up */
	if (!is_lp_cluster()) {
		cluster_switch_epilog_actlr();
		cluster_switch_epilog_gic();
	}

	#if DEBUG_CLUSTER_SWITCH
	{
		/* FIXME: clock functions below are taking mutex */
		struct clk *c = tegra_get_clock_by_name(
			is_lp_cluster() ? "cpu_lp" : "cpu_g");
		DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
			is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
	}
	#endif
}
335
/*
 * Perform a switch between the G and LP CPU clusters.
 *
 * @us:    wake timer in microseconds for the LP1/LP2 entry (ignored when
 *         TEGRA_POWER_CLUSTER_IMMEDIATE is set).
 * @flags: TEGRA_POWER_CLUSTER_* target and modifier flags; with
 *         TEGRA_POWER_SDRAM_SELFREFRESH the switch goes via LP1,
 *         otherwise via LP2 on CPU0.
 *
 * Returns 0 on success; -EINVAL for an invalid target, -EBUSY when
 * secondary CPUs are still online, -EEXIST when already on the target
 * cluster (and not forced), -EPERM when the G cluster is absent.
 */
int tegra_cluster_control(unsigned int us, unsigned int flags)
{
	/* Time of the last G=>LP switch, used to enforce the minimum
	   rail-off time before switching back to G. */
	static ktime_t last_g2lp;

	unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
	unsigned int current_cluster = is_lp_cluster()
					? TEGRA_POWER_CLUSTER_LP
					: TEGRA_POWER_CLUSTER_G;
	unsigned long irq_flags;

	/* Exactly one target cluster must be requested. */
	if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
		return -EINVAL;

	/* A switch is only possible with all secondary CPUs offline. */
	if (num_online_cpus() > 1)
		return -EBUSY;

	if ((current_cluster == target_cluster)
	&& !(flags & TEGRA_POWER_CLUSTER_FORCE))
		return -EEXIST;

	if (target_cluster == TEGRA_POWER_CLUSTER_G)
		if (!is_g_cluster_present())
			return -EPERM;

	trace_power_start(POWER_PSTATE, target_cluster, 0);

	/* Immediate wake: do not program a wake timer below. */
	if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
		us = 0;

	DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
		(flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
		is_lp_cluster() ? "LP" : "G",
		(target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
		(flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
		(flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
		us));

	local_irq_save(irq_flags);

	/* Manage the CPU rail around a real cluster change (skipped while
	   timekeeping is suspended, since ktime_get() is unusable then). */
	if (current_cluster != target_cluster && !timekeeping_suspended) {
		ktime_t now = ktime_get();
		if (target_cluster == TEGRA_POWER_CLUSTER_G) {
			/* LP=>G: ensure the rail has been off at least the
			   minimum power-off time before turning it on. */
			s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
			s64 t_off = tegra_cpu_power_off_time();
#if defined(CONFIG_ARCH_TEGRA_11x_SOC)
			u32 reg;
#endif
			if (t_off > t)
				udelay((unsigned int)(t_off - t));

			tegra_dvfs_rail_on(tegra_cpu_rail, now);
#if defined(CONFIG_ARCH_TEGRA_11x_SOC)
			/* Re-enable RAM repair before powering the rail. */
			reg = readl(FLOW_CTRL_RAM_REPAIR);
			reg &= ~FLOW_CTRL_RAM_REPAIR_BYPASS_EN;
			writel(reg, FLOW_CTRL_RAM_REPAIR);
			/* power up C rail */
			reg = readl(FLOW_CTRL_CPU_PWR_CSR);
			reg |= FLOW_CTRL_CPU_PWR_CSR_RAIL_ENABLE;
			writel(reg, FLOW_CTRL_CPU_PWR_CSR);
#endif

		} else {
			/* G=>LP: record the switch time, drop the rail. */
			last_g2lp = now;
			tegra_dvfs_rail_off(tegra_cpu_rail, now);
		}
	}

	if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
		/* Switch via LP1 (SDRAM in self-refresh), with an optional
		   wake timer of 'us' microseconds. */
		if (us)
			tegra_lp2_set_trigger(us);

		tegra_cluster_switch_prolog(flags);
		tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
		tegra_cluster_switch_epilog(flags);

		if (us)
			tegra_lp2_set_trigger(0);
	} else {
		int cpu = 0;

		/* Switch via LP2 (CPU power gate) on CPU0, handing the
		   local clock event device over to the broadcast timer. */
		tegra_set_cpu_in_lp2(0);
		cpu_pm_enter();
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
					   &cpu);
		tegra_idle_lp2_last(0, flags);
		if (!timekeeping_suspended)
			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
					   &cpu);
		cpu_pm_exit();
		tegra_clear_cpu_in_lp2(0);
	}
	local_irq_restore(irq_flags);

	DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));

	return 0;
}
434 #endif
435
436 #ifdef CONFIG_PM_SLEEP
437
/* LP0 entry hook for the memory controller. */
void tegra_lp0_suspend_mc(void)
{
	/* Since memory frequency after LP0 is restored to boot rate,
	   MC timing is saved during init, not on entry to LP0. Keep
	   this hook just in case, anyway. */
}
444
/* LP0 exit hook: restore the memory controller timing registers that
   were captured during init (see tegra_lp0_suspend_mc() above). */
void tegra_lp0_resume_mc(void)
{
	tegra_mc_timing_restore();
}
449
450 void tegra_lp0_cpu_mode(bool enter)
451 {
452         static bool entered_on_g = false;
453         unsigned int flags;
454
455         if (enter)
456                 entered_on_g = !is_lp_cluster();
457
458         if (entered_on_g) {
459                 flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
460                 flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
461 #if defined(CONFIG_ARCH_TEGRA_HAS_SYMMETRIC_CPU_PWR_GATE)
462                 flags |= TEGRA_POWER_CLUSTER_PART_DEFAULT;
463 #endif
464                 tegra_cluster_control(0, flags);
465                 pr_info("Tegra: switched to %s cluster\n", enter ? "LP" : "G");
466         }
467 }
468 #endif
469
470 #define IO_DPD_INFO(_name, _index, _bit) \
471         { \
472                 .name = _name, \
473                 .io_dpd_reg_index = _index, \
474                 .io_dpd_bit = _bit, \
475         }
476
477 /* PMC IO DPD register offsets */
478 #define APBDEV_PMC_IO_DPD_REQ_0         0x1b8
479 #define APBDEV_PMC_IO_DPD_STATUS_0      0x1bc
480 #define APBDEV_PMC_SEL_DPD_TIM_0        0x1c8
481 #define APBDEV_DPD_ENABLE_LSB           30
482 #define APBDEV_DPD2_ENABLE_LSB          5
483 #define PMC_DPD_SAMPLE                  0x20
484
/* IO pads with deep power down support. All current entries live in the
   second (DPD2) request/status register pair, i.e. io_dpd_reg_index 1. */
struct tegra_io_dpd tegra_list_io_dpd[] = {
	/* sd dpd bits in dpd2 register */
	IO_DPD_INFO("sdhci-tegra.0",	1,	1), /* SDMMC1 */
	IO_DPD_INFO("sdhci-tegra.2",	1,	2), /* SDMMC3 */
	IO_DPD_INFO("sdhci-tegra.3",	1,	3), /* SDMMC4 */
};
491
492 struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
493 {
494         int i;
495         const char *name = dev ? dev_name(dev) : NULL;
496         if (name) {
497                 for (i = 0; i < (sizeof(tegra_list_io_dpd) /
498                         sizeof(struct tegra_io_dpd)); i++) {
499                         if (!(strncmp(tegra_list_io_dpd[i].name, name,
500                                 strlen(name)))) {
501                                 return &tegra_list_io_dpd[i];
502                         }
503                 }
504         }
505         dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
506                 ((name) ? name : "NULL"));
507         return NULL;
508 }
509 EXPORT_SYMBOL(tegra_io_dpd_get);
510
511 static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
512 static DEFINE_SPINLOCK(tegra_io_dpd_lock);
513
514 void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
515 {
516         unsigned int enable_mask;
517         unsigned int dpd_status;
518         unsigned int dpd_enable_lsb;
519
520         if ((!hnd)) {
521                 pr_warn("SD IO DPD handle NULL in %s\n", __func__);
522                 return;
523         }
524         spin_lock(&tegra_io_dpd_lock);
525         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
526                                                 APBDEV_DPD_ENABLE_LSB;
527         writel(0x1, pmc + PMC_DPD_SAMPLE);
528         writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
529         enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
530         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
531                                         hnd->io_dpd_reg_index * 8));
532         udelay(1);
533         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
534                                         hnd->io_dpd_reg_index * 8));
535         if (!(dpd_status & (1 << hnd->io_dpd_bit)))
536                 pr_info("Error: dpd%d enable failed, status=%#x\n",
537                 (hnd->io_dpd_reg_index + 1), dpd_status);
538         /* Sample register must be reset before next sample operation */
539         writel(0x0, pmc + PMC_DPD_SAMPLE);
540         spin_unlock(&tegra_io_dpd_lock);
541         return;
542 }
543 EXPORT_SYMBOL(tegra_io_dpd_enable);
544
545 void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
546 {
547         unsigned int enable_mask;
548         unsigned int dpd_status;
549         unsigned int dpd_enable_lsb;
550
551         if ((!hnd)) {
552                 pr_warn("SD IO DPD handle NULL in %s\n", __func__);
553                 return;
554         }
555         spin_lock(&tegra_io_dpd_lock);
556         dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
557                                                 APBDEV_DPD_ENABLE_LSB;
558         enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
559         writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
560                                         hnd->io_dpd_reg_index * 8));
561         dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
562                                         hnd->io_dpd_reg_index * 8));
563         if (dpd_status & (1 << hnd->io_dpd_bit))
564                 pr_info("Error: dpd%d disable failed, status=%#x\n",
565                 (hnd->io_dpd_reg_index + 1), dpd_status);
566         spin_unlock(&tegra_io_dpd_lock);
567         return;
568 }
569 EXPORT_SYMBOL(tegra_io_dpd_disable);