arm: tegra: sd: enable sd dpd
[linux-2.6.git] / arch/arm/mach-tegra/pm-t3.c
/*
 * arch/arm/mach-tegra/pm-t3.c
 *
 * Tegra3 SOC-specific power and cluster management
 *
 * Copyright (c) 2009-2012, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/clockchips.h>

#include <mach/gpio.h>
#include <mach/iomap.h>
#include <mach/irqs.h>
#include <mach/io_dpd.h>

#include <asm/cpu_pm.h>
#include <asm/hardware/gic.h>

#include <trace/events/power.h>

#include "clock.h"
#include "cpuidle.h"
#include "pm.h"
#include "sleep.h"
#include "tegra3_emc.h"
#include "dvfs.h"

#ifdef CONFIG_TEGRA_CLUSTER_CONTROL
#define CAR_CCLK_BURST_POLICY \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x20)

#define CAR_SUPER_CCLK_DIVIDER \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x24)

#define CAR_CCLKG_BURST_POLICY \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x368)

#define CAR_SUPER_CCLKG_DIVIDER \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x36C)

#define CAR_CCLKLP_BURST_POLICY \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x370)
#define PLLX_DIV2_BYPASS_LP     (1<<16)

#define CAR_SUPER_CCLKLP_DIVIDER \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x374)

#define CAR_BOND_OUT_V \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x390)
#define CAR_BOND_OUT_V_CPU_G    (1<<0)
#define CAR_BOND_OUT_V_CPU_LP   (1<<1)

#define CAR_CLK_ENB_V_SET \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x440)
#define CAR_CLK_ENB_V_CPU_G     (1<<0)
#define CAR_CLK_ENB_V_CPU_LP    (1<<1)

#define CAR_RST_CPUG_CMPLX_SET \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x450)

#define CAR_RST_CPUG_CMPLX_CLR \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x454)

#define CAR_RST_CPULP_CMPLX_SET \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x458)

#define CAR_RST_CPULP_CMPLX_CLR \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x45C)

#define CAR_CLK_CPUG_CMPLX_SET \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x460)

#define CAR_CLK_CPUG_CMPLX_CLR \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x464)

#define CAR_CLK_CPULP_CMPLX_SET \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x468)

#define CAR_CLK_CPULP_CMPLX_CLR \
        (IO_ADDRESS(TEGRA_CLK_RESET_BASE) + 0x46C)

#define CPU_CLOCK(cpu)  (0x1<<(8+cpu))
#define CPU_RESET(cpu)  (0x1111ul<<(cpu))
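
/*
 * CPU_CLOCK() is the per-core clock-stop bit (bit 8 + cpu) used with the
 * CLK_CPU*_CMPLX set/clear registers; CPU_RESET() covers all of the
 * per-core reset signals (the 0x1111 pattern, one bit in each group of
 * four) in the RST_CPU*_CMPLX set/clear registers.
 */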

static int cluster_switch_prolog_clock(unsigned int flags)
{
        u32 reg;
        u32 CclkBurstPolicy;
        u32 SuperCclkDivider;

        /* Read the bond out register containing the G and LP CPUs. */
        reg = readl(CAR_BOND_OUT_V);

        /* Sync G-PLLX divider bypass with LP (no effect on G, just to prevent
           LP settings from being overwritten by the save/restore code). */
        CclkBurstPolicy = ~PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKG_BURST_POLICY);
        CclkBurstPolicy |= PLLX_DIV2_BYPASS_LP & readl(CAR_CCLKLP_BURST_POLICY);
        writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);

        /* Switching to G? */
        if (flags & TEGRA_POWER_CLUSTER_G) {
                /* Do the G CPUs exist? */
                if (reg & CAR_BOND_OUT_V_CPU_G)
                        return -ENXIO;

                /* Keep the G CPU clock policy set by the upper layer, with the
                   exception of the transition via LP1 */
                if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
                        /* In LP1 power mode come up on CLKM (oscillator) */
                        CclkBurstPolicy = readl(CAR_CCLKG_BURST_POLICY);
                        CclkBurstPolicy &= ~0xF;
                        SuperCclkDivider = 0;

                        writel(CclkBurstPolicy, CAR_CCLKG_BURST_POLICY);
                        writel(SuperCclkDivider, CAR_SUPER_CCLKG_DIVIDER);
                }

                /* Hold G CPUs 1-3 in reset after the switch */
                reg = CPU_RESET(1) | CPU_RESET(2) | CPU_RESET(3);
                writel(reg, CAR_RST_CPUG_CMPLX_SET);

                /* Take G CPU 0 out of reset after the switch */
                reg = CPU_RESET(0);
                writel(reg, CAR_RST_CPUG_CMPLX_CLR);

                /* Disable the clocks on G CPUs 1-3 after the switch */
                reg = CPU_CLOCK(1) | CPU_CLOCK(2) | CPU_CLOCK(3);
                writel(reg, CAR_CLK_CPUG_CMPLX_SET);

                /* Enable the clock on G CPU 0 after the switch */
                reg = CPU_CLOCK(0);
                writel(reg, CAR_CLK_CPUG_CMPLX_CLR);

                /* Enable the G CPU complex clock after the switch */
                reg = CAR_CLK_ENB_V_CPU_G;
                writel(reg, CAR_CLK_ENB_V_SET);
        }
        /* Switching to LP? */
        else if (flags & TEGRA_POWER_CLUSTER_LP) {
                /* Does the LP CPU exist? */
                if (reg & CAR_BOND_OUT_V_CPU_LP)
                        return -ENXIO;

                /* Keep the LP CPU clock policy set by the upper layer, with
                   the exception of the transition via LP1 */
                if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
                        /* In LP1 power mode come up on CLKM (oscillator) */
                        CclkBurstPolicy = readl(CAR_CCLKLP_BURST_POLICY);
                        CclkBurstPolicy &= ~0xF;
                        SuperCclkDivider = 0;

                        writel(CclkBurstPolicy, CAR_CCLKLP_BURST_POLICY);
                        writel(SuperCclkDivider, CAR_SUPER_CCLKLP_DIVIDER);
                }

                /* Take the LP CPU out of reset after the switch */
                reg = CPU_RESET(0);
                writel(reg, CAR_RST_CPULP_CMPLX_CLR);

                /* Enable the clock on the LP CPU after the switch */
                reg = CPU_CLOCK(0);
                writel(reg, CAR_CLK_CPULP_CMPLX_CLR);

                /* Enable the LP CPU complex clock after the switch */
                reg = CAR_CLK_ENB_V_CPU_LP;
                writel(reg, CAR_CLK_ENB_V_SET);
        }

        return 0;
}

void tegra_cluster_switch_prolog(unsigned int flags)
{
        unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
        unsigned int current_cluster = is_lp_cluster()
                                        ? TEGRA_POWER_CLUSTER_LP
                                        : TEGRA_POWER_CLUSTER_G;
        u32 reg;

        /* Read the flow controller CSR register and clear the CPU switch
           and immediate flags. If an actual CPU switch is to be performed,
           re-write the CSR register with the desired values. */
        reg = readl(FLOW_CTRL_CPU_CSR(0));
        reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE |
                 FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER);

        /* Program flow controller for immediate wake if requested */
        if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
                reg |= FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE;

        /* Do nothing if no switch actions requested */
        if (!target_cluster)
                goto done;

        if ((current_cluster != target_cluster) ||
                (flags & TEGRA_POWER_CLUSTER_FORCE)) {
                if (current_cluster != target_cluster) {
                        /* Set up the clocks for the target CPU. */
                        if (cluster_switch_prolog_clock(flags)) {
                                /* The target CPU does not exist */
                                goto done;
                        }

                        /* Set up the flow controller to switch CPUs. */
                        reg |= FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER;
                }
        }

done:
        writel(reg, FLOW_CTRL_CPU_CSR(0));
}


static void cluster_switch_epilog_actlr(void)
{
        u32 actlr;

        /* The TLB maintenance broadcast bit (FW) is stubbed out on the LP CPU
           (reads as zero, writes ignored). Hence, it is not preserved across a
           G=>LP=>G switch by the CPU save/restore code, but the SMP bit is
           restored correctly. Synchronize these two bits here after the LP=>G
           transition. Note that only the CPU0 core is powered on before and
           after the switch. See also bug 807595. */

        __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
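        /* Bit 6 of ACTLR is the SMP bit; bit 0 is the FW (TLB broadcast) bit. */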

        if (actlr & (0x1 << 6)) {
                actlr |= 0x1;
                __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
        }
}

static void cluster_switch_epilog_gic(void)
{
        unsigned int max_irq, i;
        void __iomem *gic_base = IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE);

        /* Reprogram the interrupt affinity because, on the LP CPU,
           the interrupt distributor affinity registers are stubbed out
           by ARM (reads as zero, writes ignored). So when the LP CPU
           context save code runs, the affinity registers will read
           as all zero. This causes all interrupts to be effectively
           disabled when back on the G CPU because they aren't routable
           to any CPU. See bug 667720 for details. */

        max_irq = readl(gic_base + GIC_DIST_CTR) & 0x1f;
        max_irq = (max_irq + 1) * 32;
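        /* By default route every interrupt to CPU0 (one target byte per IRQ). */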

        for (i = 32; i < max_irq; i += 4) {
                u32 val = 0x01010101;
#ifdef CONFIG_GIC_SET_MULTIPLE_CPUS
                unsigned int irq;
                for (irq = i; irq < (i + 4); irq++) {
                        struct cpumask mask;
                        struct irq_desc *desc = irq_to_desc(irq);

                        if (desc && desc->affinity_hint &&
                            desc->irq_data.affinity) {
                                if (cpumask_and(&mask, desc->affinity_hint,
                                                desc->irq_data.affinity))
                                        val |= (*cpumask_bits(&mask) & 0xff) <<
                                                ((irq & 3) * 8);
                        }
                }
#endif
                writel(val, gic_base + GIC_DIST_TARGET + i * 4 / 4);
        }
}

void tegra_cluster_switch_epilog(unsigned int flags)
{
        u32 reg;

        /* Make sure the switch and immediate flags are cleared in
           the flow controller to prevent undesirable side-effects
           for future users of the flow controller. */
        reg = readl(FLOW_CTRL_CPU_CSR(0));
        reg &= ~(FLOW_CTRL_CPU_CSR_IMMEDIATE_WAKE |
                 FLOW_CTRL_CPU_CSR_SWITCH_CLUSTER);
        writel(reg, FLOW_CTRL_CPU_CSR(0));

        /* Perform post-switch LP=>G clean-up */
        if (!is_lp_cluster()) {
                cluster_switch_epilog_actlr();
                cluster_switch_epilog_gic();
        }

        #if DEBUG_CLUSTER_SWITCH
        {
                /* FIXME: clock functions below are taking mutex */
                struct clk *c = tegra_get_clock_by_name(
                        is_lp_cluster() ? "cpu_lp" : "cpu_g");
                DEBUG_CLUSTER(("%s: %s freq %lu\r\n", __func__,
                        is_lp_cluster() ? "LP" : "G", clk_get_rate(c)));
        }
        #endif
}

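/*
 * Switch the active CPU complex.  @us is the expected residency in
 * microseconds (used to arm the LP2 wake timer when the switch goes via
 * LP1); @flags selects the target cluster and the switch options.  Only
 * valid with a single CPU online.
 */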
int tegra_cluster_control(unsigned int us, unsigned int flags)
{
        static ktime_t last_g2lp;

        unsigned int target_cluster = flags & TEGRA_POWER_CLUSTER_MASK;
        unsigned int current_cluster = is_lp_cluster()
                                        ? TEGRA_POWER_CLUSTER_LP
                                        : TEGRA_POWER_CLUSTER_G;
        unsigned long irq_flags;

        if ((target_cluster == TEGRA_POWER_CLUSTER_MASK) || !target_cluster)
                return -EINVAL;

        if (num_online_cpus() > 1)
                return -EBUSY;

        if ((current_cluster == target_cluster)
        && !(flags & TEGRA_POWER_CLUSTER_FORCE))
                return -EEXIST;

        if (target_cluster == TEGRA_POWER_CLUSTER_G)
                if (!is_g_cluster_present())
                        return -EPERM;

        trace_power_start(POWER_PSTATE, target_cluster, 0);

        if (flags & TEGRA_POWER_CLUSTER_IMMEDIATE)
                us = 0;

        DEBUG_CLUSTER(("%s(LP%d): %s->%s %s %s %d\r\n", __func__,
                (flags & TEGRA_POWER_SDRAM_SELFREFRESH) ? 1 : 2,
                is_lp_cluster() ? "LP" : "G",
                (target_cluster == TEGRA_POWER_CLUSTER_G) ? "G" : "LP",
                (flags & TEGRA_POWER_CLUSTER_IMMEDIATE) ? "immediate" : "",
                (flags & TEGRA_POWER_CLUSTER_FORCE) ? "force" : "",
                us));

        local_irq_save(irq_flags);

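        /* When going back to G, make sure the CPU rail has been off for at
           least the minimum rail power-off time before it is powered up
           again; pad any remaining gap with a udelay(). */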
        if (current_cluster != target_cluster && !timekeeping_suspended) {
                ktime_t now = ktime_get();
                if (target_cluster == TEGRA_POWER_CLUSTER_G) {
                        s64 t = ktime_to_us(ktime_sub(now, last_g2lp));
                        s64 t_off = tegra_cpu_power_off_time();
                        if (t_off > t)
                                udelay((unsigned int)(t_off - t));

                        tegra_dvfs_rail_on(tegra_cpu_rail, now);

                } else {
                        last_g2lp = now;
                        tegra_dvfs_rail_off(tegra_cpu_rail, now);
                }
        }

        if (flags & TEGRA_POWER_SDRAM_SELFREFRESH) {
                if (us)
                        tegra_lp2_set_trigger(us);

                tegra_cluster_switch_prolog(flags);
                tegra_suspend_dram(TEGRA_SUSPEND_LP1, flags);
                tegra_cluster_switch_epilog(flags);

                if (us)
                        tegra_lp2_set_trigger(0);
        } else {
                int cpu = 0;

                tegra_set_cpu_in_lp2(0);
                cpu_pm_enter();
                if (!timekeeping_suspended)
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
                                           &cpu);
                tegra_idle_lp2_last(0, flags);
                if (!timekeeping_suspended)
                        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
                                           &cpu);
                cpu_pm_exit();
                tegra_clear_cpu_in_lp2(0);
        }
        local_irq_restore(irq_flags);

        DEBUG_CLUSTER(("%s: %s\r\n", __func__, is_lp_cluster() ? "LP" : "G"));

        return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP

void tegra_lp0_suspend_mc(void)
{
        /* Since the memory frequency after LP0 is restored to the boot rate,
           MC timing is saved during init, not on entry to LP0. Keep this
           hook anyway, just in case. */
}

void tegra_lp0_resume_mc(void)
{
        tegra_mc_timing_restore();
}

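/* If LP0 is entered while running on the G cluster, switch to the LP cluster
 * for the suspend path and back to G on resume. */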
void tegra_lp0_cpu_mode(bool enter)
{
        static bool entered_on_g = false;
        unsigned int flags;

        if (enter)
                entered_on_g = !is_lp_cluster();

        if (entered_on_g) {
                flags = enter ? TEGRA_POWER_CLUSTER_LP : TEGRA_POWER_CLUSTER_G;
                flags |= TEGRA_POWER_CLUSTER_IMMEDIATE;
                tegra_cluster_control(0, flags);
                pr_info("Tegra: switched to %s cluster\n", enter ? "LP" : "G");
        }
}
#endif

#define IO_DPD_INFO(_name, _index, _bit) \
        { \
                .name = _name, \
                .io_dpd_reg_index = _index, \
                .io_dpd_bit = _bit, \
        }
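/* .io_dpd_reg_index selects the DPD (0) or DPD2 (1) request/status register
 * pair; .io_dpd_bit is the pad group's bit within that register. */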

/* PMC IO DPD register offsets */
#define APBDEV_PMC_IO_DPD_REQ_0         0x1b8
#define APBDEV_PMC_IO_DPD_STATUS_0      0x1bc
#define APBDEV_PMC_SEL_DPD_TIM_0        0x1c8
#define APBDEV_DPD_ENABLE_LSB           30
#define APBDEV_DPD2_ENABLE_LSB          5
#define PMC_DPD_SAMPLE                  0x20
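/* The DPD and DPD2 request/status register pairs sit 8 bytes apart, hence
 * the "io_dpd_reg_index * 8" addressing below. */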

struct tegra_io_dpd tegra_list_io_dpd[] = {
        /* sd dpd bits in dpd2 register */
        IO_DPD_INFO("sdhci-tegra.0",    1,      1), /* SDMMC1 */
        IO_DPD_INFO("sdhci-tegra.2",    1,      2), /* SDMMC3 */
        IO_DPD_INFO("sdhci-tegra.3",    1,      3), /* SDMMC4 */
};

#ifdef CONFIG_PM_SLEEP
struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
        int i;
        const char *name = dev ? dev_name(dev) : NULL;
        if (name) {
                for (i = 0; i < (sizeof(tegra_list_io_dpd) /
                        sizeof(struct tegra_io_dpd)); i++) {
                        if (!(strncmp(tegra_list_io_dpd[i].name, name,
                                strlen(name)))) {
                                return &tegra_list_io_dpd[i];
                        }
                }
        }
        dev_info(dev, "Error: tegra3 io dpd not supported for %s\n",
                ((name) ? name : "NULL"));
        return NULL;
}
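
/*
 * Usage sketch (illustrative only, not taken from a specific driver): a pad
 * user such as an SD host controller driver would typically look its handle
 * up once and then gate the pads around its own power transitions, e.g.:
 *
 *      struct tegra_io_dpd *dpd = tegra_io_dpd_get(&pdev->dev);
 *
 *      if (dpd)
 *              tegra_io_dpd_enable(dpd);   // pads enter deep power down
 *      ...
 *      if (dpd)
 *              tegra_io_dpd_disable(dpd);  // pads back to normal operation
 */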

static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
static DEFINE_SPINLOCK(tegra_io_dpd_lock);

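/*
 * Put the pad group described by @hnd into deep power down.  The sequence
 * below arms the PMC DPD sample logic, programs the sample timer, writes the
 * "DPD on" code together with the pad bit into the request register, and then
 * checks the status register to confirm the pads actually entered DPD.
 */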
void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
        unsigned int enable_mask;
        unsigned int dpd_status;
        unsigned int dpd_enable_lsb;

        if (!hnd) {
                pr_warn("SD IO DPD handle NULL in %s\n", __func__);
                return;
        }
        spin_lock(&tegra_io_dpd_lock);
        dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
                                                APBDEV_DPD_ENABLE_LSB;
        writel(0x1, pmc + PMC_DPD_SAMPLE);
        writel(0x10, pmc + APBDEV_PMC_SEL_DPD_TIM_0);
        enable_mask = ((1 << hnd->io_dpd_bit) | (2 << dpd_enable_lsb));
        writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
                                        hnd->io_dpd_reg_index * 8));
        udelay(1);
        dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
                                        hnd->io_dpd_reg_index * 8));
        if (!(dpd_status & (1 << hnd->io_dpd_bit)))
                pr_info("Error: dpd%d enable failed, status=%#x\n",
                (hnd->io_dpd_reg_index + 1), dpd_status);
        /* Sample register must be reset before next sample operation */
        writel(0x0, pmc + PMC_DPD_SAMPLE);
        spin_unlock(&tegra_io_dpd_lock);
        return;
}

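/*
 * Take the pad group described by @hnd back out of deep power down by writing
 * the "DPD off" code together with the pad bit, then verify the result via
 * the status register.
 */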
void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
        unsigned int enable_mask;
        unsigned int dpd_status;
        unsigned int dpd_enable_lsb;

        if (!hnd) {
                pr_warn("SD IO DPD handle NULL in %s\n", __func__);
                return;
        }
        spin_lock(&tegra_io_dpd_lock);
        dpd_enable_lsb = (hnd->io_dpd_reg_index) ? APBDEV_DPD2_ENABLE_LSB :
                                                APBDEV_DPD_ENABLE_LSB;
        enable_mask = ((1 << hnd->io_dpd_bit) | (1 << dpd_enable_lsb));
        writel(enable_mask, pmc + (APBDEV_PMC_IO_DPD_REQ_0 +
                                        hnd->io_dpd_reg_index * 8));
        dpd_status = readl(pmc + (APBDEV_PMC_IO_DPD_STATUS_0 +
                                        hnd->io_dpd_reg_index * 8));
        if (dpd_status & (1 << hnd->io_dpd_bit))
                pr_info("Error: dpd%d disable failed, status=%#x\n",
                (hnd->io_dpd_reg_index + 1), dpd_status);
        spin_unlock(&tegra_io_dpd_lock);
        return;
}

static void tegra_io_dpd_delayed_disable(struct work_struct *work)
{
        struct tegra_io_dpd *hnd = container_of(
                to_delayed_work(work), struct tegra_io_dpd, delay_dpd);
        tegra_io_dpd_disable(hnd);
        hnd->need_delay_dpd = 0;
}

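/* Set up the delayed-work machinery used to defer DPD disable requests. */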
int tegra_io_dpd_init(void)
{
        int i;
        for (i = 0;
                i < (sizeof(tegra_list_io_dpd) / sizeof(struct tegra_io_dpd));
                i++) {
                        INIT_DELAYED_WORK(&(tegra_list_io_dpd[i].delay_dpd),
                                tegra_io_dpd_delayed_disable);
                        mutex_init(&(tegra_list_io_dpd[i].delay_lock));
                        tegra_list_io_dpd[i].need_delay_dpd = 0;
        }
        return 0;
}

#else

int tegra_io_dpd_init(void)
{
        return 0;
}

void tegra_io_dpd_enable(struct tegra_io_dpd *hnd)
{
}

void tegra_io_dpd_disable(struct tegra_io_dpd *hnd)
{
}

struct tegra_io_dpd *tegra_io_dpd_get(struct device *dev)
{
        return NULL;
}

#endif

EXPORT_SYMBOL(tegra_io_dpd_get);
EXPORT_SYMBOL(tegra_io_dpd_enable);
EXPORT_SYMBOL(tegra_io_dpd_disable);
EXPORT_SYMBOL(tegra_io_dpd_init);