/*
 * arch/arm/mach-tegra/tegra3_emc.c
 *
 * Copyright (C) 2011-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/platform_data/tegra_emc.h>

#include <asm/cputime.h>
#include <asm/cacheflush.h>

#include <mach/latency_allowance.h>

#include "clock.h"
#include "dvfs.h"
#include "tegra3_emc.h"
#include "iomap.h"

#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
static bool emc_enable;
#endif
module_param(emc_enable, bool, 0644);

u8 tegra_emc_bw_efficiency = 35;
u8 tegra_emc_bw_efficiency_boost = 45;

#define EMC_MIN_RATE_DDR3               25500000
#define EMC_STATUS_UPDATE_TIMEOUT       100
#define TEGRA_EMC_TABLE_MAX_SIZE        16

enum {
        DLL_CHANGE_NONE = 0,
        DLL_CHANGE_ON,
        DLL_CHANGE_OFF,
};

#define EMC_CLK_DIV_SHIFT               0
#define EMC_CLK_DIV_MASK                (0xFF << EMC_CLK_DIV_SHIFT)
#define EMC_CLK_SOURCE_SHIFT            30
#define EMC_CLK_SOURCE_MASK             (0x3 << EMC_CLK_SOURCE_SHIFT)
#define EMC_CLK_LOW_JITTER_ENABLE       (0x1 << 29)
#define EMC_CLK_MC_SAME_FREQ            (0x1 << 16)

#define BURST_REG_LIST \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),                     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),                     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),                 \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),                 \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),              \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),                 \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA),             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG6),              \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),              \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),              \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS0),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE0),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS0),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ0),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),          \
        DEFINE_REG(0             , EMC_XM2CLKPADCTRL),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2QUSEPADCTRL),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL3),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),   \
                                                                \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),             \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),      \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),       \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),       \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),      \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),      \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),      \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),  \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),  \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),      \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),      \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),      \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),      \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),        \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),       \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),           \
        DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),  \
                                                                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_RSV),

#define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
static void __iomem *burst_reg_addr[TEGRA30_EMC_NUM_REGS] = {
        BURST_REG_LIST
};
#undef DEFINE_REG

#define DEFINE_REG(base, reg)   reg##_INDEX
enum {
        BURST_REG_LIST
};
#undef DEFINE_REG
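
/*
 * BURST_REG_LIST is an X-macro: it is expanded twice with different
 * DEFINE_REG() definitions. The first expansion builds burst_reg_addr[],
 * the virtual address of every shadowed EMC/MC burst register; the second
 * builds the matching *_INDEX enum, so offsets into a table's burst_regs[]
 * array cannot drift out of sync with the address list. The one entry with
 * a 0 base, EMC_XM2CLKPADCTRL, deliberately gets a null address: that
 * register is not shadowed, and is written directly in step 8.1 of
 * emc_set_clock() rather than in the burst-register loop.
 */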

static int emc_num_burst_regs;

struct emc_sel {
        struct clk      *input;
        u32             value;
        unsigned long   input_rate;
};

static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
static struct tegra30_emc_table start_timing;
static const struct tegra30_emc_table *emc_timing;
static unsigned long dram_over_temp_state = DRAM_OVER_TEMP_NONE;

static const u32 *dram_to_soc_bit_map;
static const struct tegra30_emc_table *tegra_emc_table;
static int tegra_emc_table_size;

static u32 dram_dev_num;
static u32 emc_cfg_saved;
static u32 dram_type = -1;

static struct clk *emc;
static struct clk *bridge;

static struct {
        cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
        int last_sel;
        u64 last_update;
        u64 clkchange_count;
        spinlock_t spinlock;
} emc_stats;

static DEFINE_SPINLOCK(emc_access_lock);

static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);

static inline void emc_writel(u32 val, unsigned long addr)
{
        writel(val, emc_base + addr);
        barrier();
}
static inline u32 emc_readl(unsigned long addr)
{
        return readl(emc_base + addr);
}
static inline void mc_writel(u32 val, unsigned long addr)
{
        writel(val, mc_base + addr);
        barrier();
}
static inline u32 mc_readl(unsigned long addr)
{
        return readl(mc_base + addr);
}
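
/*
 * Note: writel() already issues the required I/O write barrier; the
 * barrier() that follows is only a compiler barrier, apparently there to
 * keep the compiler from reordering back-to-back register pokes around
 * these helpers (an assumption; the original code carries no explanation).
 */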

static void emc_last_stats_update(int last_sel)
{
        unsigned long flags;
        u64 cur_jiffies = get_jiffies_64();

        spin_lock_irqsave(&emc_stats.spinlock, flags);

        if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
                emc_stats.time_at_clock[emc_stats.last_sel] =
                        emc_stats.time_at_clock[emc_stats.last_sel] +
                        (cur_jiffies - emc_stats.last_update);

        emc_stats.last_update = cur_jiffies;

        if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
                emc_stats.clkchange_count++;
                emc_stats.last_sel = last_sel;
        }
        spin_unlock_irqrestore(&emc_stats.spinlock, flags);
}

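/*
 * Poll @status_reg until the bits in @bit_mask read back as @updated_state,
 * giving up after EMC_STATUS_UPDATE_TIMEOUT microseconds. Returns 0 on
 * success, -ETIMEDOUT otherwise.
 */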
static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
{
        int i;
        for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
                if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static inline void emc_timing_update(void)
{
        int err;

        emc_writel(0x1, EMC_TIMING_CONTROL);
        err = wait_for_update(EMC_STATUS,
                              EMC_STATUS_TIMING_UPDATE_STALLED, false);
        if (err) {
                pr_err("%s: timing update error: %d\n", __func__, err);
                BUG();
        }
}

static inline void auto_cal_disable(void)
{
        int err;

        emc_writel(0, EMC_AUTO_CAL_INTERVAL);
        err = wait_for_update(EMC_AUTO_CAL_STATUS,
                              EMC_AUTO_CAL_STATUS_ACTIVE, false);
        if (err) {
                pr_err("%s: disable auto-cal error: %d\n", __func__, err);
                BUG();
        }
}

static inline void set_over_temp_timing(
        const struct tegra30_emc_table *next_timing, unsigned long state)
{
#define REFRESH_SPEEDUP(val)                                                  \
        do {                                                                  \
                val = ((val) & 0xFFFF0000) | (((val) & 0xFFFF) >> 2);         \
        } while (0)
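        /*
         * REFRESH_SPEEDUP() quarters the refresh interval held in the low
         * 16 bits of the register value (>> 2), i.e. it refreshes four
         * times as often, while leaving the upper half of the register
         * untouched.
         */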

        u32 ref = next_timing->burst_regs[EMC_REFRESH_INDEX];
        u32 pre_ref = next_timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
        u32 dsr_cntrl = next_timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];

        switch (state) {
        case DRAM_OVER_TEMP_NONE:
                break;
        case DRAM_OVER_TEMP_REFRESH_X2:
        case DRAM_OVER_TEMP_REFRESH_X4:
        case DRAM_OVER_TEMP_THROTTLE:
                REFRESH_SPEEDUP(ref);
                REFRESH_SPEEDUP(pre_ref);
                REFRESH_SPEEDUP(dsr_cntrl);
                break;
        default:
                pr_err("%s: Failed to set dram over temp state %lu\n",
                       __func__, state);
                BUG();
        }

        __raw_writel(ref, burst_reg_addr[EMC_REFRESH_INDEX]);
        __raw_writel(pre_ref, burst_reg_addr[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
        __raw_writel(dsr_cntrl, burst_reg_addr[EMC_DYN_SELF_REF_CONTROL_INDEX]);
}

static inline void set_mc_arbiter_limits(void)
{
        u32 reg = mc_readl(MC_EMEM_ARB_OUTSTANDING_REQ);
        u32 max_val = 0x50 << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;

        if (!(reg & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
            ((reg & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > max_val)) {
                reg = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
                        MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | max_val;
                mc_writel(reg, MC_EMEM_ARB_OUTSTANDING_REQ);
                mc_writel(0x1, MC_TIMING_CONTROL);
        }
}

static inline void disable_early_ack(u32 mc_override)
{
        static u32 override_val;

        override_val = mc_override & (~MC_EMEM_ARB_OVERRIDE_EACK_MASK);
        mc_writel(override_val, MC_EMEM_ARB_OVERRIDE);
        __cpuc_flush_dcache_area(&override_val, sizeof(override_val));
        outer_clean_range(__pa(&override_val), __pa(&override_val + 1));
        override_val |= mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK;
}

static inline void enable_early_ack(u32 mc_override)
{
        mc_writel((mc_override | MC_EMEM_ARB_OVERRIDE_EACK_MASK),
                        MC_EMEM_ARB_OVERRIDE);
}

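/*
 * If the next timing enables a DQS pad Vref that the current timing has
 * off, switch it on ahead of the clock change and report true so the
 * caller can wait for the Vref to settle (3us+, see step 2.5 in
 * emc_set_clock()).
 */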
static inline bool dqs_preset(const struct tegra30_emc_table *next_timing,
                              const struct tegra30_emc_table *last_timing)
{
        bool ret = false;

#define DQS_SET(reg, bit)                                                     \
        do {                                                                  \
                if ((next_timing->burst_regs[EMC_##reg##_INDEX] &             \
                     EMC_##reg##_##bit##_ENABLE) &&                           \
                    (!(last_timing->burst_regs[EMC_##reg##_INDEX] &           \
                       EMC_##reg##_##bit##_ENABLE)))   {                      \
                        emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
                                   | EMC_##reg##_##bit##_ENABLE, EMC_##reg);  \
                        ret = true;                                           \
                }                                                             \
        } while (0)

        DQS_SET(XM2DQSPADCTRL2, VREF);
        DQS_SET(XM2DQSPADCTRL3, VREF);
        DQS_SET(XM2QUSEPADCTRL, IVREF);

        return ret;
}

static inline void overwrite_mrs_wait_cnt(
        const struct tegra30_emc_table *next_timing,
        bool zcal_long)
{
        u32 reg;
        u32 cnt = 512;

        /* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
           for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
           expected operation length. Reduce the latter by the overlapping
           zq-calibration, if any */
        if (zcal_long)
                cnt -= dram_dev_num * 256;

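        /*
         * Worked example: with two DRAM devices and a long ZQ calibration
         * pending, cnt = 512 - 2 * 256 = 0, so the table's MRS_WAIT_SHORT
         * value below wins; with one device, cnt = 256 unless the table's
         * short-wait setting is larger.
         */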
        reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
                EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
                EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
        if (cnt < reg)
                cnt = reg;

        reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
                (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
        reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
                EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;

        emc_writel(reg, EMC_MRS_WAIT_CNT);
}

static inline bool need_qrst(const struct tegra30_emc_table *next_timing,
                             const struct tegra30_emc_table *last_timing,
                             u32 emc_dpd_reg)
{
        u32 last_mode = (last_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
                EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;
        u32 next_mode = (next_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
                EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;

        /* QUSE DPD is disabled */
        bool ret = !(emc_dpd_reg & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) &&

        /* QUSE uses external mode before or after clock change */
                (((last_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
                  (last_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK)) ||
                 ((next_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
                  (next_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK)))  &&

        /* QUSE pad switches from schmitt to vref mode */
                (((last_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
                   EMC_XM2QUSEPADCTRL_IVREF_ENABLE) == 0) &&
                 ((next_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
                   EMC_XM2QUSEPADCTRL_IVREF_ENABLE) != 0));

        return ret;
}

static inline void periodic_qrst_enable(u32 emc_cfg_reg, u32 emc_dbg_reg)
{
        /* enable write mux => enable periodic QRST => restore mux */
        emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
        emc_writel(emc_cfg_reg | EMC_CFG_PERIODIC_QRST, EMC_CFG);
        emc_writel(emc_dbg_reg, EMC_DBG);
}

static inline int get_dll_change(const struct tegra30_emc_table *next_timing,
                                 const struct tegra30_emc_table *last_timing)
{
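        /* Bit 0 of emc_mode_1 (DDR3 MR1) is the DLL disable bit,
           hence the inverted tests below. */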
        bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
        bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);

        if (next_dll_enabled == last_dll_enabled)
                return DLL_CHANGE_NONE;
        else if (next_dll_enabled)
                return DLL_CHANGE_ON;
        else
                return DLL_CHANGE_OFF;
}

static inline void set_dram_mode(const struct tegra30_emc_table *next_timing,
                                 const struct tegra30_emc_table *last_timing,
                                 int dll_change)
{
        if (dram_type == DRAM_TYPE_DDR3) {
                /* first mode_1, then mode_2, then mode_reset */
                if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
                        emc_writel(next_timing->emc_mode_1, EMC_EMRS);
                if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
                        emc_writel(next_timing->emc_mode_2, EMC_EMRS);

                if ((next_timing->emc_mode_reset !=
                     last_timing->emc_mode_reset) ||
                    (dll_change == DLL_CHANGE_ON)) {
                        u32 reg = next_timing->emc_mode_reset &
                                (~EMC_MODE_SET_DLL_RESET);
                        if (dll_change == DLL_CHANGE_ON) {
                                reg |= EMC_MODE_SET_DLL_RESET;
                                reg |= EMC_MODE_SET_LONG_CNT;
                        }
                        emc_writel(reg, EMC_MRS);
                }
        } else {
                /* first mode_2, then mode_1; mode_reset is not applicable */
                if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
                        emc_writel(next_timing->emc_mode_2, EMC_MRW);
                if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
                        emc_writel(next_timing->emc_mode_1, EMC_MRW);
        }
}

static inline void do_clock_change(u32 clk_setting)
{
        int err;

        mc_readl(MC_EMEM_ADR_CFG);      /* completes prev writes */
        writel(clk_setting, clk_base + emc->reg);
        readl(clk_base + emc->reg);     /* completes prev write */

        err = wait_for_update(EMC_INTSTATUS,
                              EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
        if (err) {
                pr_err("%s: clock change completion error: %d\n",
                       __func__, err);
                BUG();
        }
}

static noinline void emc_set_clock(const struct tegra30_emc_table *next_timing,
                                   const struct tegra30_emc_table *last_timing,
                                   u32 clk_setting)
{
        int i, dll_change, pre_wait;
        bool dyn_sref_enabled, vref_cal_toggle, qrst_used, zcal_long;

        u32 mc_override = mc_readl(MC_EMEM_ARB_OVERRIDE);
        u32 emc_cfg_reg = emc_readl(EMC_CFG);
        u32 emc_dbg_reg = emc_readl(EMC_DBG);

        dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
        dll_change = get_dll_change(next_timing, last_timing);
        zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
                (last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

        /* FIXME: remove steps enumeration below? */

        /* 1. clear clkchange_complete interrupts */
        emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

        /* 2. disable dynamic self-refresh and preset dqs vref, then wait for
           possible self-refresh entry/exit and/or dqs vref settled - waiting
           before the clock change decreases worst case change stall time */
        pre_wait = 0;
        if (dyn_sref_enabled) {
                emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
                emc_writel(emc_cfg_reg, EMC_CFG);
                pre_wait = 5;           /* 5us+ for self-refresh entry/exit */
        }

        /* 2.25 update MC arbiter settings */
        set_mc_arbiter_limits();
        if (mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
                disable_early_ack(mc_override);

        /* 2.5 check dq/dqs vref delay */
        if (dqs_preset(next_timing, last_timing)) {
                if (pre_wait < 3)
                        pre_wait = 3;   /* 3us+ for dqs vref settled */
        }
        if (pre_wait) {
                emc_timing_update();
                udelay(pre_wait);
        }

        /* 3. disable auto-cal if vref mode is switching */
        vref_cal_toggle = (next_timing->emc_acal_interval != 0) &&
                ((next_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX] ^
                  last_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX]) &
                 EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE);
        if (vref_cal_toggle)
                auto_cal_disable();

        /* 4. program burst shadow registers */
        for (i = 0; i < emc_num_burst_regs; i++) {
                if (!burst_reg_addr[i])
                        continue;
                __raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
        }
        if ((dram_type == DRAM_TYPE_LPDDR2) &&
            (dram_over_temp_state != DRAM_OVER_TEMP_NONE))
                set_over_temp_timing(next_timing, dram_over_temp_state);
        wmb();
        barrier();
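        /*
         * The __raw_writel() accessors above bypass the usual I/O barriers,
         * so the explicit wmb() (hardware) and barrier() (compiler) are
         * what order this whole burst of shadow-register writes before the
         * accesses that follow.
         */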

        /* On ddr3 when DLL is re-started predict MRS long wait count and
           overwrite DFS table setting */
        if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
                overwrite_mrs_wait_cnt(next_timing, zcal_long);

        /* the last read below makes sure prev writes are completed */
        qrst_used = need_qrst(next_timing, last_timing,
                              emc_readl(EMC_SEL_DPD_CTRL));

        /* 5. flow control marker 1 (no EMC read access after this) */
        emc_writel(1, EMC_STALL_BEFORE_CLKCHANGE);

        /* 6. enable periodic QRST */
        if (qrst_used)
                periodic_qrst_enable(emc_cfg_reg, emc_dbg_reg);

        /* 6.1 disable auto-refresh to save time after clock change */
        emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);

        /* 7. turn Off dll and enter self-refresh on DDR3 */
        if (dram_type == DRAM_TYPE_DDR3) {
                if (dll_change == DLL_CHANGE_OFF)
                        emc_writel(next_timing->emc_mode_1, EMC_EMRS);
                emc_writel(DRAM_BROADCAST(dram_dev_num) |
                           EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
        }

        /* 8. flow control marker 2 */
        emc_writel(1, EMC_STALL_AFTER_CLKCHANGE);

        /* 8.1 enable write mux, update unshadowed pad control */
        emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
        emc_writel(next_timing->burst_regs[EMC_XM2CLKPADCTRL_INDEX],
                   EMC_XM2CLKPADCTRL);

        /* 9. restore periodic QRST, and disable write mux */
        if ((qrst_used) || (next_timing->emc_periodic_qrst !=
                            last_timing->emc_periodic_qrst)) {
                emc_cfg_reg = next_timing->emc_periodic_qrst ?
                        emc_cfg_reg | EMC_CFG_PERIODIC_QRST :
                        emc_cfg_reg & (~EMC_CFG_PERIODIC_QRST);
                emc_writel(emc_cfg_reg, EMC_CFG);
        }
        emc_writel(emc_dbg_reg, EMC_DBG);

        /* 10. exit self-refresh on DDR3 */
        if (dram_type == DRAM_TYPE_DDR3)
                emc_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);

        /* 11. set dram mode registers */
        set_dram_mode(next_timing, last_timing, dll_change);

        /* 12. issue zcal command if turning zcal On */
        if (zcal_long) {
                emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
                if (dram_dev_num > 1)
                        emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
        }

        /* 13. flow control marker 3 */
        emc_writel(1, EMC_UNSTALL_RW_AFTER_CLKCHANGE);

        /* 14. read any MC register to ensure the programming is done
               change EMC clock source register (EMC read access restored)
               wait for clk change completion */
        do_clock_change(clk_setting);

        /* 14.1 re-enable auto-refresh */
        emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);

        /* 15. restore auto-cal */
        if (vref_cal_toggle)
                emc_writel(next_timing->emc_acal_interval,
                           EMC_AUTO_CAL_INTERVAL);

        /* 16. restore dynamic self-refresh */
        if (next_timing->rev >= 0x32)
                dyn_sref_enabled = next_timing->emc_dsr;
        if (dyn_sref_enabled) {
                emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
                emc_writel(emc_cfg_reg, EMC_CFG);
        }

        /* 17. set zcal wait count */
        if (zcal_long)
                emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

        /* 18. update restored timing */
        udelay(2);
        emc_timing_update();

        /* 18.a restore early ACK */
        mc_writel(mc_override, MC_EMEM_ARB_OVERRIDE);
}

static inline void emc_get_timing(struct tegra30_emc_table *timing)
{
        int i;

        for (i = 0; i < emc_num_burst_regs; i++) {
                if (burst_reg_addr[i])
                        timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
                else
                        timing->burst_regs[i] = 0;
        }
        timing->emc_acal_interval = 0;
        timing->emc_zcal_cnt_long = 0;
        timing->emc_mode_reset = 0;
        timing->emc_mode_1 = 0;
        timing->emc_mode_2 = 0;
        timing->emc_periodic_qrst = (emc_readl(EMC_CFG) &
                                     EMC_CFG_PERIODIC_QRST) ? 1 : 0;
}

/* After deep sleep EMC power features are not restored.
 * Do it at run-time after the 1st clock change.
 */
static inline void emc_cfg_power_restore(void)
{
        u32 reg = emc_readl(EMC_CFG);
        u32 pwr_mask = EMC_CFG_PWR_MASK;

        if (tegra_emc_table[0].rev >= 0x32)
                pwr_mask &= ~EMC_CFG_DYN_SREF_ENABLE;

        if ((reg ^ emc_cfg_saved) & pwr_mask) {
                reg = (reg & (~pwr_mask)) | (emc_cfg_saved & pwr_mask);
                emc_writel(reg, EMC_CFG);
                emc_timing_update();
        }
}

/* The EMC registers have shadow registers. When the EMC clock is updated
 * in the clock controller, the shadow registers are copied to the active
 * registers, allowing glitchless memory bus frequency changes.
 * This function updates the shadow registers for a new clock frequency,
 * and relies on the clock lock on the emc clock to avoid races between
 * multiple frequency changes */
static int emc_set_rate(unsigned long rate, bool use_backup)
{
        int i;
        u32 clk_setting;
        const struct tegra30_emc_table *last_timing;
        unsigned long flags;

        if (!tegra_emc_table)
                return -EINVAL;

        /* Table entries specify rate in kHz */
        rate = rate / 1000;

        for (i = 0; i < tegra_emc_table_size; i++) {
                if (tegra_emc_clk_sel[i].input == NULL)
                        continue;       /* invalid entry */

                if (tegra_emc_table[i].rate == rate)
                        break;
        }

        if (i >= tegra_emc_table_size)
                return -EINVAL;

        if (!emc_timing) {
                /* can not assume that boot timing matches dfs table even
                   if boot frequency matches one of the table nodes */
                emc_get_timing(&start_timing);
                last_timing = &start_timing;
        } else {
                last_timing = emc_timing;
        }

        clk_setting = use_backup ? emc->shared_bus_backup.value :
                tegra_emc_clk_sel[i].value;

        spin_lock_irqsave(&emc_access_lock, flags);
        emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
        if (!emc_timing)
                emc_cfg_power_restore();
        emc_timing = &tegra_emc_table[i];
        spin_unlock_irqrestore(&emc_access_lock, flags);

        emc_last_stats_update(i);

        pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

        return 0;
}

int tegra_emc_set_rate(unsigned long rate)
{
        return emc_set_rate(rate, false);
}

int tegra_emc_backup(unsigned long rate)
{
        BUG_ON(rate != emc->shared_bus_backup.bus_rate);
        return emc_set_rate(rate, true);
}
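
/*
 * Typical call flow from the clock framework (a sketch, not a verbatim
 * trace of the tegra3 clk code):
 *
 *      rate = tegra_emc_round_rate(target);            // pick a table rate
 *      parent = tegra_emc_predict_parent(rate, &div);  // check source/divider
 *      ...reparent/re-lock the source if needed...
 *      tegra_emc_set_rate(rate);       // program shadow regs, switch clock
 */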

/* Select the closest EMC rate that is at or above the requested rate */
long tegra_emc_round_rate(unsigned long rate)
{
        int i;
        int best = -1;
        unsigned long distance = ULONG_MAX;

        if (!tegra_emc_table)
                return clk_get_rate_locked(emc); /* no table - no rate change */

        if (!emc_enable)
                return -EINVAL;

        pr_debug("%s: %lu\n", __func__, rate);

        /* Table entries specify rate in kHz */
        rate = rate / 1000;

        for (i = 0; i < tegra_emc_table_size; i++) {
                if (tegra_emc_clk_sel[i].input == NULL)
                        continue;       /* invalid entry */

                if (tegra_emc_table[i].rate >= rate &&
                    (tegra_emc_table[i].rate - rate) < distance) {
                        distance = tegra_emc_table[i].rate - rate;
                        best = i;
                }
        }

        if (best < 0)
                return -EINVAL;

        pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate);

        return tegra_emc_table[best].rate * 1000;
}

struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
{
        int i;

        if (!tegra_emc_table)
                return ERR_PTR(-ENOENT);

        pr_debug("%s: %lu\n", __func__, rate);

        /* Table entries specify rate in kHz */
        rate = rate / 1000;

        for (i = 0; i < tegra_emc_table_size; i++) {
                if (tegra_emc_table[i].rate == rate) {
                        struct clk *p = tegra_emc_clk_sel[i].input;

                        *div_value = (tegra_emc_clk_sel[i].value &
                                EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
                        if (tegra_emc_clk_sel[i].input_rate != clk_get_rate(p))
                                return NULL;

                        return p;
                }
        }
        return ERR_PTR(-ENOENT);
}

int find_matching_input(unsigned long table_rate, bool mc_same_freq,
                        struct emc_sel *emc_clk_sel, struct clk *cbus)
{
        u32 div_value = 0;
        unsigned long input_rate = 0;
        const struct clk_mux_sel *sel;
        const struct clk_mux_sel *parent_sel = NULL;
        const struct clk_mux_sel *backup_sel = NULL;

        /* Table entries specify rate in kHz */
        table_rate *= 1000;

        for (sel = emc->inputs; sel->input != NULL; sel++) {
                if (sel->input == emc->shared_bus_backup.input) {
                        backup_sel = sel;
                        continue;       /* skip backup source */
                }

                if (sel->input == emc->parent)
                        parent_sel = sel;

                input_rate = clk_get_rate(sel->input);

                if ((input_rate >= table_rate) &&
                     (input_rate % table_rate == 0)) {
                        div_value = 2 * input_rate / table_rate - 2;
                        break;
                }
        }

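        /*
         * The divider above uses the standard Tegra U7.1 encoding: register
         * value n divides the source by (n + 2) / 2, hence
         * n = 2 * input_rate / table_rate - 2. E.g. an 800 MHz pll_m source
         * and a 400 MHz table rate give n = 2, a plain divide-by-2.
         */
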
#ifdef CONFIG_TEGRA_PLLM_RESTRICTED
        /*
         * When match not found, check if this rate can be backed-up by cbus
         * Then, we will be able to re-lock boot parent PLLM, and use it as
         * an undivided source. Backup is supported only on LPDDR2 platforms
         * with restricted PLLM usage. Just one backup entry is recognized,
         * and it must be between EMC maximum and half maximum rates.
         */
        if ((dram_type == DRAM_TYPE_LPDDR2) && (sel->input == NULL) &&
            (emc->shared_bus_backup.bus_rate == 0) && cbus) {
                BUG_ON(!parent_sel || !backup_sel);

                if ((table_rate == clk_round_rate(cbus, table_rate)) &&
                    (table_rate < clk_get_max_rate(emc)) &&
                    (table_rate >= clk_get_max_rate(emc) / 2)) {
                        emc->shared_bus_backup.bus_rate = table_rate;

                        /* Get ready emc clock backup selection settings */
                        emc->shared_bus_backup.value =
                                (backup_sel->value << EMC_CLK_SOURCE_SHIFT) |
                                (cbus->div << EMC_CLK_DIV_SHIFT) |
                                (mc_same_freq ? EMC_CLK_MC_SAME_FREQ : 0);

                        /* Select undivided PLLM as regular source */
                        sel = parent_sel;
                        input_rate = table_rate;
                        div_value = 0;
                }
        }
#endif

        if (sel->input) {
                emc_clk_sel->input = sel->input;
                emc_clk_sel->input_rate = input_rate;

                /* Get ready emc clock selection settings for this table rate */
                emc_clk_sel->value = sel->value << EMC_CLK_SOURCE_SHIFT;
                emc_clk_sel->value |= (div_value << EMC_CLK_DIV_SHIFT);
                if ((div_value == 0) && (emc_clk_sel->input == emc->parent))
                        emc_clk_sel->value |= EMC_CLK_LOW_JITTER_ENABLE;
                if (mc_same_freq)
                        emc_clk_sel->value |= EMC_CLK_MC_SAME_FREQ;
                return 0;
        }
        return -EINVAL;
}

static void adjust_emc_dvfs_table(const struct tegra30_emc_table *table,
                                  int table_size)
{
        int i, j;
        unsigned long rate;

        if (table[0].rev < 0x33)
                return;

        for (i = 0; i < MAX_DVFS_FREQS; i++) {
                int mv = emc->dvfs->millivolts[i];
                if (!mv)
                        break;

                /* For each dvfs voltage find maximum supported rate;
                   use 1MHz placeholder if not found */
                for (rate = 1000, j = 0; j < table_size; j++) {
                        if (tegra_emc_clk_sel[j].input == NULL)
                                continue;       /* invalid entry */

                        if ((mv >= table[j].emc_min_mv) &&
                            (rate < table[j].rate))
                                rate = table[j].rate;
                }
                /* Table entries specify rate in kHz */
                emc->dvfs->freqs[i] = rate * 1000;
        }
}

static bool is_emc_bridge(void)
{
        int mv;
        unsigned long rate;

        bridge = tegra_get_clock_by_name("bridge.emc");
        BUG_ON(!bridge);

        /* LPDDR2 does not need a bridge entry in DFS table: just lock bridge
           rate at minimum so it won't interfere with emc bus operations */
        if (dram_type == DRAM_TYPE_LPDDR2) {
                clk_set_rate(bridge, 0);
                return true;
        }

        /* DDR3 requires EMC DFS table to include a bridge entry with frequency
           above minimum bridge threshold, and voltage below bridge threshold */
        rate = clk_round_rate(bridge, TEGRA_EMC_BRIDGE_RATE_MIN);
        if (IS_ERR_VALUE(rate))
                return false;

        mv = tegra_dvfs_predict_peak_millivolts(emc, rate);
        if (IS_ERR_VALUE(mv) || (mv > TEGRA_EMC_BRIDGE_MVOLTS_MIN))
                return false;

        if (clk_set_rate(bridge, rate))
                return false;

        return true;
}

static int tegra_emc_suspend_notify(struct notifier_block *nb,
                                unsigned long event, void *data)
{
        if (event != PM_SUSPEND_PREPARE)
                return NOTIFY_OK;

        if (dram_type == DRAM_TYPE_DDR3) {
                if (clk_prepare_enable(bridge)) {
                        pr_info("Tegra emc suspend:"
                                " failed to enable bridge.emc\n");
                        return NOTIFY_STOP;
                }
                pr_info("Tegra emc suspend: enabled bridge.emc\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block tegra_emc_suspend_nb = {
        .notifier_call = tegra_emc_suspend_notify,
        .priority = 2,
};

static int tegra_emc_resume_notify(struct notifier_block *nb,
                                unsigned long event, void *data)
{
        if (event != PM_POST_SUSPEND)
                return NOTIFY_OK;

        if (dram_type == DRAM_TYPE_DDR3) {
                clk_disable_unprepare(bridge);
                pr_info("Tegra emc resume: disabled bridge.emc\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block tegra_emc_resume_nb = {
        .notifier_call = tegra_emc_resume_notify,
        .priority = -1,
};

static int tegra_emc_get_table_ns_per_tick(unsigned int emc_rate,
                                        unsigned int table_tick_len)
{
        unsigned int ns_per_tick = 0;
        unsigned int mc_period_10ns = 0;
        unsigned int reg;

        reg = mc_readl(MC_EMEM_ARB_MISC0) & MC_EMEM_ARB_MISC0_EMC_SAME_FREQ;

        mc_period_10ns = ((reg ? (NSEC_PER_MSEC * 10) : (20 * NSEC_PER_MSEC)) /
                        (emc_rate));
        ns_per_tick = ((table_tick_len & MC_EMEM_ARB_CFG_CYCLE_MASK)
                * mc_period_10ns) / (10 *
                (1 + ((table_tick_len & MC_EMEM_ARB_CFG_EXTRA_TICK_MASK)
                >> MC_EMEM_ARB_CFG_EXTRA_TICK_SHIFT)));

        /* round new_ns_per_tick to 30/60 */
        if (ns_per_tick < 45)
                ns_per_tick = 30;
        else
                ns_per_tick = 60;

        return ns_per_tick;
}
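
/*
 * Worked example for the tick math above (emc_rate is in kHz, like the
 * table rates): at 400000 kHz with MC running at the same frequency,
 * mc_period_10ns = 10 * NSEC_PER_MSEC / 400000 = 25, i.e. 2.5 ns expressed
 * in 0.1 ns units. A 12-cycle tick with no extra-tick bits then gives
 * 12 * 25 / 10 = 30 ns, which the <45 rounding keeps at 30.
 */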

#ifdef CONFIG_OF
static struct device_node *tegra_emc_ramcode_devnode(struct device_node *np)
{
        struct device_node *iter;
        u32 reg;

        for_each_child_of_node(np, iter) {
                if (of_property_read_u32(iter, "nvidia,ram-code", &reg))
                        continue;
                if (reg == tegra_get_bct_strapping())
                        return of_node_get(iter);
        }

        return NULL;
}

static struct tegra30_emc_pdata *tegra_emc_dt_parse_pdata(
                struct platform_device *pdev)
{
        struct device_node *np = pdev->dev.of_node;
        struct device_node *tnp, *iter;
        struct tegra30_emc_pdata *pdata;
        int ret, i, num_tables;
        u32 tegra_bct_strapping;

        if (!np)
                return NULL;

        tegra_bct_strapping = tegra_get_bct_strapping();
        if (of_find_property(np, "nvidia,use-ram-code", NULL)) {
                tnp = tegra_emc_ramcode_devnode(np);
                if (!tnp)
                        dev_warn(&pdev->dev,
                                "can't find emc table for ram-code 0x%02x\n",
                                        tegra_bct_strapping);
        } else
                tnp = of_node_get(np);

        if (!tnp)
                return NULL;

        num_tables = 0;
        for_each_child_of_node(tnp, iter)
                if (of_device_is_compatible(iter, "nvidia,tegra30-emc-table"))
                        num_tables++;

        if (!num_tables) {
                pdata = NULL;
                goto out;
        }

        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
        if (!pdata)
                goto out;
        pdata->tables = devm_kzalloc(&pdev->dev,
                                sizeof(*pdata->tables) * num_tables,
                                        GFP_KERNEL);
        if (!pdata->tables) {
                pdata = NULL;
                goto out;
        }

        i = 0;
        for_each_child_of_node(tnp, iter) {
                u32 u;
                int num_burst_regs;
                struct property *prop;

                ret = of_property_read_u32(iter, "nvidia,revision", &u);
                if (ret) {
                        dev_err(&pdev->dev, "no revision in %s\n",
                                iter->full_name);
                        continue;
                }
                pdata->tables[i].rev = u;

                ret = of_property_read_u32(iter, "clock-frequency", &u);
                if (ret) {
                        dev_err(&pdev->dev, "no clock-frequency in %s\n",
                                iter->full_name);
                        continue;
                }
                pdata->tables[i].rate = u;

                prop = of_find_property(iter, "nvidia,emc-registers", NULL);
                if (!prop)
                        continue;

                num_burst_regs = prop->length / sizeof(u);

                ret = of_property_read_u32_array(iter, "nvidia,emc-registers",
                                                pdata->tables[i].burst_regs,
                                                        num_burst_regs);
                if (ret) {
                        dev_err(&pdev->dev,
                                "malformed emc-registers property in %s\n",
                                iter->full_name);
                        continue;
                }

                of_property_read_u32(iter, "nvidia,emc-zcal-cnt-long",
                                        &pdata->tables[i].emc_zcal_cnt_long);
                of_property_read_u32(iter, "nvidia,emc-acal-interval",
                                        &pdata->tables[i].emc_acal_interval);
                of_property_read_u32(iter, "nvidia,emc-periodic-qrst",
                                        &pdata->tables[i].emc_periodic_qrst);
                of_property_read_u32(iter, "nvidia,emc-mode-reset",
                                        &pdata->tables[i].emc_mode_reset);
                of_property_read_u32(iter, "nvidia,emc-mode-1",
                                        &pdata->tables[i].emc_mode_1);
                of_property_read_u32(iter, "nvidia,emc-mode-2",
                                        &pdata->tables[i].emc_mode_2);
                of_property_read_u32(iter, "nvidia,emc-dsr",
                                        &pdata->tables[i].emc_dsr);

                ret = of_property_read_u32(iter, "nvidia,emc-min-mv", &u);
                if (!ret)
                        pdata->tables[i].emc_min_mv = u;

                i++;
        }
        pdata->num_tables = i;

out:
        of_node_put(tnp);
        return pdata;
}
#else
static struct tegra30_emc_pdata *tegra_emc_dt_parse_pdata(
                                        struct platform_device *pdev)
{
        return NULL;
}
#endif

static int __devinit tegra30_emc_probe(struct platform_device *pdev)
{
        int i, mv;
        u32 reg;
        bool max_entry = false;
        unsigned long boot_rate, max_rate;
        struct clk *cbus = tegra_get_clock_by_name("cbus");
        unsigned int ns_per_tick = 0;
        unsigned int cur_ns_per_tick = 0;
        struct tegra30_emc_pdata *pdata;
        struct resource *res;

        if (tegra_emc_table)
                return -EINVAL;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "missing register base\n");
                return -ENOMEM;
        }

        pdata = pdev->dev.platform_data;

        if (!pdata)
                pdata = tegra_emc_dt_parse_pdata(pdev);

        pdev->dev.platform_data = pdata;

        emc_stats.clkchange_count = 0;
        spin_lock_init(&emc_stats.spinlock);
        emc_stats.last_update = get_jiffies_64();
        emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

        boot_rate = clk_get_rate(emc) / 1000;
        max_rate = clk_get_max_rate(emc) / 1000;

        if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
                pr_err("tegra: not supported DRAM type %u\n", dram_type);
                return -ENODATA;
        }

        if (emc->parent != tegra_get_clock_by_name("pll_m")) {
                pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
                        emc->parent->name);
                return -ENODATA;
        }

        if (!pdata || !pdata->tables || !pdata->num_tables) {
                pr_err("tegra: EMC DFS table is empty\n");
                return -ENODATA;
        }

        tegra_emc_table_size = min(pdata->num_tables, TEGRA_EMC_TABLE_MAX_SIZE);
        switch (pdata->tables[0].rev) {
        case 0x30:
                emc_num_burst_regs = 105;
                break;
        case 0x31:
        case 0x32:
        case 0x33:
                emc_num_burst_regs = 107;
                break;
        default:
                pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
                        pdata->tables[0].rev);
                return -ENODATA;
        }

        /* Match EMC source/divider settings with table entries */
        for (i = 0; i < tegra_emc_table_size; i++) {
                bool mc_same_freq = MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
                        pdata->tables[i].burst_regs[MC_EMEM_ARB_MISC0_INDEX];
                unsigned long table_rate = pdata->tables[i].rate;
                if (!table_rate)
                        continue;

                BUG_ON(pdata->tables[i].rev != pdata->tables[0].rev);

                if (find_matching_input(table_rate, mc_same_freq,
                                        &tegra_emc_clk_sel[i], cbus))
                        continue;

                if (table_rate == boot_rate)
                        emc_stats.last_sel = i;

                if (table_rate == max_rate)
                        max_entry = true;

                cur_ns_per_tick = tegra_emc_get_table_ns_per_tick(table_rate,
                        pdata->tables[i].burst_regs[MC_EMEM_ARB_CFG_INDEX]);

                if (ns_per_tick == 0) {
                        ns_per_tick = cur_ns_per_tick;
                } else if (ns_per_tick != cur_ns_per_tick) {
                        pr_err("tegra: invalid EMC DFS table: "
                                "mismatched DFS tick lengths "
                                "within table!\n");
                        ns_per_tick = 0;
                        return -EINVAL;
                }
        }

        /* Validate EMC rate and voltage limits */
        if (!max_entry) {
                pr_err("tegra: invalid EMC DFS table: entry for max rate"
                       " %lu kHz is not found\n", max_rate);
                return -EINVAL;
        }

        tegra_latency_allowance_update_tick_length(ns_per_tick);

        tegra_emc_table = pdata->tables;

        adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
        mv = tegra_dvfs_predict_peak_millivolts(emc, max_rate * 1000);
        if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
                tegra_emc_table = NULL;
                pr_err("tegra: invalid EMC DFS table: maximum rate %lu kHz does"
                       " not match nominal voltage %d\n",
                                max_rate, emc->dvfs->max_millivolts);
                return -ENODATA;
        }

        if (!is_emc_bridge()) {
                tegra_emc_table = NULL;
                pr_err("tegra: invalid EMC DFS table: emc bridge not found\n");
                return -ENODATA;
        }
        pr_info("tegra: validated EMC DFS table\n");

        /* Configure clock change mode according to dram type */
        reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
        reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
                EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
        emc_writel(reg, EMC_CFG_2);

        register_pm_notifier(&tegra_emc_suspend_nb);
        register_pm_notifier(&tegra_emc_resume_nb);

        return 0;
}

static struct of_device_id tegra30_emc_of_match[] __devinitdata = {
        { .compatible = "nvidia,tegra30-emc", },
        { },
};

static struct platform_driver tegra30_emc_driver = {
        .driver = {
                .name = "tegra-emc",
                .owner = THIS_MODULE,
                .of_match_table = tegra30_emc_of_match,
        },
        .probe = tegra30_emc_probe,
};

int __init tegra30_init_emc(void)
{
        return platform_driver_register(&tegra30_emc_driver);
}

1342 void tegra_emc_timing_invalidate(void)
1343 {
1344         emc_timing = NULL;
1345 }
1346
1347 void tegra_init_dram_bit_map(const u32 *bit_map, int map_size)
1348 {
1349         BUG_ON(map_size != 32);
1350         dram_to_soc_bit_map = bit_map;
1351 }
1352
void tegra_emc_dram_type_init(struct clk *c)
{
        emc = c;

        dram_type = (emc_readl(EMC_FBIO_CFG5) &
                     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;
        if (dram_type == DRAM_TYPE_DDR3)
                emc->min_rate = EMC_MIN_RATE_DDR3;

        dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
        emc_cfg_saved = emc_readl(EMC_CFG);
}

int tegra_emc_get_dram_type(void)
{
        return dram_type;
}

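/*
 * Convert a masked field of an SoC-view value into DRAM bit positions
 * using the swizzle map; with no map installed, SoC and DRAM bit
 * orders are taken to be identical.
 */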
static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
{
        int bit;
        u32 dram_val = 0;

        /* Tegra clock definitions always use a shifted mask */
        if (!dram_to_soc_bit_map)
                return soc_val & dram_mask;

        for (bit = dram_shift; bit < 32; bit++) {
                u32 dram_bit_mask = 0x1 << bit;
                u32 soc_bit_mask = dram_to_soc_bit_map[bit];

                if (!(dram_bit_mask & dram_mask))
                        break;

                if (soc_bit_mask & soc_val)
                        dram_val |= dram_bit_mask;
        }

        return dram_val;
}

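/*
 * Issue an LPDDR2 mode register read (MRR) on the given device/address
 * and return the data field, or a negative errno on failure; callers
 * must hold emc_access_lock.
 */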
static int emc_read_mrr(int dev, int addr)
{
        int ret;
        u32 val;

        if (dram_type != DRAM_TYPE_LPDDR2)
                return -ENODEV;

        ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
        if (ret)
                return ret;

        val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
        val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
        emc_writel(val, EMC_MRR);

        ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
        if (ret)
                return ret;

        val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
        return val;
}

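/*
 * Read MR4 from DRAM device 0 and return the temperature field in DRAM
 * bit order, or a negative errno (-ENODEV on non-LPDDR2 parts).
 */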
int tegra_emc_get_dram_temperature(void)
{
        int mr4;
        unsigned long flags;

        spin_lock_irqsave(&emc_access_lock, flags);

        mr4 = emc_read_mrr(0, 4);
        if (IS_ERR_VALUE(mr4)) {
                spin_unlock_irqrestore(&emc_access_lock, flags);
                return mr4;
        }
        spin_unlock_irqrestore(&emc_access_lock, flags);

        mr4 = soc_to_dram_bit_swap(
                mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
        return mr4;
}

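/*
 * Apply over-temperature refresh derating (LPDDR2 only): on a state
 * change patch the active timing, latch it, and for any non-NONE state
 * force an immediate refresh so the new interval takes effect at once.
 */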
int tegra_emc_set_over_temp_state(unsigned long state)
{
        unsigned long flags;

        if (dram_type != DRAM_TYPE_LPDDR2)
                return -ENODEV;

        spin_lock_irqsave(&emc_access_lock, flags);

        /* Update refresh timing if state changed */
        if (emc_timing && (dram_over_temp_state != state)) {
                set_over_temp_timing(emc_timing, state);
                emc_timing_update();
                if (state != DRAM_OVER_TEMP_NONE)
                        emc_writel(EMC_REF_FORCE_CMD, EMC_REF);
                dram_over_temp_state = state;
        }
        spin_unlock_irqrestore(&emc_access_lock, flags);
        return 0;
}

/* A non-zero state value decrements eack_disable_refcnt */
static int tegra_emc_set_eack_state(unsigned long state)
{
        unsigned long flags;
        u32 mc_override;
        static int eack_disable_refcnt;

        spin_lock_irqsave(&emc_access_lock, flags);

        /*
         * refcnt > 0 implies there is at least one client requiring eack
         * disabled. refcnt of 0 implies eack is enabled.
         */
        if (eack_disable_refcnt == 1 && state) {
                mc_override = mc_readl(MC_EMEM_ARB_OVERRIDE);
                enable_early_ack(mc_override);
        } else if (eack_disable_refcnt == 0 && !state) {
                mc_override = mc_readl(MC_EMEM_ARB_OVERRIDE);
                disable_early_ack(mc_override);
        }

        if (state) {
                if (likely(eack_disable_refcnt > 0)) {
                        --eack_disable_refcnt;
                } else {
                        pr_err("%s: Ignored a request to underflow eack disable reference counter\n",
                               __func__);
                        dump_stack();
                }
        } else {
                ++eack_disable_refcnt;
        }

        spin_unlock_irqrestore(&emc_access_lock, flags);
        return 0;
}

int tegra_emc_enable_eack(void)
{
        return tegra_emc_set_eack_state(1);
}

int tegra_emc_disable_eack(void)
{
        return tegra_emc_set_eack_state(0);
}

#ifdef CONFIG_DEBUG_FS

static struct dentry *emc_debugfs_root;

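/*
 * debugfs 'stats' backend: prints time spent at each valid table rate
 * (in clock_t units), the total transition count, and the time stamp
 * of the last statistics update.
 */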
static int emc_stats_show(struct seq_file *s, void *data)
{
        int i;

        emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);

        seq_printf(s, "%-10s %-10s\n", "rate kHz", "time");
        for (i = 0; i < tegra_emc_table_size; i++) {
                if (tegra_emc_clk_sel[i].input == NULL)
                        continue;       /* invalid entry */

                seq_printf(s, "%-10lu %-10llu\n", tegra_emc_table[i].rate,
                           cputime64_to_clock_t(emc_stats.time_at_clock[i]));
        }
        seq_printf(s, "%-15s %llu\n", "transitions:",
                   emc_stats.clkchange_count);
        seq_printf(s, "%-15s %llu\n", "time-stamp:",
                   cputime64_to_clock_t(emc_stats.last_update));

        return 0;
}

static int emc_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, emc_stats_show, inode->i_private);
}

static const struct file_operations emc_stats_fops = {
        .open           = emc_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};


static int dram_temperature_get(void *data, u64 *val)
{
        *val = tegra_emc_get_dram_temperature();
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
                        NULL, "%lld\n");

static int over_temp_state_get(void *data, u64 *val)
{
        *val = dram_over_temp_state;
        return 0;
}
static int over_temp_state_set(void *data, u64 val)
{
        return tegra_emc_set_over_temp_state(val);
}
DEFINE_SIMPLE_ATTRIBUTE(over_temp_state_fops, over_temp_state_get,
                        over_temp_state_set, "%llu\n");

static int eack_state_get(void *data, u64 *val)
{
        unsigned long flags;
        u32 mc_override;

        spin_lock_irqsave(&emc_access_lock, flags);
        mc_override = mc_readl(MC_EMEM_ARB_OVERRIDE);
        spin_unlock_irqrestore(&emc_access_lock, flags);

        *val = (mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK);
        return 0;
}

static int eack_state_set(void *data, u64 val)
{
        tegra_emc_set_eack_state(val);
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(eack_state_fops, eack_state_get,
                        eack_state_set, "%llu\n");

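/*
 * Bandwidth efficiency knobs, in percent (writes are clamped to 100).
 * These factors are consulted when aggregate client bandwidth demand is
 * translated into an EMC rate request; changing either value kicks a
 * shared bus re-evaluation via tegra_clk_shared_bus_update().
 */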
static int efficiency_get(void *data, u64 *val)
{
        *val = tegra_emc_bw_efficiency;
        return 0;
}
static int efficiency_set(void *data, u64 val)
{
        tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
        if (emc)
                tegra_clk_shared_bus_update(emc);

        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
                        efficiency_set, "%llu\n");

static int efficiency_boost_get(void *data, u64 *val)
{
        *val = tegra_emc_bw_efficiency_boost;
        return 0;
}
static int efficiency_boost_set(void *data, u64 val)
{
        tegra_emc_bw_efficiency_boost = (val > 100) ? 100 : val;
        if (emc)
                tegra_clk_shared_bus_update(emc);

        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(efficiency_boost_fops, efficiency_boost_get,
                        efficiency_boost_set, "%llu\n");

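/*
 * Populate /sys/kernel/debug/tegra_emc/ (only when a valid DFS table
 * was installed):
 *   stats             - rate residency and transition statistics (r/o)
 *   dram_temperature  - decoded LPDDR2 MR4 field (r/o)
 *   over_temp_state   - refresh derating state (r/w)
 *   eack_state        - early-ack override state (r/w)
 *   efficiency        - bw efficiency percentage (r/w)
 *   efficiency_boost  - boosted bw efficiency percentage (r/w)
 */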
static int __init tegra_emc_debug_init(void)
{
        if (!tegra_emc_table)
                return 0;

        emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
        if (!emc_debugfs_root)
                return -ENOMEM;

        if (!debugfs_create_file("stats", S_IRUGO, emc_debugfs_root,
                                 NULL, &emc_stats_fops))
                goto err_out;

        if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
                                 NULL, &dram_temperature_fops))
                goto err_out;

        if (!debugfs_create_file("over_temp_state", S_IRUGO | S_IWUSR,
                                 emc_debugfs_root, NULL, &over_temp_state_fops))
                goto err_out;

        if (!debugfs_create_file("eack_state", S_IRUGO | S_IWUSR,
                                 emc_debugfs_root, NULL, &eack_state_fops))
                goto err_out;

        if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
                                 emc_debugfs_root, NULL, &efficiency_fops))
                goto err_out;

        if (!debugfs_create_file("efficiency_boost", S_IRUGO | S_IWUSR,
                                 emc_debugfs_root, NULL, &efficiency_boost_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(emc_debugfs_root);
        return -ENOMEM;
}

late_initcall(tegra_emc_debug_init);
#endif