/*
 * arch/arm/mach-tegra/tegra3_emc.c
 *
 * Copyright (C) 2011 NVIDIA Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 */

#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/platform_data/tegra30_emc.h>
#include <linux/suspend.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/cputime.h>
#include <asm/cacheflush.h>

#include <mach/iomap.h>

#include "clock.h"
#include "dvfs.h"
#include "tegra3_emc.h"

#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
static bool emc_enable;
#endif
module_param(emc_enable, bool, 0644);

static struct platform_device *emc_pdev;
static void __iomem *emc_regbases[2];

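/*
 * Fraction, in percent, of the theoretical DRAM bandwidth that is assumed
 * to be usable when bandwidth requests are converted into an EMC rate
 * (tunable through the "efficiency" debugfs file below).
 */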
u8 tegra_emc_bw_efficiency = 35;

#define EMC_MIN_RATE_DDR3		50000000
#define EMC_STATUS_UPDATE_TIMEOUT	100
#define TEGRA_EMC_TABLE_MAX_SIZE	16

enum {
	DLL_CHANGE_NONE = 0,
	DLL_CHANGE_ON,
	DLL_CHANGE_OFF,
};

#define EMC_CLK_DIV_SHIFT		0
#define EMC_CLK_DIV_MASK		(0xFF << EMC_CLK_DIV_SHIFT)
#define EMC_CLK_SOURCE_SHIFT		30
#define EMC_CLK_SOURCE_MASK		(0x3 << EMC_CLK_SOURCE_SHIFT)
#define EMC_CLK_LOW_JITTER_ENABLE	(0x1 << 29)
#define EMC_CLK_MC_SAME_FREQ		(0x1 << 16)

#define BURST_REG_LIST \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG6),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS0),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE0),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS0),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ0),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),		\
	DEFINE_REG(0, EMC_XM2CLKPADCTRL),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2QUSEPADCTRL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL3),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),	\
								\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ),\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),	\
								\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_RSV),

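/*
 * BURST_REG_LIST is expanded twice with different DEFINE_REG definitions:
 * first into a table of register addresses, then into matching <reg>_INDEX
 * enumerators, keeping both views in sync. An entry with a zero base
 * (EMC_XM2CLKPADCTRL) yields a NULL address and is written separately
 * during the clock change sequence.
 */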
#define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
static const void __iomem *burst_reg_addr[TEGRA_EMC_NUM_REGS] = {
	BURST_REG_LIST
};
#undef DEFINE_REG

#define DEFINE_REG(base, reg)	reg##_INDEX
enum {
	BURST_REG_LIST
};
#undef DEFINE_REG

static int emc_num_burst_regs;

static struct clk_mux_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
static struct tegra30_emc_table start_timing;
static const struct tegra30_emc_table *emc_timing;
static unsigned long dram_over_temp_state = DRAM_OVER_TEMP_NONE;

static const u32 *dram_to_soc_bit_map;

static u32 dram_dev_num;
static u32 emc_cfg_saved;
static u32 dram_type = -1;

static struct clk *emc;
static struct clk *bridge;

static struct {
	cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
	int last_sel;
	u64 last_update;
	u64 clkchange_count;
	spinlock_t spinlock;
} emc_stats;

static DEFINE_SPINLOCK(emc_access_lock);

static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);

static inline void emc_writel(u32 val, unsigned long addr)
{
	writel(val, emc_regbases[0] + addr);
	barrier();
}
static inline u32 emc_readl(unsigned long addr)
{
	return readl(emc_regbases[0] + addr);
}
static inline void mc_writel(u32 val, unsigned long addr)
{
	writel(val, emc_regbases[1] + addr);
	barrier();
}
static inline u32 mc_readl(unsigned long addr)
{
	return readl(emc_regbases[1] + addr);
}

static void emc_last_stats_update(int last_sel)
{
	unsigned long flags;
	u64 cur_jiffies = get_jiffies_64();

	spin_lock_irqsave(&emc_stats.spinlock, flags);

	if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
		emc_stats.time_at_clock[emc_stats.last_sel] =
			emc_stats.time_at_clock[emc_stats.last_sel] +
			(cur_jiffies - emc_stats.last_update);

	emc_stats.last_update = cur_jiffies;

	if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
		emc_stats.clkchange_count++;
		emc_stats.last_sel = last_sel;
	}
	spin_unlock_irqrestore(&emc_stats.spinlock, flags);
}

static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
{
	int i;
	for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
		if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static inline void emc_timing_update(void)
{
	int err;

	emc_writel(0x1, EMC_TIMING_CONTROL);
	err = wait_for_update(EMC_STATUS,
			      EMC_STATUS_TIMING_UPDATE_STALLED, false);
	if (err) {
		pr_err("%s: timing update error: %d\n", __func__, err);
		BUG();
	}
}

static inline void auto_cal_disable(void)
{
	int err;

	emc_writel(0, EMC_AUTO_CAL_INTERVAL);
	err = wait_for_update(EMC_AUTO_CAL_STATUS,
			      EMC_AUTO_CAL_STATUS_ACTIVE, false);
	if (err) {
		pr_err("%s: disable auto-cal error: %d\n", __func__, err);
		BUG();
	}
}

static inline void set_over_temp_timing(
	const struct tegra30_emc_table *next_timing, unsigned long state)
{
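/* quadruple the refresh rate: keep the upper half-word intact and divide
   the 16-bit count in the lower half-word by 4 */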
#define REFRESH_SPEEDUP(val)						      \
	do {								      \
		val = ((val) & 0xFFFF0000) | (((val) & 0xFFFF) >> 2);	      \
	} while (0)

	u32 ref = next_timing->burst_regs[EMC_REFRESH_INDEX];
	u32 pre_ref = next_timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
	u32 dsr_cntrl = next_timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];

	switch (state) {
	case DRAM_OVER_TEMP_NONE:
		break;
	case DRAM_OVER_TEMP_REFRESH:
		REFRESH_SPEEDUP(ref);
		REFRESH_SPEEDUP(pre_ref);
		REFRESH_SPEEDUP(dsr_cntrl);
		break;
	default:
		pr_err("%s: Failed to set dram over temp state %lu\n",
		       __func__, state);
		BUG();
	}

	__raw_writel(ref, burst_reg_addr[EMC_REFRESH_INDEX]);
	__raw_writel(pre_ref, burst_reg_addr[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
	__raw_writel(dsr_cntrl, burst_reg_addr[EMC_DYN_SELF_REF_CONTROL_INDEX]);
}

static inline void set_mc_arbiter_limits(void)
{
	u32 reg = mc_readl(MC_EMEM_ARB_OUTSTANDING_REQ);
	u32 max_val = 0x50 << EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;

	if (!(reg & MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE) ||
	    ((reg & MC_EMEM_ARB_OUTSTANDING_REQ_MAX_MASK) > max_val)) {
		reg = MC_EMEM_ARB_OUTSTANDING_REQ_LIMIT_ENABLE |
			MC_EMEM_ARB_OUTSTANDING_REQ_HOLDOFF_OVERRIDE | max_val;
		mc_writel(reg, MC_EMEM_ARB_OUTSTANDING_REQ);
		mc_writel(0x1, MC_TIMING_CONTROL);
	}
}

static inline void disable_early_ack(u32 mc_override)
{
	static u32 override_val;

	override_val = mc_override & (~MC_EMEM_ARB_OVERRIDE_EACK_MASK);
	mc_writel(override_val, MC_EMEM_ARB_OVERRIDE);
	__cpuc_flush_dcache_area(&override_val, sizeof(override_val));
	outer_clean_range(__pa(&override_val), __pa(&override_val + 1));
	override_val |= mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK;
}

static inline void enable_early_ack(u32 mc_override)
{
	mc_writel((mc_override | MC_EMEM_ARB_OVERRIDE_EACK_MASK),
			MC_EMEM_ARB_OVERRIDE);
}

static inline bool dqs_preset(const struct tegra30_emc_table *next_timing,
			      const struct tegra30_emc_table *last_timing)
{
	bool ret = false;

#define DQS_SET(reg, bit)						      \
	do {								      \
		if ((next_timing->burst_regs[EMC_##reg##_INDEX] &	      \
		     EMC_##reg##_##bit##_ENABLE) &&			      \
		    (!(last_timing->burst_regs[EMC_##reg##_INDEX] &	      \
		       EMC_##reg##_##bit##_ENABLE))) {			      \
			emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
				   | EMC_##reg##_##bit##_ENABLE, EMC_##reg);  \
			ret = true;					      \
		}							      \
	} while (0)

	DQS_SET(XM2DQSPADCTRL2, VREF);
	DQS_SET(XM2DQSPADCTRL3, VREF);
	DQS_SET(XM2QUSEPADCTRL, IVREF);

	return ret;
}

static inline void overwrite_mrs_wait_cnt(
	const struct tegra30_emc_table *next_timing,
	bool zcal_long)
{
	u32 reg;
	u32 cnt = 512;

	/* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
	   for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
	   expected operation length. Reduce the latter by the overlapping
	   zq-calibration, if any */
	if (zcal_long)
		cnt -= dram_dev_num * 256;

	reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
		EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
		EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
	if (cnt < reg)
		cnt = reg;

	reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
		(~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
	reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
		EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;

	emc_writel(reg, EMC_MRS_WAIT_CNT);
}

static inline bool need_qrst(const struct tegra30_emc_table *next_timing,
			     const struct tegra30_emc_table *last_timing,
			     u32 emc_dpd_reg)
{
	u32 last_mode = (last_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
		EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;
	u32 next_mode = (next_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
		EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;

	/* QUSE DPD is disabled */
	bool ret = !(emc_dpd_reg & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) &&

	/* QUSE uses external mode before or after clock change */
		(((last_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
		  (last_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK)) ||
		 ((next_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
		  (next_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK))) &&

	/* QUSE pad switches from schmitt to vref mode */
		(((last_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
		   EMC_XM2QUSEPADCTRL_IVREF_ENABLE) == 0) &&
		 ((next_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
		   EMC_XM2QUSEPADCTRL_IVREF_ENABLE) != 0));

	return ret;
}

static inline void periodic_qrst_enable(u32 emc_cfg_reg, u32 emc_dbg_reg)
{
	/* enable write mux => enable periodic QRST => restore mux */
	emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
	emc_writel(emc_cfg_reg | EMC_CFG_PERIODIC_QRST, EMC_CFG);
	emc_writel(emc_dbg_reg, EMC_DBG);
}

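/* For DDR3, mode register MR1 bit 0 is "DLL disable": the DLL is on when
   the bit is clear (DLL state is only meaningful on DDR3) */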
static inline int get_dll_change(const struct tegra30_emc_table *next_timing,
				 const struct tegra30_emc_table *last_timing)
{
	bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
	bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);

	if (next_dll_enabled == last_dll_enabled)
		return DLL_CHANGE_NONE;
	else if (next_dll_enabled)
		return DLL_CHANGE_ON;
	else
		return DLL_CHANGE_OFF;
}

static inline void set_dram_mode(const struct tegra30_emc_table *next_timing,
				 const struct tegra30_emc_table *last_timing,
				 int dll_change)
{
	if (dram_type == DRAM_TYPE_DDR3) {
		/* first mode_1, then mode_2, then mode_reset */
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			emc_writel(next_timing->emc_mode_1, EMC_EMRS);
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			emc_writel(next_timing->emc_mode_2, EMC_EMRS);

		if ((next_timing->emc_mode_reset !=
		     last_timing->emc_mode_reset) ||
		    (dll_change == DLL_CHANGE_ON)) {
			u32 reg = next_timing->emc_mode_reset &
				(~EMC_MODE_SET_DLL_RESET);
			if (dll_change == DLL_CHANGE_ON) {
				reg |= EMC_MODE_SET_DLL_RESET;
				reg |= EMC_MODE_SET_LONG_CNT;
			}
			emc_writel(reg, EMC_MRS);
		}
	} else {
		/* first mode_2, then mode_1; mode_reset is not applicable */
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			emc_writel(next_timing->emc_mode_2, EMC_MRW);
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			emc_writel(next_timing->emc_mode_1, EMC_MRW);
	}
}

static inline void do_clock_change(u32 clk_setting)
{
	int err;

	mc_readl(MC_EMEM_ADR_CFG);	/* completes prev writes */
	writel(clk_setting, clk_base + emc->reg);

	err = wait_for_update(EMC_INTSTATUS,
			      EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
	if (err) {
		pr_err("%s: clock change completion error: %d\n",
		       __func__, err);
		BUG();
	}
}

static noinline void emc_set_clock(const struct tegra30_emc_table *next_timing,
				   const struct tegra30_emc_table *last_timing,
				   u32 clk_setting)
{
	int i, dll_change, pre_wait;
	bool dyn_sref_enabled, vref_cal_toggle, qrst_used, zcal_long;

	u32 mc_override = mc_readl(MC_EMEM_ARB_OVERRIDE);
	u32 emc_cfg_reg = emc_readl(EMC_CFG);
	u32 emc_dbg_reg = emc_readl(EMC_DBG);

	dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
	dll_change = get_dll_change(next_timing, last_timing);
	zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
		(last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

	/* FIXME: remove steps enumeration below? */

	/* 1. clear clkchange_complete interrupts */
	emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

	/* 2. disable dynamic self-refresh and preset dqs vref, then wait for
	   possible self-refresh entry/exit and/or dqs vref settled - waiting
	   before the clock change decreases worst case change stall time */
	pre_wait = 0;
	if (dyn_sref_enabled) {
		emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
		pre_wait = 5;		/* 5us+ for self-refresh entry/exit */
	}

	/* 2.25 update MC arbiter settings */
	set_mc_arbiter_limits();
	if (mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK)
		disable_early_ack(mc_override);

	/* 2.5 check dq/dqs vref delay */
	if (dqs_preset(next_timing, last_timing)) {
		if (pre_wait < 3)
			pre_wait = 3;	/* 3us+ for dqs vref settled */
	}
	if (pre_wait) {
		emc_timing_update();
		udelay(pre_wait);
	}

	/* 3. disable auto-cal if vref mode is switching */
	vref_cal_toggle = (next_timing->emc_acal_interval != 0) &&
		((next_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX] ^
		  last_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX]) &
		 EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE);
	if (vref_cal_toggle)
		auto_cal_disable();

	/* 4. program burst shadow registers */
	for (i = 0; i < emc_num_burst_regs; i++) {
		if (!burst_reg_addr[i])
			continue;
		__raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
	}
	if ((dram_type == DRAM_TYPE_LPDDR2) &&
	    (dram_over_temp_state != DRAM_OVER_TEMP_NONE))
		set_over_temp_timing(next_timing, dram_over_temp_state);
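	/* make sure all burst register writes above are issued before the
	   DLL prediction and flow control writes below */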
	wmb();
	barrier();

	/* On ddr3 when DLL is re-started predict MRS long wait count and
	   overwrite DFS table setting */
	if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
		overwrite_mrs_wait_cnt(next_timing, zcal_long);

	/* the last read below makes sure prev writes are completed */
	qrst_used = need_qrst(next_timing, last_timing,
			      emc_readl(EMC_SEL_DPD_CTRL));

	/* 5. flow control marker 1 (no EMC read access after this) */
	emc_writel(1, EMC_STALL_BEFORE_CLKCHANGE);

	/* 6. enable periodic QRST */
	if (qrst_used)
		periodic_qrst_enable(emc_cfg_reg, emc_dbg_reg);

	/* 6.1 disable auto-refresh to save time after clock change */
	emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 7. turn Off dll and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			emc_writel(next_timing->emc_mode_1, EMC_EMRS);
		emc_writel(DRAM_BROADCAST(dram_dev_num) |
			   EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
	}

	/* 8. flow control marker 2 */
	emc_writel(1, EMC_STALL_AFTER_CLKCHANGE);

	/* 8.1 enable write mux, update unshadowed pad control */
	emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
	emc_writel(next_timing->burst_regs[EMC_XM2CLKPADCTRL_INDEX],
		   EMC_XM2CLKPADCTRL);

	/* 9. restore periodic QRST, and disable write mux */
	if ((qrst_used) || (next_timing->emc_periodic_qrst !=
			    last_timing->emc_periodic_qrst)) {
		emc_cfg_reg = next_timing->emc_periodic_qrst ?
			emc_cfg_reg | EMC_CFG_PERIODIC_QRST :
			emc_cfg_reg & (~EMC_CFG_PERIODIC_QRST);
		emc_writel(emc_cfg_reg, EMC_CFG);
	}
	emc_writel(emc_dbg_reg, EMC_DBG);

	/* 10. exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		emc_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);

	/* 11. set dram mode registers */
	set_dram_mode(next_timing, last_timing, dll_change);

	/* 12. issue zcal command if turning zcal On */
	if (zcal_long) {
		emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
		if (dram_dev_num > 1)
			emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
	}

	/* 13. flow control marker 3 */
	emc_writel(1, EMC_UNSTALL_RW_AFTER_CLKCHANGE);

	/* 14. read any MC register to ensure the programming is done
	       change EMC clock source register (EMC read access restored)
	       wait for clk change completion */
	do_clock_change(clk_setting);

	/* 14.1 re-enable auto-refresh */
	emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 15. restore auto-cal */
	if (vref_cal_toggle)
		emc_writel(next_timing->emc_acal_interval,
			   EMC_AUTO_CAL_INTERVAL);

	/* 16. restore dynamic self-refresh */
	if (next_timing->rev >= 0x32)
		dyn_sref_enabled = next_timing->emc_dsr;
	if (dyn_sref_enabled) {
		emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
	}

	/* 17. set zcal wait count */
	if (zcal_long)
		emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

	/* 18. update restored timing */
	udelay(2);
	emc_timing_update();

	/* 18.a restore early ACK */
	mc_writel(mc_override, MC_EMEM_ARB_OVERRIDE);
}

static inline void emc_get_timing(struct tegra30_emc_table *timing)
{
	int i;

	for (i = 0; i < emc_num_burst_regs; i++) {
		if (burst_reg_addr[i])
			timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
		else
			timing->burst_regs[i] = 0;
	}
	timing->emc_acal_interval = 0;
	timing->emc_zcal_cnt_long = 0;
	timing->emc_mode_reset = 0;
	timing->emc_mode_1 = 0;
	timing->emc_mode_2 = 0;
	timing->emc_periodic_qrst = (emc_readl(EMC_CFG) &
				     EMC_CFG_PERIODIC_QRST) ? 1 : 0;
}

/* After deep sleep EMC power features are not restored.
 * Do it at run-time after the 1st clock change.
 */
static inline void emc_cfg_power_restore(void)
{
	struct tegra30_emc_pdata *pdata;
	u32 reg = emc_readl(EMC_CFG);
	u32 pwr_mask = EMC_CFG_PWR_MASK;

	pdata = emc_pdev->dev.platform_data;

	if (pdata->tables[0].rev >= 0x32)
		pwr_mask &= ~EMC_CFG_DYN_SREF_ENABLE;

	if ((reg ^ emc_cfg_saved) & pwr_mask) {
		reg = (reg & (~pwr_mask)) | (emc_cfg_saved & pwr_mask);
		emc_writel(reg, EMC_CFG);
		emc_timing_update();
	}
}

/* The EMC registers have shadow registers. When the EMC clock is updated
 * in the clock controller, the shadow registers are copied to the active
 * registers, allowing glitchless memory bus frequency changes.
 * This function updates the shadow registers for a new clock frequency,
 * and relies on the clock lock on the emc clock to avoid races between
 * multiple frequency changes */
int tegra_emc_set_rate(unsigned long rate)
{
	int i;
	u32 clk_setting;
	struct tegra30_emc_pdata *pdata;
	const struct tegra30_emc_table *last_timing;
	unsigned long flags;

	if (!emc_pdev)
		return -EINVAL;

	pdata = emc_pdev->dev.platform_data;

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	for (i = 0; i < pdata->num_tables; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		if (pdata->tables[i].rate == rate)
			break;
	}

	if (i >= pdata->num_tables)
		return -EINVAL;

	if (!emc_timing) {
		/* cannot assume that boot timing matches dfs table even
		   if boot frequency matches one of the table nodes */
		emc_get_timing(&start_timing);
		last_timing = &start_timing;
	} else {
		last_timing = emc_timing;
	}

	clk_setting = tegra_emc_clk_sel[i].value;

	spin_lock_irqsave(&emc_access_lock, flags);
	emc_set_clock(&pdata->tables[i], last_timing, clk_setting);
	if (!emc_timing)
		emc_cfg_power_restore();
	emc_timing = &pdata->tables[i];
	spin_unlock_irqrestore(&emc_access_lock, flags);

	emc_last_stats_update(i);

	pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

	return 0;
}
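
/*
 * Usage sketch (the caller is assumed to be the Tegra clock framework;
 * names outside this file are illustrative only):
 *
 *	long rounded = tegra_emc_round_rate(request_hz);
 *	if (rounded > 0)
 *		(void)tegra_emc_set_rate(rounded);
 *
 * tegra_emc_round_rate() returns the closest table rate at or above the
 * request, and tegra_emc_set_rate() accepts only exact table rates.
 */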

/* Select the closest EMC table rate at or above the requested rate */
long tegra_emc_round_rate(unsigned long rate)
{
	struct tegra30_emc_pdata *pdata;
	int i;
	int best = -1;
	unsigned long distance = ULONG_MAX;

	if (!emc_pdev)
		return clk_get_rate_locked(emc); /* no table - no rate change */

	if (!emc_enable)
		return -EINVAL;

	pdata = emc_pdev->dev.platform_data;

	pr_debug("%s: %lu\n", __func__, rate);

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	for (i = 0; i < pdata->num_tables; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		if (pdata->tables[i].rate >= rate &&
		    (pdata->tables[i].rate - rate) < distance) {
			distance = pdata->tables[i].rate - rate;
			best = i;
		}
	}

	if (best < 0)
		return -EINVAL;

	pr_debug("%s: using %lu\n", __func__, pdata->tables[best].rate);

	return pdata->tables[best].rate * 1000;
}

struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
{
	struct tegra30_emc_pdata *pdata;
	int i;

	if (!emc_pdev)
		return NULL;

	pdata = emc_pdev->dev.platform_data;

	pr_debug("%s: %lu\n", __func__, rate);

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	for (i = 0; i < pdata->num_tables; i++) {
		if (pdata->tables[i].rate == rate) {
			*div_value = (tegra_emc_clk_sel[i].value &
				EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
			return tegra_emc_clk_sel[i].input;
		}
	}

	return NULL;
}

static const struct clk_mux_sel *find_matching_input(
	unsigned long table_rate,
	u32 *div_value)
{
	unsigned long inp_rate;
	const struct clk_mux_sel *sel;

	for (sel = emc->inputs; sel->input != NULL; sel++) {
		/* Table entries specify rate in kHz */
		inp_rate = clk_get_rate(sel->input) / 1000;

		if ((inp_rate >= table_rate) &&
		    (inp_rate % table_rate == 0)) {
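			/* EMC divider is in half steps: field value n
			   divides the input rate by (n + 2) / 2 */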
			*div_value = 2 * inp_rate / table_rate - 2;
			return sel;
		}
	}
	return NULL;
}

static void adjust_emc_dvfs_table(const struct tegra30_emc_table *table,
				  int table_size)
{
	int i, j;
	unsigned long rate;

	if (table[0].rev < 0x33)
		return;

	for (i = 0; i < MAX_DVFS_FREQS; i++) {
		int mv = emc->dvfs->millivolts[i];
		if (!mv)
			break;

		/* For each dvfs voltage find maximum supported rate;
		   use 1MHz placeholder if not found */
		for (rate = 1000, j = 0; j < table_size; j++) {
			if (tegra_emc_clk_sel[j].input == NULL)
				continue;	/* invalid entry */

			if ((mv >= table[j].emc_min_mv) &&
			    (rate < table[j].rate))
				rate = table[j].rate;
		}
		/* Table entries specify rate in kHz */
		emc->dvfs->freqs[i] = rate * 1000;
	}
}

static bool is_emc_bridge(void)
{
	int mv;
	unsigned long rate;

	bridge = tegra_get_clock_by_name("bridge.emc");
	BUG_ON(!bridge);

	/* LPDDR2 does not need a bridge entry in DFS table: just lock bridge
	   rate at minimum so it won't interfere with emc bus operations */
	if (dram_type == DRAM_TYPE_LPDDR2) {
		clk_set_rate(bridge, 0);
		return true;
	}

	/* DDR3 requires EMC DFS table to include a bridge entry with frequency
	   above minimum bridge threshold, and voltage below bridge threshold */
	rate = clk_round_rate(bridge, TEGRA_EMC_BRIDGE_RATE_MIN);
	if (IS_ERR_VALUE(rate))
		return false;

	mv = tegra_dvfs_predict_millivolts(emc, rate);
	if (IS_ERR_VALUE(mv) || (mv > TEGRA_EMC_BRIDGE_MVOLTS_MIN))
		return false;

	if (clk_set_rate(bridge, rate))
		return false;

	return true;
}

static int tegra_emc_suspend_notify(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event != PM_SUSPEND_PREPARE)
		return NOTIFY_OK;

	if (dram_type == DRAM_TYPE_DDR3) {
		if (clk_enable(bridge)) {
			pr_info("Tegra emc suspend:"
				" failed to enable bridge.emc\n");
			return NOTIFY_STOP;
		}
		pr_info("Tegra emc suspend: enabled bridge.emc\n");
	}
	return NOTIFY_OK;
}
static struct notifier_block tegra_emc_suspend_nb = {
	.notifier_call = tegra_emc_suspend_notify,
	.priority = 2,
};

static int tegra_emc_resume_notify(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event != PM_POST_SUSPEND)
		return NOTIFY_OK;

	if (dram_type == DRAM_TYPE_DDR3) {
		clk_disable(bridge);
		pr_info("Tegra emc resume: disabled bridge.emc\n");
	}
	return NOTIFY_OK;
}
static struct notifier_block tegra_emc_resume_nb = {
	.notifier_call = tegra_emc_resume_notify,
	.priority = -1,
};

void tegra_emc_set_clk(struct clk *c)
{
	emc = c;
}

static int tegra_emc_probe(struct platform_device *pdev)
{
	struct tegra30_emc_pdata *pdata;
	struct resource *res;
	int i, mv;
	u32 reg, div_value;
	bool max_entry = false;
	unsigned long boot_rate, max_rate;
	const struct clk_mux_sel *sel;
	struct clk *min_clk;

	emc_stats.clkchange_count = 0;
	spin_lock_init(&emc_stats.spinlock);
	emc_stats.last_update = get_jiffies_64();
	emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

	min_clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(min_clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		return -EINVAL;
	}

	if (!emc_enable) {
		dev_err(&pdev->dev, "disabled per module parameter\n");
		return -ENODEV;
	}

	/* Map the EMC and MC apertures before any register access */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "missing register base\n");
		return -ENOMEM;
	}

	emc_regbases[0] = devm_request_and_ioremap(&pdev->dev, res);
	if (!emc_regbases[0]) {
		dev_err(&pdev->dev, "failed to remap registers\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(&pdev->dev, "missing MC register base\n");
		return -ENOMEM;
	}

	emc_regbases[1] = devm_request_and_ioremap(&pdev->dev, res);
	if (!emc_regbases[1]) {
		dev_err(&pdev->dev, "failed to remap MC registers\n");
		return -ENOMEM;
	}

	dram_type = (emc_readl(EMC_FBIO_CFG5) &
		     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;
	if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
		pr_err("tegra: unsupported DRAM type %u\n", dram_type);
		return -EINVAL;
	}

	if (dram_type == DRAM_TYPE_DDR3)
		clk_set_rate(min_clk, EMC_MIN_RATE_DDR3);

	dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
	emc_cfg_saved = emc_readl(EMC_CFG);

	boot_rate = clk_get_rate(emc) / 1000;
	max_rate = clk_get_max_rate(emc) / 1000;

	if (emc->parent != tegra_get_clock_by_name("pll_m")) {
		pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
			emc->parent->name);
		return -EINVAL;
	}

	pdata = pdev->dev.platform_data;

	if (!pdata || !pdata->tables || !pdata->num_tables) {
		pr_err("tegra: EMC DFS table is empty\n");
		return -EINVAL;
	}

	pdata->num_tables = min(pdata->num_tables, TEGRA_EMC_TABLE_MAX_SIZE);
	switch (pdata->tables[0].rev) {
	case 0x30:
		emc_num_burst_regs = 105;
		break;
	case 0x31:
	case 0x32:
	case 0x33:
		emc_num_burst_regs = 107;
		break;
	default:
		pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
			pdata->tables[0].rev);
		return -EINVAL;
	}

	/* Match EMC source/divider settings with table entries */
	for (i = 0; i < pdata->num_tables; i++) {
		unsigned long table_rate = pdata->tables[i].rate;
		if (!table_rate)
			continue;

		BUG_ON(pdata->tables[i].rev != pdata->tables[0].rev);

		sel = find_matching_input(table_rate, &div_value);
		if (!sel)
			continue;

		if (table_rate == boot_rate)
			emc_stats.last_sel = i;

		if (table_rate == max_rate)
			max_entry = true;

		tegra_emc_clk_sel[i] = *sel;
		BUG_ON(div_value >
		       (EMC_CLK_DIV_MASK >> EMC_CLK_DIV_SHIFT));
		tegra_emc_clk_sel[i].value <<= EMC_CLK_SOURCE_SHIFT;
		tegra_emc_clk_sel[i].value |= (div_value << EMC_CLK_DIV_SHIFT);

		if ((div_value == 0) &&
		    (tegra_emc_clk_sel[i].input == emc->parent)) {
			tegra_emc_clk_sel[i].value |= EMC_CLK_LOW_JITTER_ENABLE;
		}

		if (pdata->tables[i].burst_regs[MC_EMEM_ARB_MISC0_INDEX] &
		    MC_EMEM_ARB_MISC0_EMC_SAME_FREQ)
			tegra_emc_clk_sel[i].value |= EMC_CLK_MC_SAME_FREQ;
	}

	/* Validate EMC rate and voltage limits */
	if (!max_entry) {
		pr_err("tegra: invalid EMC DFS table: entry for max rate"
		       " %lu kHz is not found\n", max_rate);
		return -EINVAL;
	}

	adjust_emc_dvfs_table(pdata->tables, pdata->num_tables);
	mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
	if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
		pr_err("tegra: invalid EMC DFS table: maximum rate %lu kHz does"
		       " not match nominal voltage %d\n",
		       max_rate, emc->dvfs->max_millivolts);
		return -EINVAL;
	}

	if (!is_emc_bridge()) {
		pr_err("tegra: invalid EMC DFS table: emc bridge not found\n");
		return -EINVAL;
	}
	pr_info("tegra: validated EMC DFS table\n");

	/* Configure clock change mode according to dram type */
	reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
	reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
		EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
	emc_writel(reg, EMC_CFG_2);

	emc_pdev = pdev;

	register_pm_notifier(&tegra_emc_suspend_nb);
	register_pm_notifier(&tegra_emc_resume_nb);

	return 0;
}

static struct platform_driver tegra_emc_driver = {
	.driver		= {
		.name	= "tegra30-emc",
		.owner	= THIS_MODULE,
	},
	.probe		= tegra_emc_probe,
};

static int __init tegra_emc_init(void)
{
	return platform_driver_register(&tegra_emc_driver);
}
device_initcall(tegra_emc_init);

void tegra_emc_timing_invalidate(void)
{
	emc_timing = NULL;
}

void tegra_init_dram_bit_map(const u32 *bit_map, int map_size)
{
	BUG_ON(map_size != 32);
	dram_to_soc_bit_map = bit_map;
}

int tegra_emc_get_dram_type(void)
{
	return dram_type;
}

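/* Undo board-level swizzling of DRAM I/O lines on read-back:
 * dram_to_soc_bit_map[n] holds the SoC-side mask that carries DRAM bit n.
 */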
static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
{
	int bit;
	u32 dram_val = 0;

	/* tegra clock definitions always use shifted masks */
	if (!dram_to_soc_bit_map)
		return soc_val & dram_mask;

	for (bit = dram_shift; bit < 32; bit++) {
		u32 dram_bit_mask = 0x1 << bit;
		u32 soc_bit_mask = dram_to_soc_bit_map[bit];

		if (!(dram_bit_mask & dram_mask))
			break;

		if (soc_bit_mask & soc_val)
			dram_val |= dram_bit_mask;
	}

	return dram_val;
}

static int emc_read_mrr(int dev, int addr)
{
	int ret;
	u32 val;

	if (dram_type != DRAM_TYPE_LPDDR2)
		return -ENODEV;

	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
	if (ret)
		return ret;

	val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
	val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
	emc_writel(val, EMC_MRR);

	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
	if (ret)
		return ret;

	val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
	return val;
}

int tegra_emc_get_dram_temperature(void)
{
	int mr4;
	unsigned long flags;

	spin_lock_irqsave(&emc_access_lock, flags);

	mr4 = emc_read_mrr(0, 4);
	if (IS_ERR_VALUE(mr4)) {
		spin_unlock_irqrestore(&emc_access_lock, flags);
		return mr4;
	}
	spin_unlock_irqrestore(&emc_access_lock, flags);

	mr4 = soc_to_dram_bit_swap(
		mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
	return mr4;
}

int tegra_emc_set_over_temp_state(unsigned long state)
{
	unsigned long flags;

	if (dram_type != DRAM_TYPE_LPDDR2)
		return -ENODEV;

	spin_lock_irqsave(&emc_access_lock, flags);

	/* Update refresh timing if state changed */
	if (emc_timing && (dram_over_temp_state != state)) {
		set_over_temp_timing(emc_timing, state);
		emc_timing_update();
		if (state != DRAM_OVER_TEMP_NONE)
			emc_writel(EMC_REF_FORCE_CMD, EMC_REF);
		dram_over_temp_state = state;
	}
	spin_unlock_irqrestore(&emc_access_lock, flags);
	return 0;
}
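
/*
 * Usage sketch (hypothetical caller such as a DRAM thermal poll loop; the
 * MR4 threshold below is illustrative, not defined by this driver):
 *
 *	int mr4 = tegra_emc_get_dram_temperature();
 *	if (mr4 >= 0)
 *		tegra_emc_set_over_temp_state(mr4 > 3 ?
 *			DRAM_OVER_TEMP_REFRESH : DRAM_OVER_TEMP_NONE);
 */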

int tegra_emc_set_eack_state(unsigned long state)
{
	unsigned long flags;
	u32 mc_override;

	spin_lock_irqsave(&emc_access_lock, flags);

	mc_override = mc_readl(MC_EMEM_ARB_OVERRIDE);

	if (state)
		enable_early_ack(mc_override);
	else
		disable_early_ack(mc_override);

	spin_unlock_irqrestore(&emc_access_lock, flags);
	return 0;
}

#ifdef CONFIG_DEBUG_FS

static struct dentry *emc_debugfs_root;

static int emc_stats_show(struct seq_file *s, void *data)
{
	struct tegra30_emc_pdata *pdata;
	int i;

	pdata = emc_pdev->dev.platform_data;

	emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);

	seq_printf(s, "%-10s %-10s\n", "rate kHz", "time");
	for (i = 0; i < pdata->num_tables; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		seq_printf(s, "%-10lu %-10llu\n", pdata->tables[i].rate,
			   cputime64_to_clock_t(emc_stats.time_at_clock[i]));
	}
	seq_printf(s, "%-15s %llu\n", "transitions:",
		   emc_stats.clkchange_count);
	seq_printf(s, "%-15s %llu\n", "time-stamp:",
		   cputime64_to_clock_t(emc_stats.last_update));

	return 0;
}

static int emc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_stats_show, inode->i_private);
}

static const struct file_operations emc_stats_fops = {
	.open		= emc_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dram_temperature_get(void *data, u64 *val)
{
	*val = tegra_emc_get_dram_temperature();
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
			NULL, "%lld\n");

static int over_temp_state_get(void *data, u64 *val)
{
	*val = dram_over_temp_state;
	return 0;
}
static int over_temp_state_set(void *data, u64 val)
{
	tegra_emc_set_over_temp_state(val);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(over_temp_state_fops, over_temp_state_get,
			over_temp_state_set, "%llu\n");

static int eack_state_get(void *data, u64 *val)
{
	unsigned long flags;
	u32 mc_override;

	spin_lock_irqsave(&emc_access_lock, flags);
	mc_override = mc_readl(MC_EMEM_ARB_OVERRIDE);
	spin_unlock_irqrestore(&emc_access_lock, flags);

	*val = (mc_override & MC_EMEM_ARB_OVERRIDE_EACK_MASK);
	return 0;
}

static int eack_state_set(void *data, u64 val)
{
	tegra_emc_set_eack_state(val);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(eack_state_fops, eack_state_get,
			eack_state_set, "%llu\n");

static int efficiency_get(void *data, u64 *val)
{
	*val = tegra_emc_bw_efficiency;
	return 0;
}
static int efficiency_set(void *data, u64 val)
{
	tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
	if (emc)
		tegra_clk_shared_bus_update(emc);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
			efficiency_set, "%llu\n");

static int __init tegra_emc_debug_init(void)
{
	if (!emc_pdev)
		return 0;

	emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
	if (!emc_debugfs_root)
		return -ENOMEM;

	if (!debugfs_create_file(
		"stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
		goto err_out;

	if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
				 NULL, &dram_temperature_fops))
		goto err_out;

	if (!debugfs_create_file("over_temp_state", S_IRUGO | S_IWUSR,
				 emc_debugfs_root, NULL, &over_temp_state_fops))
		goto err_out;

	if (!debugfs_create_file("eack_state", S_IRUGO | S_IWUSR,
				 emc_debugfs_root, NULL, &eack_state_fops))
		goto err_out;

	if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
				 emc_debugfs_root, NULL, &efficiency_fops))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(emc_debugfs_root);
	return -ENOMEM;
}

late_initcall(tegra_emc_debug_init);
#endif