0a3df4c8334a8df07950cd11a0ced4f8c38a2103
[linux-3.10.git] / arch / arm / mach-tegra / tegra3_emc.c
1 /*
2  * arch/arm/mach-tegra/tegra3_emc.c
3  *
4  * Copyright (C) 2011 NVIDIA Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  *
11  * This program is distributed in the hope that it will be useful, but WITHOUT
12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14  * more details.
15  *
16  * You should have received a copy of the GNU General Public License along
17  * with this program; if not, write to the Free Software Foundation, Inc.,
18  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
19  *
20  */
21
22 #include <linux/kernel.h>
23 #include <linux/clk.h>
24 #include <linux/err.h>
25 #include <linux/io.h>
26 #include <linux/module.h>
27 #include <linux/delay.h>
28 #include <linux/platform_device.h>
29 #include <linux/platform_data/tegra30_emc.h>
30 #include <linux/debugfs.h>
31 #include <linux/seq_file.h>
32
33 #include <asm/cputime.h>
34
35 #include <mach/iomap.h>
36
37 #include "clock.h"
38 #include "tegra3_emc.h"
39
/*
 * EMC frequency scaling on/off switch.  The compile-time default comes
 * from CONFIG_TEGRA_EMC_SCALING_ENABLE; it can be flipped at runtime by
 * root through the "emc_enable" module parameter (mode 0644).
 */
#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
static bool emc_enable;
#endif
module_param(emc_enable, bool, 0644);
46
static struct platform_device *emc_pdev;	/* set once in probe */
/* ioremapped register aperture(s).
 * NOTE(review): declared as a pair, but probe appears to map a single
 * resource — confirm whether the second slot is ever populated. */
static void __iomem *emc_regbases[2];

/* Poll budget (microseconds) for status/auto-cal update completion */
#define EMC_STATUS_UPDATE_TIMEOUT       100
/* Upper bound on DFS table entries honored by this driver */
#define TEGRA_EMC_TABLE_MAX_SIZE        16

/* Direction of the DDR3 DLL state transition between two table entries */
enum {
        DLL_CHANGE_NONE = 0,
        DLL_CHANGE_ON,
        DLL_CHANGE_OFF,
};

/* Field layout of the EMC clock source register in the clock controller */
#define EMC_CLK_DIV_SHIFT               0
#define EMC_CLK_DIV_MASK                (0xFF << EMC_CLK_DIV_SHIFT)
#define EMC_CLK_SOURCE_SHIFT            30
#define EMC_CLK_SOURCE_MASK             (0x3 << EMC_CLK_SOURCE_SHIFT)
#define EMC_CLK_LOW_JITTER_ENABLE       (0x1 << 29)
#define EMC_CLK_MC_SAME_FREQ            (0x1 << 16)
65
/*
 * X-macro list of every EMC/MC shadow ("burst") register that is
 * reprogrammed on each EMC rate change.  The list is expanded twice
 * below with different DEFINE_REG definitions: once into
 * burst_reg_addr[] (the I/O address of each register) and once into an
 * anonymous enum providing <reg>_INDEX constants used to index
 * tegra_emc_table.burst_regs[].  Order here therefore defines both the
 * programming order and the table layout — do not reorder entries.
 */
#define BURST_REG_LIST \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RC)                      \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RP)                      \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD)                  \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD)                  \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT)                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT)                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE)                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST)                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE)                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH)                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM)       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT)     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR)                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD)                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN)               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN)                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN)                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN)                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR)                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL)                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE)                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW)                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB)                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE)              \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP)                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW)                  \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA)              \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG6)               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE)               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ)                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5)               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL)             \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD)      \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS0)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE0)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS0)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ0)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL)        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2)       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2QUSEPADCTRL)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL3)          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT)           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT)            \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG)         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT)                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION)            \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL)    \
								\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG)              \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ)  \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD)       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP)        \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC)        \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS)       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW)       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD)       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE)   \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE)   \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R)       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W)       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W)       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R)       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS)         \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS)        \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0)            \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE)

/* Expansion 1: I/O address of each burst register, in list order */
#define DEFINE_REG(base, reg)   (IO_ADDRESS((base)) + (reg)),
static const void __iomem *burst_reg_addr[TEGRA_EMC_NUM_REGS] = {
	BURST_REG_LIST
};
#undef DEFINE_REG

/* Expansion 2: <reg>_INDEX constants matching burst_reg_addr[] order */
#define DEFINE_REG(base, reg)   reg##_INDEX,
enum {
	BURST_REG_LIST
};
#undef DEFINE_REG
185
/* Clock source/divider selection resolved per table entry at probe */
static struct clk_mux_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
/* Index of the timing table entry currently programmed in h/w */
static int emc_last_sel;
/* Timings read back from h/w before the first table-driven change */
static struct tegra_emc_table start_timing;
/* False until the first successful tegra_emc_set_rate() */
static bool emc_timing_in_sync;

static u32 dram_type;		/* DRAM_TYPE_* — set outside this chunk; TODO confirm source */
static u32 dram_dev_num;	/* number of DRAM devices — TODO confirm source */
static u32 emc_cfg_saved;	/* EMC_CFG snapshot taken at probe */

static struct clk *emc;		/* the "emc" clock, looked up at probe */

/* Per-rate residency accounting, guarded by .spinlock */
static struct {
	cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
	u64 last_update;	/* jiffies64 of the last accounting pass */
	u64 clkchange_count;	/* total completed rate switches */
	spinlock_t spinlock;
} emc_stats;

/* Clock controller aperture; emc->reg (clock source register) is an
 * offset into this base. */
static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
205
206 static inline void emc_writel(int bank, u32 val, unsigned long addr)
207 {
208         writel(val, emc_regbases[bank] + addr);
209         barrier();
210 }
211 static inline u32 emc_readl(int bank, unsigned long addr)
212 {
213         return readl(emc_regbases[bank] + addr);
214 }
215
216 static void emc_last_stats_update(int last_sel)
217 {
218         unsigned long flags;
219         u64 cur_jiffies = get_jiffies_64();
220
221         spin_lock_irqsave(&emc_stats.spinlock, flags);
222
223         emc_stats.time_at_clock[emc_last_sel] =
224                 emc_stats.time_at_clock[emc_last_sel] +
225                         (cur_jiffies - emc_stats.last_update);
226
227         emc_stats.last_update = cur_jiffies;
228
229         if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
230                 emc_stats.clkchange_count++;
231                 emc_last_sel = last_sel;
232         }
233         spin_unlock_irqrestore(&emc_stats.spinlock, flags);
234 }
235
236 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
237 {
238         int i;
239         for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
240                 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
241                         return 0;
242                 udelay(1);
243         }
244         return -ETIMEDOUT;
245 }
246
247 static inline void emc_timing_update(void)
248 {
249         int err;
250
251         emc_writel(0x1, EMC_TIMING_CONTROL);
252         err = wait_for_update(EMC_STATUS,
253                               EMC_STATUS_TIMING_UPDATE_STALLED, false);
254         if (err) {
255                 pr_err("%s: timing update error: %d", __func__, err);
256                 BUG();
257         }
258 }
259
260 static inline void auto_cal_disable(void)
261 {
262         int err;
263
264         emc_writel(0, EMC_AUTO_CAL_INTERVAL);
265         err = wait_for_update(EMC_AUTO_CAL_STATUS,
266                               EMC_AUTO_CAL_STATUS_ACTIVE, false);
267         if (err) {
268                 pr_err("%s: disable auto-cal error: %d", __func__, err);
269                 BUG();
270         }
271 }
272
/*
 * Pre-enable DQS/QUSE vref pad bits that the next timing turns on but
 * the last timing had off, so the references can settle before the
 * actual clock change.  Returns true if any register was written (the
 * caller then waits for vref settling).
 */
static inline bool dqs_preset(const struct tegra_emc_table *next_timing,
                              const struct tegra_emc_table *last_timing)
{
	bool ret = false;

	/* If EMC_<reg>_<bit>_ENABLE is set in next but clear in last,
	   set it in the live register now and report that we did. */
#define DQS_SET(reg, bit)                                                     \
	do {                                                                  \
		if ((next_timing->burst_regs[EMC_##reg##_INDEX] &             \
		     EMC_##reg##_##bit##_ENABLE) &&                           \
		    (!(last_timing->burst_regs[EMC_##reg##_INDEX] &           \
		       EMC_##reg##_##bit##_ENABLE)))   {                      \
			emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
				   | EMC_##reg##_##bit##_ENABLE, EMC_##reg);  \
			ret = true;                                           \
		}                                                             \
	} while (0)

	DQS_SET(XM2DQSPADCTRL2, VREF);
	DQS_SET(XM2DQSPADCTRL3, VREF);
	DQS_SET(XM2QUSEPADCTRL, IVREF);

	return ret;
}
296
297 static inline void overwrite_mrs_wait_cnt(
298         const struct tegra_emc_table *next_timing,
299         bool zcal_long)
300 {
301         u32 reg;
302         u32 cnt = 512;
303
304         /* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
305            for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
306            expected operation length. Reduce the latter by the overlapping
307            zq-calibration, if any */
308         if (zcal_long)
309                 cnt -= dram_dev_num * 256;
310
311         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
312                 EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
313                 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
314         if (cnt < reg)
315                 cnt = reg;
316
317         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
318                 (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
319         reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
320                 EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
321
322         emc_writel(reg, EMC_MRS_WAIT_CNT);
323 }
324
325 static inline bool need_qrst(const struct tegra_emc_table *next_timing,
326                              const struct tegra_emc_table *last_timing,
327                              u32 emc_dpd_reg)
328 {
329         u32 last_mode = (last_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
330                 EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;
331         u32 next_mode = (next_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
332                 EMC_CFG5_QUSE_MODE_MASK) >> EMC_CFG5_QUSE_MODE_SHIFT;
333
334         /* QUSE DPD is disabled */
335         bool ret = !(emc_dpd_reg & EMC_SEL_DPD_CTRL_QUSE_DPD_ENABLE) &&
336
337         /* QUSE uses external mode before or after clock change */
338                 (((last_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
339                   (last_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK)) ||
340                  ((next_mode != EMC_CFG5_QUSE_MODE_PULSE_INTERN) &&
341                   (next_mode != EMC_CFG5_QUSE_MODE_INTERNAL_LPBK)))  &&
342
343         /* QUSE pad switches from schmitt to vref mode */
344                 (((last_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
345                    EMC_XM2QUSEPADCTRL_IVREF_ENABLE) == 0) &&
346                  ((next_timing->burst_regs[EMC_XM2QUSEPADCTRL_INDEX] &
347                    EMC_XM2QUSEPADCTRL_IVREF_ENABLE) != 0));
348
349         return ret;
350 }
351
352 static inline void periodic_qrst_enable(u32 emc_cfg_reg, u32 emc_dbg_reg)
353 {
354         /* enable write mux => enable periodic QRST => restore mux */
355         emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
356         emc_writel(emc_cfg_reg | EMC_CFG_PERIODIC_QRST, EMC_CFG);
357         emc_writel(emc_dbg_reg, EMC_DBG);
358 }
359
360 static inline void periodic_qrst_restore(u32 emc_cfg_reg, u32 emc_dbg_reg)
361 {
362         /* enable write mux => restore periodic QRST => restore mux */
363         emc_writel(emc_dbg_reg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
364         emc_writel(emc_cfg_reg, EMC_CFG);
365         emc_writel(emc_dbg_reg, EMC_DBG);
366 }
367
368 static inline int get_dll_change(const struct tegra_emc_table *next_timing,
369                                  const struct tegra_emc_table *last_timing)
370 {
371         bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
372         bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
373
374         if (next_dll_enabled == last_dll_enabled)
375                 return DLL_CHANGE_NONE;
376         else if (next_dll_enabled)
377                 return DLL_CHANGE_ON;
378         else
379                 return DLL_CHANGE_OFF;
380 }
381
382 static inline void set_dram_mode(const struct tegra_emc_table *next_timing,
383                                  const struct tegra_emc_table *last_timing,
384                                  int dll_change)
385 {
386         if (dram_type == DRAM_TYPE_DDR3) {
387                 /* first mode_1, then mode_2, then mode_reset*/
388                 if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
389                         emc_writel(next_timing->emc_mode_1, EMC_EMRS);
390                 if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
391                         emc_writel(next_timing->emc_mode_2, EMC_EMRS);
392
393                 if ((next_timing->emc_mode_reset !=
394                      last_timing->emc_mode_reset) ||
395                     (dll_change == DLL_CHANGE_ON))
396                 {
397                         u32 reg = next_timing->emc_mode_reset &
398                                 (~EMC_MODE_SET_DLL_RESET);
399                         if (dll_change == DLL_CHANGE_ON) {
400                                 reg |= EMC_MODE_SET_DLL_RESET;
401                                 reg |= EMC_MODE_SET_LONG_CNT;
402                         }
403                         emc_writel(reg, EMC_MRS);
404                 }
405         } else {
406                 /* first mode_2, then mode_1; mode_reset is not applicable */
407                 if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
408                         emc_writel(next_timing->emc_mode_2, EMC_MRW);
409                 if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
410                         emc_writel(next_timing->emc_mode_1, EMC_MRW);
411         }
412 }
413
414 static inline void do_clock_change(u32 clk_setting)
415 {
416         int err;
417
418         mc_readl(MC_EMEM_ADR_CFG);      /* completes prev writes */
419         writel(clk_setting, clk_base + emc->reg);
420
421         err = wait_for_update(EMC_INTSTATUS,
422                               EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
423         if (err) {
424                 pr_err("%s: clock change completion error: %d", __func__, err);
425                 BUG();
426         }
427 }
428
/*
 * Execute the full EMC rate-change sequence: program the shadow
 * registers for @next_timing, then switch the clock to @clk_setting.
 * The numbered steps are a strictly ordered hardware sequence; between
 * flow-control marker 1 (step 5) and the clock change (step 14) no EMC
 * register may be read.  Failures inside the sequence BUG() because
 * memory timing would be left undefined.
 */
static noinline void emc_set_clock(const struct tegra_emc_table *next_timing,
				   const struct tegra_emc_table *last_timing,
				   u32 clk_setting)
{
	int i, dll_change, pre_wait;
	bool dyn_sref_enabled, vref_cal_toggle, qrst_used, zcal_long;

	u32 emc_cfg_reg = emc_readl(EMC_CFG);
	u32 emc_dbg_reg = emc_readl(EMC_DBG);

	dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
	dll_change = get_dll_change(next_timing, last_timing);
	/* zcal_long: periodic zq-cal is being turned on by this change */
	zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
		(last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

	/* FIXME: remove steps enumeration below? */

	/* 1. clear clkchange_complete interrupts */
	emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

	/* 2. disable dynamic self-refresh and preset dqs vref, then wait for
	   possible self-refresh entry/exit and/or dqs vref settled - waiting
	   before the clock change decreases worst case change stall time */
	pre_wait = 0;
	if (dyn_sref_enabled) {
		emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
		pre_wait = 5;		/* 5us+ for self-refresh entry/exit */
	}
	if (dqs_preset(next_timing, last_timing)) {
		if (pre_wait < 3)
			pre_wait = 3;	/* 3us+ for dqs vref settled */
	}
	if (pre_wait) {
		emc_timing_update();
		udelay(pre_wait);
	}

	/* 3. disable auto-cal if vref mode is switching */
	vref_cal_toggle = (next_timing->emc_acal_interval != 0) &&
		((next_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX] ^
		  last_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX]) &
		 EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE);
	if (vref_cal_toggle)
		auto_cal_disable();

	/* 4. program burst shadow registers */
	for (i = 0; i < TEGRA_EMC_NUM_REGS; i++)
		__raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
	wmb();
	barrier();

	/* On ddr3 when DLL is re-started predict MRS long wait count and
	   overwrite DFS table setting */
	if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
		overwrite_mrs_wait_cnt(next_timing, zcal_long);

	/* the last read below makes sure prev writes are completed */
	qrst_used = need_qrst(next_timing, last_timing,
			      emc_readl(EMC_SEL_DPD_CTRL));

	/* 5. flow control marker 1 (no EMC read access after this) */
	emc_writel(1, EMC_STALL_BEFORE_CLKCHANGE);

	/* 6. enable periodic QRST */
	if (qrst_used)
		periodic_qrst_enable(emc_cfg_reg, emc_dbg_reg);

	/* 6.1 disable auto-refresh to save time after clock change */
	emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 7. turn Off dll and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			emc_writel(next_timing->emc_mode_1, EMC_EMRS);
		emc_writel(DRAM_BROADCAST(dram_dev_num) |
			   EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
	}

	/* 8. flow control marker 2 */
	emc_writel(1, EMC_STALL_AFTER_CLKCHANGE);

	/* 9. exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		emc_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);

	/* 10. restore periodic QRST */
	if ((qrst_used) || (next_timing->emc_periodic_qrst !=
			    last_timing->emc_periodic_qrst)) {
		emc_cfg_reg = next_timing->emc_periodic_qrst ?
			emc_cfg_reg | EMC_CFG_PERIODIC_QRST :
			emc_cfg_reg & (~EMC_CFG_PERIODIC_QRST);
		periodic_qrst_restore(emc_cfg_reg, emc_dbg_reg);
	}

	/* 11. set dram mode registers */
	set_dram_mode(next_timing, last_timing, dll_change);

	/* 12. issue zcal command if turning zcal On */
	if (zcal_long) {
		emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
		if (dram_dev_num > 1)
			emc_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
	}

	/* 13. flow control marker 3 */
	emc_writel(1, EMC_UNSTALL_RW_AFTER_CLKCHANGE);

	/* 14. read any MC register to ensure the programming is done
	       change EMC clock source register (EMC read access restored)
	       wait for clk change completion */
	do_clock_change(clk_setting);

	/* 14.1 re-enable auto-refresh */
	emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 15. restore auto-cal */
	if (vref_cal_toggle)
		emc_writel(next_timing->emc_acal_interval,
			   EMC_AUTO_CAL_INTERVAL);

	/* 16. restore dynamic self-refresh */
	if (dyn_sref_enabled) {
		emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
	}

	/* 17. set zcal wait count */
	if (zcal_long)
		emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

	/* 18. update restored timing */
	udelay(2);
	emc_timing_update();
}
564
565 static inline void emc_get_timing(struct tegra_emc_table *timing)
566 {
567         int i;
568
569         for (i = 0; i < TEGRA_EMC_NUM_REGS; i++)
570                 timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
571         timing->emc_acal_interval = 0;
572         timing->emc_zcal_cnt_long = 0;
573         timing->emc_mode_reset = 0;
574         timing->emc_mode_1 = 0;
575         timing->emc_mode_2 = 0;
576         timing->emc_periodic_qrst = (emc_readl(EMC_CFG) &
577                                      EMC_CFG_PERIODIC_QRST) ? 1 : 0;
578 }
579
580 /* After deep sleep EMC power features are not restored.
581  * Do it at run-time after the 1st clock change.
582  */
583 static inline void emc_cfg_power_restore(void)
584 {
585         u32 reg = emc_readl(EMC_CFG);
586         if ((reg ^ emc_cfg_saved) & EMC_CFG_PWR_MASK) {
587                 reg = (reg & (~EMC_CFG_PWR_MASK)) |
588                         (emc_cfg_saved & EMC_CFG_PWR_MASK);
589                 emc_writel(reg, EMC_CFG);
590                 emc_timing_update();
591         }
592 }
593
594 /* The EMC registers have shadow registers. When the EMC clock is updated
595  * in the clock controller, the shadow registers are copied to the active
596  * registers, allowing glitchless memory bus frequency changes.
597  * This function updates the shadow registers for a new clock frequency,
598  * and relies on the clock lock on the emc clock to avoid races between
599  * multiple frequency changes */
600 int tegra_emc_set_rate(unsigned long rate)
601 {
602         int i;
603         u32 clk_setting;
604         const struct tegra_emc_table *last_timing;
605
606         if (!tegra_emc_table)
607                 return -EINVAL;
608
609         /* Table entries specify rate in kHz */
610         rate = rate / 1000;
611
612         for (i = 0; i < tegra_emc_table_size; i++) {
613                 if (tegra_emc_clk_sel[i].input == NULL)
614                         continue;       /* invalid entry */
615
616                 if (tegra_emc_table[i].rate == rate)
617                         break;
618         }
619
620         if (i >= tegra_emc_table_size)
621                 return -EINVAL;
622
623         if (!emc_timing_in_sync) {
624                 /* can not assume that boot timing matches dfs table even
625                    if boot frequency matches one of the table nodes */
626                 emc_get_timing(&start_timing);
627                 last_timing = &start_timing;
628         }
629         else
630                 last_timing = &tegra_emc_table[emc_last_sel];
631
632         clk_setting = tegra_emc_clk_sel[i].value;
633         emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
634         if (!emc_timing_in_sync)
635                 emc_cfg_power_restore();
636         emc_timing_in_sync = true;
637         emc_last_stats_update(i);
638
639         pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);
640
641         return 0;
642 }
643
644 /* Select the closest EMC rate that is higher than the requested rate */
645 long tegra_emc_round_rate(unsigned long rate)
646 {
647         int i;
648         int best = -1;
649         unsigned long distance = ULONG_MAX;
650
651         if (!tegra_emc_table)
652                 return -EINVAL;
653
654         if (!emc_enable)
655                 return -EINVAL;
656
657         pr_debug("%s: %lu\n", __func__, rate);
658
659         /* Table entries specify rate in kHz */
660         rate = rate / 1000;
661
662         for (i = 0; i < tegra_emc_table_size; i++) {
663                 if (tegra_emc_clk_sel[i].input == NULL)
664                         continue;       /* invalid entry */
665
666                 if (tegra_emc_table[i].rate >= rate &&
667                     (tegra_emc_table[i].rate - rate) < distance) {
668                         distance = tegra_emc_table[i].rate - rate;
669                         best = i;
670                 }
671         }
672
673         if (best < 0)
674                 return -EINVAL;
675
676         pr_debug("%s: using %lu\n", __func__, tegra_emc_table[best].rate);
677
678         return tegra_emc_table[best].rate * 1000;
679 }
680
681 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
682 {
683         int i;
684
685         if (!tegra_emc_table)
686                 return NULL;
687
688         pr_debug("%s: %lu\n", __func__, rate);
689
690         /* Table entries specify rate in kHz */
691         rate = rate / 1000;
692
693         for (i = 0; i < tegra_emc_table_size; i++) {
694                 if (tegra_emc_table[i].rate == rate) {
695                         *div_value = (tegra_emc_clk_sel[i].value &
696                                 EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
697                         return tegra_emc_clk_sel[i].input;
698                 }
699         }
700
701         return NULL;
702 }
703
704 static const struct clk_mux_sel *find_matching_input(
705         unsigned long table_rate,
706         u32 *div_value)
707 {
708         unsigned long inp_rate;
709         const struct clk_mux_sel *sel;
710
711         for (sel = emc->inputs; sel->input != NULL; sel++) {
712                 /* Table entries specify rate in kHz */
713                 inp_rate = clk_get_rate(sel->input) / 1000;
714
715                 if ((inp_rate >= table_rate) &&
716                      (inp_rate % table_rate == 0)) {
717                         *div_value = 2 * inp_rate / table_rate - 2;
718                         return sel;
719                 }
720         }
721         return NULL;
722 }
723
724 static int tegra_emc_probe(struct platform_device *pdev)
725 {
726         struct tegra_emc_pdata *pdata = NULL;
727         struct resource *res;
728         int i;
729         u32 reg, div_value;
730         bool max_entry = false;
731         unsigned long boot_rate, max_rate;
732         const struct clk_mux_sel *sel;
733
734         emc_cfg_saved = emc_readl(EMC_CFG);
735
736         emc_stats.clkchange_count = 0;
737         spin_lock_init(&emc_stats.spinlock);
738         emc_stats.last_update = get_jiffies_64();
739
740         emc = tegra_get_clock_by_name("emc");
741         BUG_ON(!emc);
742         boot_rate = clk_get_rate(emc) / 1000;
743         max_rate = clk_get_max_rate(emc) / 1000;
744
745         if (emc->parent != tegra_get_clock_by_name("pll_m")) {
746                 pr_warn("tegra: boot parent %s is not supported by EMC DFS\n",
747                         emc->parent->name);
748                 return;
749         }
750
751         if (!emc_enable) {
752                 dev_err(&pdev->dev, "disabled per module parameter\n");
753                 return -ENODEV;
754         }
755
756         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
757         if (!res) {
758                 dev_err(&pdev->dev, "missing register base\n");
759                 return -ENOMEM;
760         }
761
762         emc_regbase = devm_request_and_ioremap(&pdev->dev, res);
763         if (!emc_regbase) {
764                 dev_err(&pdev->dev, "failed to remap registers\n");
765                 return -ENOMEM;
766         }
767
768         pdev->dev.platform_data = pdata;
769
770         emc_pdev = pdev;
771
772         tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
773         for (i = 0; i < tegra_emc_table_size; i++) {
774                 unsigned long table_rate = table[i].rate;
775                 if (!table_rate)
776                         continue;
777
778                 sel = find_matching_input(table_rate, &div_value);
779                 if (!sel)
780                         continue;
781
782                 if (table_rate == boot_rate)
783                         emc_last_sel = i;
784
785                 if (table_rate == max_rate)
786                         max_entry = true;
787
788                 tegra_emc_clk_sel[i] = *sel;
789                 BUG_ON(div_value >
790                        (EMC_CLK_DIV_MASK >> EMC_CLK_DIV_SHIFT));
791                 tegra_emc_clk_sel[i].value <<= EMC_CLK_SOURCE_SHIFT;
792                 tegra_emc_clk_sel[i].value |= (div_value << EMC_CLK_DIV_SHIFT);
793
794                 if ((div_value == 0) &&
795                     (tegra_emc_clk_sel[i].input == emc->parent)) {
796                         tegra_emc_clk_sel[i].value |= EMC_CLK_LOW_JITTER_ENABLE;
797                 }
798
799                 if (table[i].burst_regs[MC_EMEM_ARB_MISC0_INDEX] &
800                     MC_EMEM_ARB_MISC0_EMC_SAME_FREQ)
801                         tegra_emc_clk_sel[i].value |= EMC_CLK_MC_SAME_FREQ;
802         }
803
804         dram_type = (emc_readl(EMC_FBIO_CFG5) &
805                      EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;
806         if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
807                 pr_err("Not supported DRAM type %u\n", dram_type);
808                 return;
809         }
810         reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
811         reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
812                 EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
813         emc_writel(reg, EMC_CFG_2);
814
815         dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
816
817         if (!max_entry) {
818                 pr_err("tegra: invalid EMC DFS table: entry for max rate"
819                        " %lu kHz is not found\n", max_rate);
820                 return;
821         }
822         pr_info("tegra: validated EMC DFS table\n");
823         tegra_emc_table = table;
824         return 0;
825 }
826
/* Platform driver bound to the "tegra30-emc" device */
static struct platform_driver tegra_emc_driver = {
	.driver		= {
		.name	= "tegra30-emc",
		.owner	= THIS_MODULE,
	},
	.probe		= tegra_emc_probe,
};
834
835 static int __init tegra_init_emc(void)
836 {
837         return platform_driver_register(&tegra_emc_driver);
838 }
839 device_initcall(tegra_emc_init);
840
/*
 * Mark the cached EMC timing state as stale. The consumer of
 * emc_timing_in_sync is outside this chunk — presumably the next rate
 * change re-synchronizes the timing registers; confirm against callers.
 */
void tegra_emc_timing_invalidate(void)
{
	emc_timing_in_sync = false;
}
845
846 #ifdef CONFIG_DEBUG_FS
847
848 static struct dentry *emc_debugfs_root;
849
850 static int emc_stats_show(struct seq_file *s, void *data)
851 {
852         int i;
853
854         emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
855
856         seq_printf(s, "%-10s %-10s \n", "rate kHz", "time");
857         for (i = 0; i < tegra_emc_table_size; i++) {
858                 if (tegra_emc_clk_sel[i].input == NULL)
859                         continue;       /* invalid entry */
860
861                 seq_printf(s, "%-10lu %-10llu \n", tegra_emc_table[i].rate,
862                            cputime64_to_clock_t(emc_stats.time_at_clock[i]));
863         }
864         seq_printf(s, "%-15s %llu\n", "transitions:",
865                    emc_stats.clkchange_count);
866         seq_printf(s, "%-15s %llu\n", "time-stamp:",
867                    cputime64_to_clock_t(emc_stats.last_update));
868
869         return 0;
870 }
871
/* Single-shot seq_file open wired to emc_stats_show */
static int emc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_stats_show, inode->i_private);
}
876
/* File operations for the debugfs "stats" node (read-only seq_file) */
static const struct file_operations emc_stats_fops = {
	.open		= emc_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
883
884 static int __init tegra_emc_debug_init(void)
885 {
886         if (!tegra_emc_table)
887                 return 0;
888
889         emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
890         if (!emc_debugfs_root)
891                 return -ENOMEM;
892
893         if (!debugfs_create_file(
894                 "stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
895                 goto err_out;
896
897         return 0;
898
899 err_out:
900         debugfs_remove_recursive(emc_debugfs_root);
901         return -ENOMEM;
902 }
903
904 late_initcall(tegra_emc_debug_init);
905 #endif