ARM: tegra11: clock: Optimize traversing EMC DFS table
[linux-2.6.git] / arch / arm / mach-tegra / tegra11_emc.c
1 /*
2  * arch/arm/mach-tegra/tegra11_emc.c
3  *
4  * Copyright (C) 2011-2012 NVIDIA Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
18  *
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include <linux/module.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/platform_data/tegra_emc.h>
29 #include <linux/debugfs.h>
30 #include <linux/seq_file.h>
31 #include <linux/hrtimer.h>
32
33 #include <asm/cputime.h>
34
35 #include <mach/iomap.h>
36
37 #include "clock.h"
38 #include "dvfs.h"
39 #include "tegra11_emc.h"
40
41 #ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
42 static bool emc_enable = true;
43 #else
44 static bool emc_enable;
45 #endif
46 module_param(emc_enable, bool, 0644);
47
48 u8 tegra_emc_bw_efficiency = 100;
49
50 #define PLL_C_DIRECT_FLOOR              333500000
51 #define EMC_STATUS_UPDATE_TIMEOUT       100
52 #define TEGRA_EMC_TABLE_MAX_SIZE        16
53
54 enum {
55         DLL_CHANGE_NONE = 0,
56         DLL_CHANGE_ON,
57         DLL_CHANGE_OFF,
58 };
59
60 #define EMC_CLK_DIV_SHIFT               0
61 #define EMC_CLK_DIV_MASK                (0xFF << EMC_CLK_DIV_SHIFT)
62 #define EMC_CLK_SOURCE_SHIFT            29
63 #define EMC_CLK_SOURCE_MASK             (0x7 << EMC_CLK_SOURCE_SHIFT)
64 #define EMC_CLK_LOW_JITTER_ENABLE       (0x1 << 31)
65 #define EMC_CLK_MC_SAME_FREQ            (0x1 << 16)
66
/* FIXME: actual Tegra11 list */
68 #define BURST_REG_LIST \
69         DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),                     \
70         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),                    \
71         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR),                \
72         DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),                    \
73         DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),                     \
74         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),                    \
75         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),                    \
76         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),                    \
77         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),                    \
78         DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),                 \
79         DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),                 \
80         DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),                    \
81         DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),                   \
82         DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),                   \
83         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),                    \
84         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK),               \
85         DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY),                  \
86         DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA),           \
87         DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_2),             \
88         DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),                   \
89         DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK),               \
90         DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),                \
91         DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),      \
92         DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),    \
93         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),                \
94         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),                \
95         DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),              \
96         DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),               \
97         DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),                \
98         DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),                \
99         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),                   \
100         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),                \
101         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),                   \
102         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR),                 \
103         DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD),                    \
104         DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),                   \
105         DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),                  \
106         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),             \
107         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),               \
108         DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),                 \
109         DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA),             \
110         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),              \
111         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),               \
112         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),              \
113         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),            \
114         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),     \
115         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),         \
116         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),         \
117         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),         \
118         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),         \
119         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),        \
120         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),        \
121         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),        \
122         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),        \
123         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),        \
124         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),        \
125         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),        \
126         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),        \
127         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),          \
128         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL4),         \
129         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),         \
130         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),          \
131         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL),          \
132         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),         \
133         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),       \
134         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),      \
135         DEFINE_REG(TEGRA_EMC_BASE, EMC_DSR_VTTGEN_DRV),         \
136         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN),            \
137         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),             \
138         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL),          \
139         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),          \
140         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),          \
141         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),           \
142         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2),          \
143         DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG2),       \
144         DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG3),       \
145         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),                    \
146         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),           \
147         DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),   \
148         DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL1),       \
149         DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL2),       \
150                                                                         \
151         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),             \
152         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
153         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),      \
154         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),       \
155         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),       \
156         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),      \
157         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),      \
158         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),      \
159         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),  \
160         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),  \
161         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),      \
162         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),      \
163         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),      \
164         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),      \
165         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),        \
166         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),       \
167         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),           \
168         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),
169
170 #define BURST_UP_DOWN_REG_LIST \
171         DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT),     \
172         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_0),   \
173         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_1),   \
174         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_0),   \
175         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_0),  \
176         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_2),   \
177         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_1),   \
178         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_1),  \
179         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_3),   \
180         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_0),  \
181         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_1),
182
183 #define EMC_TRIMMERS_REG_LIST \
184         DEFINE_REG(0, EMC_CDB_CNTL_1),                          \
185         DEFINE_REG(0, EMC_FBIO_CFG6),                           \
186         DEFINE_REG(0, EMC_QUSE),                                \
187         DEFINE_REG(0, EMC_EINPUT),                              \
188         DEFINE_REG(0, EMC_EINPUT_DURATION),                     \
189         DEFINE_REG(0, EMC_DLL_XFORM_DQS0),                      \
190         DEFINE_REG(0, EMC_QSAFE),                               \
191         DEFINE_REG(0, EMC_DLL_XFORM_QUSE0),                     \
192         DEFINE_REG(0, EMC_RDV),                                 \
193         DEFINE_REG(0, EMC_XM2DQSPADCTRL4),                      \
194         DEFINE_REG(0, EMC_XM2DQSPADCTRL3),                      \
195         DEFINE_REG(0, EMC_DLL_XFORM_DQ0),                       \
196         DEFINE_REG(0, EMC_AUTO_CAL_CONFIG),                     \
197         DEFINE_REG(0, EMC_DLL_XFORM_ADDR0),                     \
198         DEFINE_REG(0, EMC_XM2CLKPADCTRL2),                      \
199         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS0),                     \
200         DEFINE_REG(0, EMC_DLL_XFORM_ADDR1),                     \
201         DEFINE_REG(0, EMC_DLL_XFORM_ADDR2),                     \
202         DEFINE_REG(0, EMC_DLL_XFORM_DQS1),                      \
203         DEFINE_REG(0, EMC_DLL_XFORM_DQS2),                      \
204         DEFINE_REG(0, EMC_DLL_XFORM_DQS3),                      \
205         DEFINE_REG(0, EMC_DLL_XFORM_DQ1),                       \
206         DEFINE_REG(0, EMC_DLL_XFORM_DQ2),                       \
207         DEFINE_REG(0, EMC_DLL_XFORM_DQ3),                       \
208         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS1),                     \
209         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS2),                     \
210         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS3),                     \
211         DEFINE_REG(0, EMC_DLL_XFORM_QUSE1),                     \
212         DEFINE_REG(0, EMC_DLL_XFORM_QUSE2),                     \
213         DEFINE_REG(0, EMC_DLL_XFORM_QUSE3),
214
215
216 #define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
217 static const void __iomem *burst_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
218         BURST_REG_LIST
219 };
220 #ifndef EMULATE_CLOCK_SWITCH
221 static const void __iomem *burst_up_down_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
222         BURST_UP_DOWN_REG_LIST
223 };
224 #endif
225 #undef DEFINE_REG
226
227
228 #define DEFINE_REG(base, reg) (reg)
229 #ifndef EMULATE_CLOCK_SWITCH
230 static const u32 emc_trimmer_offs[TEGRA11_EMC_MAX_NUM_REGS] = {
231         EMC_TRIMMERS_REG_LIST
232 };
233 #endif
234 #undef DEFINE_REG
235
236
237 #define DEFINE_REG(base, reg)   reg##_INDEX
238 enum {
239         BURST_REG_LIST
240 };
241 #undef DEFINE_REG
242
243 #define DEFINE_REG(base, reg)   reg##_TRIM_INDEX
244 enum {
245         EMC_TRIMMERS_REG_LIST
246 };
247 #undef DEFINE_REG
248
249
250 struct emc_sel {
251         struct clk      *input;
252         u32             value;
253         unsigned long   input_rate;
254 };
255 static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
256 static struct tegra11_emc_table start_timing;
257 static const struct tegra11_emc_table *emc_timing;
258
259 static ktime_t clkchange_time;
260 static int clkchange_delay = 100;
261
262 static const u32 *dram_to_soc_bit_map;
263 static const struct tegra11_emc_table *tegra_emc_table;
264 static int tegra_emc_table_size;
265
266 static u32 dram_dev_num;
267 static u32 dram_type = -1;
268
269 static struct clk *emc;
270
271 static struct {
272         cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
273         int last_sel;
274         u64 last_update;
275         u64 clkchange_count;
276         spinlock_t spinlock;
277 } emc_stats;
278
279 static DEFINE_SPINLOCK(emc_access_lock);
280
281 static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
282 static void __iomem *emc0_base = IO_ADDRESS(TEGRA_EMC0_BASE);
283 static void __iomem *emc1_base = IO_ADDRESS(TEGRA_EMC1_BASE);
284 static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
285 static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
286
/* Write @val to the common EMC aperture at offset @addr.
 * NOTE(review): casting an __iomem pointer through u32 assumes a
 * 32-bit address space — fine for this SoC, not portable. */
static inline void emc_writel(u32 val, unsigned long addr)
{
	writel(val, (u32)emc_base + addr);
}
/* Write @val to EMC channel 0 at offset @addr */
static inline void emc0_writel(u32 val, unsigned long addr)
{
	writel(val, (u32)emc0_base + addr);
}
/* Write @val to EMC channel 1 at offset @addr */
static inline void emc1_writel(u32 val, unsigned long addr)
{
	writel(val, (u32)emc1_base + addr);
}
/* Read the common EMC aperture at offset @addr */
static inline u32 emc_readl(unsigned long addr)
{
	return readl((u32)emc_base + addr);
}
/* Write @val to the memory controller at offset @addr */
static inline void mc_writel(u32 val, unsigned long addr)
{
	writel(val, (u32)mc_base + addr);
}
/* Read the memory controller at offset @addr */
static inline u32 mc_readl(unsigned long addr)
{
	return readl((u32)mc_base + addr);
}
311
/* Queue a deferred register write through the EMC clock-change FIFO:
 * the data word is staged first, then the target register offset.
 * NOTE(review): presumably the CCFIFO_ADDR write is what enqueues the
 * (addr, data) pair for execution at the clock switch — confirm against
 * the TRM; the data-before-addr order must not be swapped. */
static inline void ccfifo_writel(u32 val, unsigned long addr)
{
	writel(val, (u32)emc_base + EMC_CCFIFO_DATA);
	writel(addr, (u32)emc_base + EMC_CCFIFO_ADDR);
}
317
318 static int last_round_idx;
319 static inline int get_start_idx(unsigned long rate)
320 {
321         if (tegra_emc_table[last_round_idx].rate == rate)
322                 return last_round_idx;
323         return 0;
324 }
325
326 static void emc_last_stats_update(int last_sel)
327 {
328         unsigned long flags;
329         u64 cur_jiffies = get_jiffies_64();
330
331         spin_lock_irqsave(&emc_stats.spinlock, flags);
332
333         if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
334                 emc_stats.time_at_clock[emc_stats.last_sel] =
335                         emc_stats.time_at_clock[emc_stats.last_sel] +
336                         (cur_jiffies - emc_stats.last_update);
337
338         emc_stats.last_update = cur_jiffies;
339
340         if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
341                 emc_stats.clkchange_count++;
342                 emc_stats.last_sel = last_sel;
343         }
344         spin_unlock_irqrestore(&emc_stats.spinlock, flags);
345 }
346
347 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
348 {
349         int i;
350         for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
351                 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
352                         return 0;
353                 udelay(1);
354         }
355         return -ETIMEDOUT;
356 }
357
/* Trigger a copy of the shadow timing registers into the active set by
 * writing EMC_TIMING_CONTROL, then wait for the update-stalled status
 * bit to clear.  A timeout here means the memory controller is wedged,
 * so the machine is halted with BUG(). */
static inline void emc_timing_update(void)
{
	int err;

	emc_writel(0x1, EMC_TIMING_CONTROL);
	err = wait_for_update(EMC_STATUS,
			      EMC_STATUS_TIMING_UPDATE_STALLED, false);
	if (err) {
		pr_err("%s: timing update error: %d", __func__, err);
		BUG();
	}
}
370
/* Stop periodic pad auto-calibration by zeroing its interval register,
 * then wait for any in-flight calibration to finish (ACTIVE bit clear).
 * BUG() on timeout — continuing with auto-cal active is unsafe.
 * NOTE(review): not referenced in this chunk; presumably used by code
 * outside the visible range or kept for a removed step. */
static inline void auto_cal_disable(void)
{
	int err;

	emc_writel(0, EMC_AUTO_CAL_INTERVAL);
	err = wait_for_update(EMC_AUTO_CAL_STATUS,
			      EMC_AUTO_CAL_STATUS_ACTIVE, false);
	if (err) {
		pr_err("%s: disable auto-cal error: %d", __func__, err);
		BUG();
	}
}
383
/* Preset DQS VREF ahead of a timing change.
 *
 * If the next timing enables the VREF bit in XM2DQSPADCTRL2 while the
 * last timing has it disabled, turn the bit on now (on top of the last
 * timing's value) so VREF can settle before the clock switch.  Returns
 * true when a register was written; the caller then inserts a settling
 * delay (see step 2.5 in emc_set_clock). */
static inline bool dqs_preset(const struct tegra11_emc_table *next_timing,
			      const struct tegra11_emc_table *last_timing)
{
	bool ret = false;

/* Set EMC_<reg> bit <bit> early if next timing enables it and last did not */
#define DQS_SET(reg, bit)                                                     \
	do {                                                                  \
		if ((next_timing->burst_regs[EMC_##reg##_INDEX] &             \
		     EMC_##reg##_##bit##_ENABLE) &&                           \
		    (!(last_timing->burst_regs[EMC_##reg##_INDEX] &           \
		       EMC_##reg##_##bit##_ENABLE)))   {                      \
			emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
				   | EMC_##reg##_##bit##_ENABLE, EMC_##reg);  \
			ret = true;                                           \
		}                                                             \
	} while (0)

/* Per-channel variant operating on the trimmer arrays.
 * NOTE(review): defined but not expanded anywhere in this function —
 * presumably kept for future per-channel presets; confirm before removal. */
#define DQS_SET_TRIM(reg, bit, ch)                                             \
	do {                                                                   \
		if ((next_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX]    \
		     & EMC_##reg##_##bit##_ENABLE) &&                          \
		    (!(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX]  \
		       & EMC_##reg##_##bit##_ENABLE)))   {                     \
			emc##ch##_writel(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX] \
				   | EMC_##reg##_##bit##_ENABLE, EMC_##reg);   \
			ret = true;                                            \
		}                                                              \
	} while (0)

	DQS_SET(XM2DQSPADCTRL2, VREF);

	return ret;
}
418
/* Predict and program the MRS long wait count for a DDR3 DLL restart.
 *
 * Replaces the LONG_WAIT field of EMC_MRS_WAIT_CNT with max(expected
 * DLL-restart length, table SHORT_WAIT value), where the expected
 * length starts at 512 cycles and is reduced by 256 per device when an
 * overlapping long zq-calibration (@zcal_long) will run concurrently. */
static inline void overwrite_mrs_wait_cnt(
	const struct tegra11_emc_table *next_timing,
	bool zcal_long)
{
	u32 reg;
	u32 cnt = 512;

	/* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
	   for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
	   expected operation length. Reduce the latter by the overlapping
	   zq-calibration, if any */
	if (zcal_long)
		cnt -= dram_dev_num * 256;

	/* floor at the table's SHORT_WAIT setting */
	reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
		EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
		EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
	if (cnt < reg)
		cnt = reg;

	/* splice the computed count into the LONG_WAIT field only */
	reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
		(~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
	reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
		EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;

	emc_writel(reg, EMC_MRS_WAIT_CNT);
}
446
447 static inline int get_dll_change(const struct tegra11_emc_table *next_timing,
448                                  const struct tegra11_emc_table *last_timing)
449 {
450         bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
451         bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
452
453         if (next_dll_enabled == last_dll_enabled)
454                 return DLL_CHANGE_NONE;
455         else if (next_dll_enabled)
456                 return DLL_CHANGE_ON;
457         else
458                 return DLL_CHANGE_OFF;
459 }
460
/* Queue DRAM mode-register writes into the clock-change FIFO for any
 * register that differs between @last_timing and @next_timing.  The
 * ordering per DRAM type is deliberate and must be preserved: DDR3
 * writes MR1, MR2, then MRS-reset (forcing a reset with DLL_RESET and
 * LONG_CNT when the DLL is turning on); LPDDR writes MRW2, MRW, MRW4
 * and has no reset register. */
static inline void set_dram_mode(const struct tegra11_emc_table *next_timing,
				 const struct tegra11_emc_table *last_timing,
				 int dll_change)
{
	if (dram_type == DRAM_TYPE_DDR3) {
		/* first mode_1, then mode_2, then mode_reset*/
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			ccfifo_writel(next_timing->emc_mode_2, EMC_EMRS2);

		if ((next_timing->emc_mode_reset !=
		     last_timing->emc_mode_reset) ||
		    (dll_change == DLL_CHANGE_ON)) {
			u32 reg = next_timing->emc_mode_reset &
				(~EMC_MODE_SET_DLL_RESET);
			if (dll_change == DLL_CHANGE_ON) {
				reg |= EMC_MODE_SET_DLL_RESET;
				reg |= EMC_MODE_SET_LONG_CNT;
			}
			ccfifo_writel(reg, EMC_MRS);
		}
	} else {
		/* first mode_2, then mode_1; mode_reset is not applicable */
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			ccfifo_writel(next_timing->emc_mode_2, EMC_MRW2);
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			ccfifo_writel(next_timing->emc_mode_1, EMC_MRW);
		if (next_timing->emc_mode_4 != last_timing->emc_mode_4)
			ccfifo_writel(next_timing->emc_mode_4, EMC_MRW4);
	}
}
493
/* Switch the EMC clock source by writing @clk_setting to the clock
 * controller, then wait for the EMC to report the change complete.
 * The MC read fences all previously queued register writes and the
 * read-back of emc->reg fences the clock write itself.  BUG() on
 * timeout — the memory bus is in an undefined state at that point. */
static inline void do_clock_change(u32 clk_setting)
{
	int err;

	mc_readl(MC_EMEM_ADR_CFG);	/* completes prev writes */
	writel(clk_setting, (u32)clk_base + emc->reg);
	readl((u32)clk_base + emc->reg);/* completes prev write */

	err = wait_for_update(EMC_INTSTATUS,
			      EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
	if (err) {
		pr_err("%s: clock change completion error: %d", __func__, err);
		BUG();
	}
}
509
/* Execute the full EMC frequency-change sequence from @last_timing to
 * @next_timing, finishing with the clock-source switch @clk_setting.
 *
 * The numbered steps follow the hardware change protocol and their
 * order must not be rearranged.  Immediate register writes program the
 * shadow set; ccfifo_writel() entries are deferred and replayed by
 * hardware around the actual clock switch.  Caller must hold
 * emc_access_lock (see tegra_emc_set_rate). */
static noinline void emc_set_clock(const struct tegra11_emc_table *next_timing,
				   const struct tegra11_emc_table *last_timing,
				   u32 clk_setting)
{
#ifndef EMULATE_CLOCK_SWITCH
	int i, dll_change, pre_wait;
	bool dyn_sref_enabled, zcal_long;

	u32 emc_cfg_reg = emc_readl(EMC_CFG);

	dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
	dll_change = get_dll_change(next_timing, last_timing);
	/* long zq-cal is needed when zcal goes from disabled to enabled */
	zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
		(last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

	/* FIXME: remove steps enumeration below? */

	/* 1. clear clkchange_complete interrupts */
	emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

	/* 2. disable dynamic self-refresh and preset dqs vref, then wait for
	   possible self-refresh entry/exit and/or dqs vref settled - waiting
	   before the clock change decreases worst case change stall time */
	pre_wait = 0;
	if (dyn_sref_enabled) {
		emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
		pre_wait = 5;		/* 5us+ for self-refresh entry/exit */
	}

	/* 2.5 check dq/dqs vref delay */
	if (dqs_preset(next_timing, last_timing)) {
		if (pre_wait < 3)
			pre_wait = 3;	/* 3us+ for dqs vref settled */
	}
	if (pre_wait) {
		emc_timing_update();
		udelay(pre_wait);
	}

	/* 3. disable auto-cal if vref mode is switching - removed */

	/* 4. program burst shadow registers */
	for (i = 0; i < next_timing->burst_regs_num; i++) {
		if (!burst_reg_addr[i])
			continue;	/* hole in the register list */
		__raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
	}
	/* per-channel trimmers go to each EMC channel aperture */
	for (i = 0; i < next_timing->emc_trimmers_num; i++) {
		__raw_writel(next_timing->emc_trimmers_0[i],
			(u32)emc0_base + emc_trimmer_offs[i]);
		__raw_writel(next_timing->emc_trimmers_1[i],
			(u32)emc1_base + emc_trimmer_offs[i]);
	}
	emc_cfg_reg &= ~EMC_CFG_UPDATE_MASK;
	emc_cfg_reg |= next_timing->emc_cfg & EMC_CFG_UPDATE_MASK;
	emc_writel(emc_cfg_reg, EMC_CFG);
	wmb();
	barrier();

	/* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
	   overwrite DFS table setting */
	if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
		overwrite_mrs_wait_cnt(next_timing, zcal_long);

	/* 5.2 disable auto-refresh to save time after clock change */
	emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 6. turn Off dll and enter self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
		ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
			      EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
	}

	/* 7. flow control marker 2 */
	ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);

	/* 8. exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);

	/* 9. set dram mode registers */
	set_dram_mode(next_timing, last_timing, dll_change);

	/* 10. issue zcal command if turning zcal On */
	if (zcal_long) {
		ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
		if (dram_dev_num > 1)
			ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
	}

	/* 10.1 dummy write to RO register to remove stall after change */
	ccfifo_writel(0, EMC_CCFIFO_STATUS);

	/* 11.5 program burst_up_down registers if emc rate is going down */
	if (next_timing->rate < last_timing->rate) {
		for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
			__raw_writel(next_timing->burst_up_down_regs[i],
				burst_up_down_reg_addr[i]);
		wmb();
	}

	/* 12-14. read any MC register to ensure the programming is done
	   change EMC clock source register wait for clk change completion */
	do_clock_change(clk_setting);

	/* 14.1 re-enable auto-refresh */
	emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);

	/* 14.2 program burst_up_down registers if emc rate is going up */
	if (next_timing->rate > last_timing->rate) {
		for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
			__raw_writel(next_timing->burst_up_down_regs[i],
				burst_up_down_reg_addr[i]);
		wmb();
	}

	/* 15. restore auto-cal - removed */

	/* 16. restore dynamic self-refresh */
	if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
		emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
		emc_writel(emc_cfg_reg, EMC_CFG);
	}

	/* 17. set zcal wait count */
	if (zcal_long)
		emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

	/* 18. update restored timing */
	udelay(2);
	emc_timing_update();
#else
	/* FIXME: implement */
	pr_info("tegra11_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
		next_timing->rate, clk_setting);
#endif
}
650
/* Capture the current hardware state into @timing so it can serve as
 * the "last timing" for the first DFS switch after boot.  Burst and
 * trimmer registers are read back from hardware; the software-only
 * fields (cal interval, zcal count, mode registers) are zeroed because
 * the boot values are unknown here.  @timing->burst_regs_num and
 * emc_trimmers_num must already be set by the caller. */
static inline void emc_get_timing(struct tegra11_emc_table *timing)
{
	int i;

	/* burst and trimmers updates depends on previous state; burst_up_down
	   are stateless */
	for (i = 0; i < timing->burst_regs_num; i++) {
		if (burst_reg_addr[i])
			timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
		else
			timing->burst_regs[i] = 0;
	}
	for (i = 0; i < timing->emc_trimmers_num; i++) {
		timing->emc_trimmers_0[i] =
			__raw_readl((u32)emc0_base + emc_trimmer_offs[i]);
		timing->emc_trimmers_1[i] =
			__raw_readl((u32)emc1_base + emc_trimmer_offs[i]);
	}
	timing->emc_acal_interval = 0;
	timing->emc_zcal_cnt_long = 0;
	timing->emc_mode_reset = 0;
	timing->emc_mode_1 = 0;
	timing->emc_mode_2 = 0;
	timing->emc_mode_4 = 0;
	timing->emc_cfg = emc_readl(EMC_CFG);
	/* rate is kept in kHz, matching the DFS table convention */
	timing->rate = clk_get_rate_locked(emc) / 1000;
}
678
679 /* The EMC registers have shadow registers. When the EMC clock is updated
680  * in the clock controller, the shadow registers are copied to the active
681  * registers, allowing glitchless memory bus frequency changes.
682  * This function updates the shadow registers for a new clock frequency,
683  * and relies on the clock lock on the emc clock to avoid races between
684  * multiple frequency changes. In addition access lock prevents concurrent
685  * access to EMC registers from reading MRR registers */
int tegra_emc_set_rate(unsigned long rate)
{
        int i;
        u32 clk_setting;
        const struct tegra11_emc_table *last_timing;
        unsigned long flags;
        s64 last_change_delay;

        if (!tegra_emc_table)
                return -EINVAL;

        /* Table entries specify rate in kHz */
        rate = rate / 1000;

        /* Start from the index cached by the preceding round_rate() call
           (presumably via get_start_idx) instead of scanning from 0 --
           TODO confirm against get_start_idx definition */
        i = get_start_idx(rate);
        for (; i < tegra_emc_table_size; i++) {
                if (tegra_emc_clk_sel[i].input == NULL)
                        continue;       /* invalid entry */

                if (tegra_emc_table[i].rate == rate)
                        break;
        }

        /* Requested rate must exactly match a valid table entry */
        if (i >= tegra_emc_table_size)
                return -EINVAL;

        if (!emc_timing) {
                /* can not assume that boot timing matches dfs table even
                   if boot frequency matches one of the table nodes */
                emc_get_timing(&start_timing);
                last_timing = &start_timing;
        }
        else
                last_timing = emc_timing;

        clk_setting = tegra_emc_clk_sel[i].value;

        /* Enforce a minimum spacing (clkchange_delay us) between two
           consecutive clock changes; busy-wait out the remainder */
        last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
        if ((last_change_delay >= 0) && (last_change_delay < clkchange_delay))
                udelay(clkchange_delay - (int)last_change_delay);

        /* Serialize the shadow-register update against concurrent EMC
           register access (e.g. MRR reads) */
        spin_lock_irqsave(&emc_access_lock, flags);
        emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
        clkchange_time = ktime_get();
        emc_timing = &tegra_emc_table[i];
        spin_unlock_irqrestore(&emc_access_lock, flags);

        emc_last_stats_update(i);

        pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

        return 0;
}
739
740 long tegra_emc_round_rate(unsigned long rate)
741 {
742         int i;
743
744         if (!tegra_emc_table)
745                 return clk_get_rate_locked(emc); /* no table - no rate change */
746
747         if (!emc_enable)
748                 return -EINVAL;
749
750         pr_debug("%s: %lu\n", __func__, rate);
751
752         /* Table entries specify rate in kHz */
753         rate = rate / 1000;
754
755         i = get_start_idx(rate);
756         for (; i < tegra_emc_table_size; i++) {
757                 if (tegra_emc_clk_sel[i].input == NULL)
758                         continue;       /* invalid entry */
759
760                 if (tegra_emc_table[i].rate >= rate) {
761                         pr_debug("%s: using %lu\n",
762                                  __func__, tegra_emc_table[i].rate);
763                         last_round_idx = i;
764                         return tegra_emc_table[i].rate * 1000;
765                 }
766         }
767
768         return -EINVAL;
769 }
770
771 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
772 {
773         int i;
774
775         if (!tegra_emc_table) {
776                 if (rate == clk_get_rate_locked(emc)) {
777                         *div_value = emc->div - 2;
778                         return emc->parent;
779                 }
780                 return NULL;
781         }
782
783         pr_debug("%s: %lu\n", __func__, rate);
784
785         /* Table entries specify rate in kHz */
786         rate = rate / 1000;
787
788         i = get_start_idx(rate);
789         for (; i < tegra_emc_table_size; i++) {
790                 if (tegra_emc_table[i].rate == rate) {
791                         struct clk *p = tegra_emc_clk_sel[i].input;
792
793                         if (p && (tegra_emc_clk_sel[i].input_rate ==
794                                   clk_get_rate(p))) {
795                                 *div_value = (tegra_emc_clk_sel[i].value &
796                                         EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
797                                 return p;
798                         }
799                 }
800         }
801         return NULL;
802 }
803
804 bool tegra_emc_is_parent_ready(unsigned long rate, struct clk **parent,
805                 unsigned long *parent_rate, unsigned long *backup_rate)
806 {
807
808         int i;
809         struct clk *p = NULL;
810         unsigned long p_rate = 0;
811
812         if (!tegra_emc_table || !emc_enable)
813                 return true;
814
815         pr_debug("%s: %lu\n", __func__, rate);
816
817         /* Table entries specify rate in kHz */
818         rate = rate / 1000;
819
820         i = get_start_idx(rate);
821         for (; i < tegra_emc_table_size; i++) {
822                 if (tegra_emc_table[i].rate == rate) {
823                         p = tegra_emc_clk_sel[i].input;
824                         if (!p)
825                                 continue;       /* invalid entry */
826
827                         p_rate = tegra_emc_clk_sel[i].input_rate;
828                         if (p_rate == clk_get_rate(p))
829                                 return true;
830                         break;
831                 }
832         }
833
834         /* Table match not found - "non existing parent" is ready */
835         if (!p)
836                 return true;
837
838         /*
839          * Table match found, but parent is not ready - continue search
840          * for backup rate: min rate above requested that has different
841          * parent source (since only pll_c is scaled and may not be ready,
842          * any other parent can provide backup)
843          */
844         *parent = p;
845         *parent_rate = p_rate;
846
847         for (i++; i < tegra_emc_table_size; i++) {
848                 p = tegra_emc_clk_sel[i].input;
849                 if (!p)
850                         continue;       /* invalid entry */
851
852                 if (p != (*parent)) {
853                         *backup_rate = tegra_emc_table[i].rate * 1000;
854                         return false;
855                 }
856         }
857
858         /* Parent is not ready, and no backup found */
859         *backup_rate = -EINVAL;
860         return false;
861 }
862
863 static inline const struct clk_mux_sel *get_emc_input(u32 val)
864 {
865         const struct clk_mux_sel *sel;
866
867         for (sel = emc->inputs; sel->input != NULL; sel++) {
868                 if (sel->value == val)
869                         break;
870         }
871         return sel;
872 }
873
/* Validate one DFS table entry and resolve its clock source: decode the
 * divider and source-mux fields from src_sel_reg, sanity-check them against
 * the entry rate, and fill in the matching emc_sel record.
 * Returns 0 on success, -EINVAL when the entry is inconsistent (the caller
 * then skips the entry). */
static int find_matching_input(const struct tegra11_emc_table *table,
                        struct clk *pll_c, struct emc_sel *emc_clk_sel)
{
        u32 div_value = (table->src_sel_reg & EMC_CLK_DIV_MASK) >>
                EMC_CLK_DIV_SHIFT;
        u32 src_value = (table->src_sel_reg & EMC_CLK_SOURCE_MASK) >>
                EMC_CLK_SOURCE_SHIFT;
        unsigned long input_rate = 0;
        unsigned long table_rate = table->rate * 1000; /* table->rate is kHz */
        const struct clk_mux_sel *sel = get_emc_input(src_value);

        /* Only even divider field values are accepted */
        if (div_value & 0x1) {
                pr_warn("tegra: invalid odd divider for EMC rate %lu\n",
                        table_rate);
                return -EINVAL;
        }
        /* get_emc_input() returns the terminator entry when no mux
           selector matched src_value */
        if (!sel->input) {
                pr_warn("tegra: no matching input found for EMC rate %lu\n",
                        table_rate);
                return -EINVAL;
        }
        /* Low-jitter path does not allow a non-zero divider */
        if (div_value && (table->src_sel_reg & EMC_CLK_LOW_JITTER_ENABLE)) {
                pr_warn("tegra: invalid LJ path for EMC rate %lu\n",
                        table_rate);
                return -EINVAL;
        }
        /* The MC_SAME_FREQ flag in the clock register must agree with the
           EMC_SAME_FREQ flag in the MC arbiter burst register (logical
           XOR via double negation) */
        if (!(table->src_sel_reg & EMC_CLK_MC_SAME_FREQ) !=
            !(MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
              table->burst_regs[MC_EMEM_ARB_MISC0_INDEX])) {
                pr_warn("tegra: ambiguous EMC to MC ratio for EMC rate %lu\n",
                        table_rate);
                return -EINVAL;
        }

        if (sel->input == pll_c) {
                /* PLLC is a scalable source */
#ifdef CONFIG_TEGRA_DUAL_CBUS
                /* Required PLLC rate = table rate times the effective
                   divider (div field encodes 2*(divider-1)) */
                input_rate = table_rate * (1 + div_value / 2);
#else
                pr_warn("tegra: %s cannot be used as EMC rate %lu source\n",
                        sel->input->name, table_rate);
                return -EINVAL;
#endif
        } else {
                /* all other sources are fixed, must exactly match the rate */
                input_rate = clk_get_rate(sel->input);
                if (input_rate != (table_rate * (1 + div_value / 2))) {
                        pr_warn("tegra: %s rate %lu does not match EMC rate %lu\n",
                                sel->input->name, input_rate, table_rate);
                        return -EINVAL;
                }
        }

        /* Get ready emc clock selection settings for this table rate */
        emc_clk_sel->input = sel->input;
        emc_clk_sel->input_rate = input_rate;
        emc_clk_sel->value = table->src_sel_reg;

        return 0;
}
934
935 static void adjust_emc_dvfs_table(const struct tegra11_emc_table *table,
936                                   int table_size)
937 {
938         int i, j;
939         unsigned long rate;
940
941         for (i = 0; i < MAX_DVFS_FREQS; i++) {
942                 int mv = emc->dvfs->millivolts[i];
943                 if (!mv)
944                         break;
945
946                 /* For each dvfs voltage find maximum supported rate;
947                    use 1MHz placeholder if not found */
948                 for (rate = 1000, j = 0; j < table_size; j++) {
949                         if (tegra_emc_clk_sel[j].input == NULL)
950                                 continue;       /* invalid entry */
951
952                         if ((mv >= table[j].emc_min_mv) &&
953                             (rate < table[j].rate))
954                                 rate = table[j].rate;
955                 }
956                 /* Table entries specify rate in kHz */
957                 emc->dvfs->freqs[i] = rate * 1000;
958         }
959 }
960
/* Validate the platform-supplied EMC DFS table and install it: check DRAM
 * type and boot parent, match every entry with a clock source/divider,
 * require an entry for the maximum rate, align EMC dvfs with the table
 * voltages, and program the clock-change mode for the DRAM type.
 * Returns 0 when the table is accepted, -ENODATA otherwise. */
static int init_emc_table(const struct tegra11_emc_table *table, int table_size)
{
        int i, mv;
        u32 reg;
        bool max_entry = false;
        unsigned long boot_rate, max_rate;
        struct clk *pll_c = tegra_get_clock_by_name("pll_c");

        emc_stats.clkchange_count = 0;
        spin_lock_init(&emc_stats.spinlock);
        emc_stats.last_update = get_jiffies_64();
        /* Out-of-range index marks "current rate not in table" */
        emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

        boot_rate = clk_get_rate(emc) / 1000;
        max_rate = clk_get_max_rate(emc) / 1000;

        if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
                pr_err("tegra: not supported DRAM type %u\n", dram_type);
                return -ENODATA;
        }

        /* DFS assumes the EMC clock was booted on pll_m */
        if (emc->parent != tegra_get_clock_by_name("pll_m")) {
                pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
                        emc->parent->name);
                return -ENODATA;
        }

        if (!table || !table_size) {
                pr_err("tegra: EMC DFS table is empty\n");
                return -ENODATA;
        }

        /* Clamp to the statically sized per-entry arrays */
        tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
        switch (table[0].rev) {
        case 0x40:
        case 0x41:
                start_timing.burst_regs_num = table[0].burst_regs_num;
                start_timing.emc_trimmers_num = table[0].emc_trimmers_num;
                break;
        default:
                pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
                        table[0].rev);
                return -ENODATA;
        }

        /* Match EMC source/divider settings with table entries */
        for (i = 0; i < tegra_emc_table_size; i++) {
                unsigned long table_rate = table[i].rate;

                /* Skip "no-rate" entry, or entry violating ascending order */
                if (!table_rate ||
                    (i && (table_rate <= table[i-1].rate)))
                        continue;

                /* All entries must share the revision of entry 0 */
                BUG_ON(table[i].rev != table[0].rev);

                if (find_matching_input(&table[i], pll_c,
                                        &tegra_emc_clk_sel[i]))
                        continue;

                if (table_rate == boot_rate)
                        emc_stats.last_sel = i;

                if (table_rate == max_rate)
                        max_entry = true;
        }

        /* Validate EMC rate and voltage limits */
        if (!max_entry) {
                pr_err("tegra: invalid EMC DFS table: entry for max rate"
                       " %lu kHz is not found\n", max_rate);
                return -ENODATA;
        }

        tegra_emc_table = table;

        if (emc->dvfs) {
                adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
                mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
                if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
                        /* Max rate must be reachable at nominal voltage;
                           otherwise reject the whole table */
                        tegra_emc_table = NULL;
                        pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
                               " kHz does not match nominal voltage %d\n",
                               max_rate, emc->dvfs->max_millivolts);
                        return -ENODATA;
                }
        }

        pr_info("tegra: validated EMC DFS table\n");

        /* Configure clock change mode according to dram type */
        reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
        reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
                EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
        emc_writel(reg, EMC_CFG_2);
        return 0;
}
1058
1059 static int __devinit tegra11_emc_probe(struct platform_device *pdev)
1060 {
1061         struct tegra11_emc_pdata *pdata;
1062         struct resource *res;
1063
1064         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1065         if (!res) {
1066                 dev_err(&pdev->dev, "missing register base\n");
1067                 return -ENOMEM;
1068         }
1069
1070         pdata = pdev->dev.platform_data;
1071         if (!pdata) {
1072                 dev_err(&pdev->dev, "missing platform data\n");
1073                 return -ENODATA;
1074         }
1075
1076         return init_emc_table(pdata->tables, pdata->num_tables);
1077 }
1078
/* Platform driver glue; bound by name to the "tegra-emc" device */
static struct platform_driver tegra11_emc_driver = {
        .driver         = {
                .name   = "tegra-emc",
                .owner  = THIS_MODULE,
        },
        .probe          = tegra11_emc_probe,
};
1086
/* Register the EMC platform driver (called during arch init) */
int __init tegra11_emc_init(void)
{
        return platform_driver_register(&tegra11_emc_driver);
}
1091
/* Drop the cached current-timing pointer; the next tegra_emc_set_rate()
 * will re-read the live timing from hardware via emc_get_timing() */
void tegra_emc_timing_invalidate(void)
{
        emc_timing = NULL;
}
1096
/* Capture the EMC clock handle and latch DRAM type and device count
 * from the EMC/MC configuration registers */
void tegra_emc_dram_type_init(struct clk *c)
{
        emc = c;

        dram_type = (emc_readl(EMC_FBIO_CFG5) &
                     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;

        dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
}
1106
/* Return the DRAM type latched by tegra_emc_dram_type_init() */
int tegra_emc_get_dram_type(void)
{
        return dram_type;
}
1111
1112 static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
1113 {
1114         int bit;
1115         u32 dram_val = 0;
1116
1117         /* tegra clocks definitions use shifted mask always */
1118         if (!dram_to_soc_bit_map)
1119                 return soc_val & dram_mask;
1120
1121         for (bit = dram_shift; bit < 32; bit++) {
1122                 u32 dram_bit_mask = 0x1 << bit;
1123                 u32 soc_bit_mask = dram_to_soc_bit_map[bit];
1124
1125                 if (!(dram_bit_mask & dram_mask))
1126                         break;
1127
1128                 if (soc_bit_mask & soc_val)
1129                         dram_val |= dram_bit_mask;
1130         }
1131
1132         return dram_val;
1133 }
1134
/* Read an LPDDR2 mode register (MRR) from DRAM device 'dev' at register
 * address 'addr'. Returns the register data on success or a negative
 * error (-ENODEV for non-LPDDR2, or wait_for_update() failure).
 * Caller must hold emc_access_lock. */
static int emc_read_mrr(int dev, int addr)
{
        int ret;
        u32 val;

        if (dram_type != DRAM_TYPE_LPDDR2)
                return -ENODEV;

        /* Wait until any previous MRR data has been consumed */
        ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
        if (ret)
                return ret;

        /* Issue the read: select the device and the mode register */
        val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
        val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
        emc_writel(val, EMC_MRR);

        /* Wait for the data-valid flag, then fetch the result */
        ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
        if (ret)
                return ret;

        val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
        return val;
}
1158
1159 int tegra_emc_get_dram_temperature(void)
1160 {
1161         int mr4;
1162         unsigned long flags;
1163
1164         spin_lock_irqsave(&emc_access_lock, flags);
1165
1166         mr4 = emc_read_mrr(0, 4);
1167         if (IS_ERR_VALUE(mr4)) {
1168                 spin_unlock_irqrestore(&emc_access_lock, flags);
1169                 return mr4;
1170         }
1171         spin_unlock_irqrestore(&emc_access_lock, flags);
1172
1173         mr4 = soc_to_dram_bit_swap(
1174                 mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
1175         return mr4;
1176 }
1177
1178 #ifdef CONFIG_DEBUG_FS
1179
1180 static struct dentry *emc_debugfs_root;
1181
1182 static int emc_stats_show(struct seq_file *s, void *data)
1183 {
1184         int i;
1185
1186         emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
1187
1188         seq_printf(s, "%-10s %-10s \n", "rate kHz", "time");
1189         for (i = 0; i < tegra_emc_table_size; i++) {
1190                 if (tegra_emc_clk_sel[i].input == NULL)
1191                         continue;       /* invalid entry */
1192
1193                 seq_printf(s, "%-10lu %-10llu \n", tegra_emc_table[i].rate,
1194                            cputime64_to_clock_t(emc_stats.time_at_clock[i]));
1195         }
1196         seq_printf(s, "%-15s %llu\n", "transitions:",
1197                    emc_stats.clkchange_count);
1198         seq_printf(s, "%-15s %llu\n", "time-stamp:",
1199                    cputime64_to_clock_t(emc_stats.last_update));
1200
1201         return 0;
1202 }
1203
/* seq_file open hook for the debugfs "stats" file */
static int emc_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, emc_stats_show, inode->i_private);
}
1208
/* File operations for the read-only debugfs "stats" file */
static const struct file_operations emc_stats_fops = {
        .open           = emc_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
1215
/* debugfs getter: expose the LPDDR2 MR4 temperature reading
 * (negative errors from the MRR read are reported as the value) */
static int dram_temperature_get(void *data, u64 *val)
{
        *val = tegra_emc_get_dram_temperature();
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
                        NULL, "%lld\n");
1223
/* debugfs getter for the bandwidth-efficiency percentage */
static int efficiency_get(void *data, u64 *val)
{
        *val = tegra_emc_bw_efficiency;
        return 0;
}
/* debugfs setter: clamp to 100% and re-evaluate the shared EMC bus,
 * since the efficiency factor changes effective bandwidth demand */
static int efficiency_set(void *data, u64 val)
{
        tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
        if (emc)
                tegra_clk_shared_bus_update(emc);

        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
                        efficiency_set, "%llu\n");
1239
/* Create the tegra_emc debugfs directory with stats, clkchange_delay,
 * dram_temperature and efficiency nodes. Skipped (returns 0) when no
 * valid DFS table was installed; on partial failure the whole directory
 * is removed and -ENOMEM is returned. */
static int __init tegra_emc_debug_init(void)
{
        if (!tegra_emc_table)
                return 0;

        emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
        if (!emc_debugfs_root)
                return -ENOMEM;

        if (!debugfs_create_file(
                "stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
                goto err_out;

        /* clkchange_delay exported directly as a writable u32 */
        if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
                emc_debugfs_root, (u32 *)&clkchange_delay))
                goto err_out;

        if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
                                 NULL, &dram_temperature_fops))
                goto err_out;

        if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
                                 emc_debugfs_root, NULL, &efficiency_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(emc_debugfs_root);
        return -ENOMEM;
}
1271
1272 late_initcall(tegra_emc_debug_init);
1273 #endif