ARM: tegra11: clock: Modify dqs preset operation
1 /*
2  * arch/arm/mach-tegra/tegra11_emc.c
3  *
4  * Copyright (C) 2011-2012 NVIDIA Corporation
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
18  *
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include <linux/module.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/platform_data/tegra_emc.h>
29 #include <linux/debugfs.h>
30 #include <linux/seq_file.h>
31 #include <linux/hrtimer.h>
32
33 #include <asm/cputime.h>
34
35 #include <mach/iomap.h>
36
37 #include "clock.h"
38 #include "dvfs.h"
39 #include "tegra11_emc.h"
40
41 #ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
42 static bool emc_enable = true;
43 #else
44 static bool emc_enable;
45 #endif
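/*
 * Editor's note: emc_enable gates EMC DFS - when false, tegra_emc_round_rate()
 * rejects rate requests, so the boot frequency is kept.  As a module parameter
 * with mode 0644 it can typically be toggled at run time via
 * /sys/module/<modname>/parameters/emc_enable (exact path depends on the
 * build); the default follows CONFIG_TEGRA_EMC_SCALING_ENABLE above.
 */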
46 module_param(emc_enable, bool, 0644);
47
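/*
 * Editor's note: tegra_emc_bw_efficiency is a percentage (0-100) exported for
 * use outside this file (presumably by the shared EMC bus bandwidth
 * accounting in the clock code).  The "efficiency" debugfs node below clamps
 * it to 100 and kicks tegra_clk_shared_bus_update() so a change takes effect.
 */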
48 u8 tegra_emc_bw_efficiency = 100;
49
50 #define PLL_C_DIRECT_FLOOR              333500000
51 #define EMC_STATUS_UPDATE_TIMEOUT       100
52 #define TEGRA_EMC_TABLE_MAX_SIZE        16
53
54 enum {
55         DLL_CHANGE_NONE = 0,
56         DLL_CHANGE_ON,
57         DLL_CHANGE_OFF,
58 };
59
60 #define EMC_CLK_DIV_SHIFT               0
61 #define EMC_CLK_DIV_MAX_VALUE           0xFF
62 #define EMC_CLK_DIV_MASK                (0xFF << EMC_CLK_DIV_SHIFT)
63 #define EMC_CLK_SOURCE_SHIFT            29
64 #define EMC_CLK_SOURCE_MAX_VALUE        3
65 #define EMC_CLK_LOW_JITTER_ENABLE       (0x1 << 31)
66 #define EMC_CLK_MC_SAME_FREQ            (0x1 << 16)
67
68 /* FIXME: actual Tegra11 list */
69 #define BURST_REG_LIST \
70         DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),                     \
71         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),                    \
72         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR),                \
73         DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),                    \
74         DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),                     \
75         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),                    \
76         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),                    \
77         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),                    \
78         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),                    \
79         DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),                 \
80         DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),                 \
81         DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),                    \
82         DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),                   \
83         DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),                   \
84         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),                    \
85         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK),               \
86         DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY),                  \
87         DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA),           \
88         DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_2),             \
89         DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),                   \
90         DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE),                  \
91         DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK),               \
92         DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),                \
93         DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),      \
94         DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),    \
95         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),                \
96         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),                \
97         DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),              \
98         DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),               \
99         DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),                \
100         DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),                \
101         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),                   \
102         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),                \
103         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),                   \
104         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR),                 \
105         DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD),                    \
106         DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),                   \
107         DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),                  \
108         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),             \
109         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),               \
110         DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),                 \
111         DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA),             \
112         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),              \
113         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),               \
114         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),              \
115         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),            \
116         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),     \
117         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1),         \
118         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2),         \
119         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3),         \
120         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),         \
121         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),         \
122         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),         \
123         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),         \
124         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1),        \
125         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2),        \
126         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3),        \
127         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),        \
128         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),        \
129         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),        \
130         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),        \
131         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR1),        \
132         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR2),        \
133         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1),        \
134         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2),        \
135         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3),        \
136         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),        \
137         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),        \
138         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),        \
139         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),        \
140         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1),          \
141         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2),          \
142         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3),          \
143         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),          \
144         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL4),         \
145         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),         \
146         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),          \
147         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL),          \
148         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),         \
149         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),       \
150         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),      \
151         DEFINE_REG(TEGRA_EMC_BASE, EMC_DSR_VTTGEN_DRV),         \
152         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN),            \
153         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),             \
154         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL),          \
155         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),          \
156         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),          \
157         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),           \
158         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2),          \
159         DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG2),       \
160         DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG3),       \
161         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),                    \
162         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),           \
163         DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),   \
164         DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL1),       \
165         DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL2),       \
166                                                                         \
167         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),             \
168         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
169         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),      \
170         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),       \
171         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),       \
172         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),      \
173         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),      \
174         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),      \
175         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),  \
176         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),  \
177         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),      \
178         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),      \
179         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),      \
180         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),      \
181         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),        \
182         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),       \
183         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),           \
184         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),
185
186 #define BURST_UP_DOWN_REG_LIST \
187         DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT),     \
188         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_0),   \
189         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_1),   \
190         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_0),   \
191         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_0),  \
192         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_2),   \
193         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_1),   \
194         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_1),  \
195         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_3),   \
196         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_0),  \
197         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_1),
198
199 #define EMC_TRIMMERS_REG_LIST \
200         DEFINE_REG(0, EMC_FBIO_CFG6),                           \
201         DEFINE_REG(0, EMC_QUSE),                                \
202         DEFINE_REG(0, EMC_EINPUT),                              \
203         DEFINE_REG(0, EMC_EINPUT_DURATION),                     \
204         DEFINE_REG(0, EMC_DLL_XFORM_DQS0),                      \
205         DEFINE_REG(0, EMC_QSAFE),                               \
206         DEFINE_REG(0, EMC_DLL_XFORM_QUSE0),                     \
207         DEFINE_REG(0, EMC_RDV),                                 \
208         DEFINE_REG(0, EMC_XM2DQSPADCTRL4),                      \
209         DEFINE_REG(0, EMC_XM2DQSPADCTRL3),                      \
210         DEFINE_REG(0, EMC_DLL_XFORM_DQ0),                       \
211         DEFINE_REG(0, EMC_AUTO_CAL_CONFIG),                     \
212         DEFINE_REG(0, EMC_DLL_XFORM_ADDR0),                     \
213         DEFINE_REG(0, EMC_XM2CLKPADCTRL2),                      \
214         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS0),                     \
215         DEFINE_REG(0, EMC_CDB_CNTL_1),
216
217
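/*
 * Editor's note: the register lists above are expanded several times below
 * with different DEFINE_REG() definitions (an "X-macro" pattern): once into
 * tables of MMIO addresses, once into a table of per-channel trimmer offsets,
 * and once into enums of *_INDEX / *_TRIM_INDEX values that index the
 * burst_regs[] and emc_trimmers_*[] arrays of each DFS table entry.
 */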
218 #define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
219 static const void __iomem *burst_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
220         BURST_REG_LIST
221 };
222 #ifndef EMULATE_CLOCK_SWITCH
223 static const void __iomem *burst_up_down_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
224         BURST_UP_DOWN_REG_LIST
225 };
226 #endif
227 #undef DEFINE_REG
228
229
230 #define DEFINE_REG(base, reg) (reg)
231 #ifndef EMULATE_CLOCK_SWITCH
232 static const u32 emc_trimmer_offs[TEGRA11_EMC_MAX_NUM_REGS] = {
233         EMC_TRIMMERS_REG_LIST
234 };
235 #endif
236 #undef DEFINE_REG
237
238
239 #define DEFINE_REG(base, reg)   reg##_INDEX
240 enum {
241         BURST_REG_LIST
242 };
243 #undef DEFINE_REG
244
245 #define DEFINE_REG(base, reg)   reg##_TRIM_INDEX
246 enum {
247         EMC_TRIMMERS_REG_LIST
248 };
249 #undef DEFINE_REG
250
251
252 struct emc_sel {
253         struct clk      *input;
254         u32             value;
255         unsigned long   input_rate;
256 };
257 static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
258 static struct tegra11_emc_table start_timing;
259 static const struct tegra11_emc_table *emc_timing;
260
261 static ktime_t clkchange_time;
262 static int clkchange_delay = 100;
263
264 static const u32 *dram_to_soc_bit_map;
265 static const struct tegra11_emc_table *tegra_emc_table;
266 static int tegra_emc_table_size;
267
268 static u32 dram_dev_num;
269 static u32 dram_type = -1;
270
271 static struct clk *emc;
272
273 static struct {
274         cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
275         int last_sel;
276         u64 last_update;
277         u64 clkchange_count;
278         spinlock_t spinlock;
279 } emc_stats;
280
281 static DEFINE_SPINLOCK(emc_access_lock);
282
283 static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
284 static void __iomem *emc0_base = IO_ADDRESS(TEGRA_EMC0_BASE);
285 static void __iomem *emc1_base = IO_ADDRESS(TEGRA_EMC1_BASE);
286 static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
287 static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
288
289 static inline void emc_writel(u32 val, unsigned long addr)
290 {
291         writel(val, (u32)emc_base + addr);
292 }
293 static inline void emc0_writel(u32 val, unsigned long addr)
294 {
295         writel(val, (u32)emc0_base + addr);
296 }
297 static inline void emc1_writel(u32 val, unsigned long addr)
298 {
299         writel(val, (u32)emc1_base + addr);
300 }
301 static inline u32 emc_readl(unsigned long addr)
302 {
303         return readl((u32)emc_base + addr);
304 }
305 static inline void mc_writel(u32 val, unsigned long addr)
306 {
307         writel(val, (u32)mc_base + addr);
308 }
309 static inline u32 mc_readl(unsigned long addr)
310 {
311         return readl((u32)mc_base + addr);
312 }
313
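/*
 * Editor's note: ccfifo_writel() queues a register write into the EMC
 * clock-change FIFO: the value/address pair is latched here, but the hardware
 * replays the write only as part of the clock change sequence triggered in
 * do_clock_change(), not immediately.
 */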
314 static inline void ccfifo_writel(u32 val, unsigned long addr)
315 {
316         writel(val, (u32)emc_base + EMC_CCFIFO_DATA);
317         writel(addr, (u32)emc_base + EMC_CCFIFO_ADDR);
318 }
319
320 static void emc_last_stats_update(int last_sel)
321 {
322         unsigned long flags;
323         u64 cur_jiffies = get_jiffies_64();
324
325         spin_lock_irqsave(&emc_stats.spinlock, flags);
326
327         if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
328                 emc_stats.time_at_clock[emc_stats.last_sel] =
329                         emc_stats.time_at_clock[emc_stats.last_sel] +
330                         (cur_jiffies - emc_stats.last_update);
331
332         emc_stats.last_update = cur_jiffies;
333
334         if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
335                 emc_stats.clkchange_count++;
336                 emc_stats.last_sel = last_sel;
337         }
338         spin_unlock_irqrestore(&emc_stats.spinlock, flags);
339 }
340
341 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
342 {
343         int i;
344         for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
345                 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
346                         return 0;
347                 udelay(1);
348         }
349         return -ETIMEDOUT;
350 }
351
352 static inline void emc_timing_update(void)
353 {
354         int err;
355
356         emc_writel(0x1, EMC_TIMING_CONTROL);
357         err = wait_for_update(EMC_STATUS,
358                               EMC_STATUS_TIMING_UPDATE_STALLED, false);
359         if (err) {
360                 pr_err("%s: timing update error: %d\n", __func__, err);
361                 BUG();
362         }
363 }
364
365 static inline void auto_cal_disable(void)
366 {
367         int err;
368
369         emc_writel(0, EMC_AUTO_CAL_INTERVAL);
370         err = wait_for_update(EMC_AUTO_CAL_STATUS,
371                               EMC_AUTO_CAL_STATUS_ACTIVE, false);
372         if (err) {
373                 pr_err("%s: disable auto-cal error: %d\n", __func__, err);
374                 BUG();
375         }
376 }
377
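/*
 * Editor's note: dqs_preset() pre-enables VREF on the DQS pads (pad control
 * and the per-channel trimmer registers) when the next timing turns it on and
 * the last one did not.  Returning true tells the caller to allow ~3us for
 * VREF to settle before the actual clock change.
 */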
378 static inline bool dqs_preset(const struct tegra11_emc_table *next_timing,
379                               const struct tegra11_emc_table *last_timing)
380 {
381         bool ret = false;
382
383 #define DQS_SET(reg, bit)                                                     \
384         do {                                                                  \
385                 if ((next_timing->burst_regs[EMC_##reg##_INDEX] &             \
386                      EMC_##reg##_##bit##_ENABLE) &&                           \
387                     (!(last_timing->burst_regs[EMC_##reg##_INDEX] &           \
388                        EMC_##reg##_##bit##_ENABLE)))   {                      \
389                         emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
390                                    | EMC_##reg##_##bit##_ENABLE, EMC_##reg);  \
391                         ret = true;                                           \
392                 }                                                             \
393         } while (0)
394
395
396 #define DQS_SET_TRIM(reg, bit, ch)                                             \
397         do {                                                                   \
398                 if ((next_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX]    \
399                      & EMC_##reg##_##bit##_ENABLE) &&                          \
400                     (!(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX]  \
401                        & EMC_##reg##_##bit##_ENABLE)))   {                     \
402                         emc##ch##_writel(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX] \
403                                    | EMC_##reg##_##bit##_ENABLE, EMC_##reg);   \
404                         ret = true;                                            \
405                 }                                                              \
406         } while (0)
407
408         DQS_SET(XM2DQSPADCTRL2, VREF);
409         DQS_SET_TRIM(XM2DQSPADCTRL3, VREF, 0);
410         DQS_SET_TRIM(XM2DQSPADCTRL3, VREF, 1);
411
412         return ret;
413 }
414
415 static inline void overwrite_mrs_wait_cnt(
416         const struct tegra11_emc_table *next_timing,
417         bool zcal_long)
418 {
419         u32 reg;
420         u32 cnt = 512;
421
422         /* For DDR3, when the DLL is re-started: overwrite the EMC DFS table
423            setting for MRS_WAIT_LONG with the maximum of the MRS_WAIT_SHORT
424            setting and the expected operation length. Reduce the latter by the
425            overlapping zq-calibration, if any */
426         if (zcal_long)
427                 cnt -= dram_dev_num * 256;
428
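        /*
         * Editor's example (assuming the long ZQ calibration overlaps the
         * wait by 256 cycles per device): with two devices
         * cnt = 512 - 2 * 256 = 0, so the clamp below falls back to the
         * table's short wait value.
         */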
429         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
430                 EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
431                 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
432         if (cnt < reg)
433                 cnt = reg;
434
435         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
436                 (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
437         reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
438                 EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
439
440         emc_writel(reg, EMC_MRS_WAIT_CNT);
441 }
442
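/*
 * Editor's note: bit 0 of the emc_mode_1 (MR1/EMRS) image is treated as the
 * DLL disable bit, so the DLL counts as enabled when it is clear; comparing
 * the last and next images yields NONE/ON/OFF for the switch sequence below.
 */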
443 static inline int get_dll_change(const struct tegra11_emc_table *next_timing,
444                                  const struct tegra11_emc_table *last_timing)
445 {
446         bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
447         bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
448
449         if (next_dll_enabled == last_dll_enabled)
450                 return DLL_CHANGE_NONE;
451         else if (next_dll_enabled)
452                 return DLL_CHANGE_ON;
453         else
454                 return DLL_CHANGE_OFF;
455 }
456
457 static inline void set_dram_mode(const struct tegra11_emc_table *next_timing,
458                                  const struct tegra11_emc_table *last_timing,
459                                  int dll_change)
460 {
461         if (dram_type == DRAM_TYPE_DDR3) {
462                 /* first mode_1, then mode_2, then mode_reset */
463                 if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
464                         ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
465                 if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
466                         ccfifo_writel(next_timing->emc_mode_2, EMC_EMRS2);
467
468                 if ((next_timing->emc_mode_reset !=
469                      last_timing->emc_mode_reset) ||
470                     (dll_change == DLL_CHANGE_ON)) {
471                         u32 reg = next_timing->emc_mode_reset &
472                                 (~EMC_MODE_SET_DLL_RESET);
473                         if (dll_change == DLL_CHANGE_ON) {
474                                 reg |= EMC_MODE_SET_DLL_RESET;
475                                 reg |= EMC_MODE_SET_LONG_CNT;
476                         }
477                         ccfifo_writel(reg, EMC_MRS);
478                 }
479         } else {
480                 /* first mode_2, then mode_1; mode_reset is not applicable */
481                 if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
482                         ccfifo_writel(next_timing->emc_mode_2, EMC_MRW2);
483                 if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
484                         ccfifo_writel(next_timing->emc_mode_1, EMC_MRW);
485                 if (next_timing->emc_mode_4 != last_timing->emc_mode_4)
486                         ccfifo_writel(next_timing->emc_mode_4, EMC_MRW4);
487         }
488 }
489
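/*
 * Editor's note: do_clock_change() issues the actual switch - the MC read
 * flushes prior posted writes, the write to the EMC clock source register
 * starts the hardware sequence (during which the queued CCFIFO writes are
 * executed), and completion is polled via the CLKCHANGE_COMPLETE status bit.
 */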
490 static inline void do_clock_change(u32 clk_setting)
491 {
492         int err;
493
494         mc_readl(MC_EMEM_ADR_CFG);      /* completes prev writes */
495         writel(clk_setting, (u32)clk_base + emc->reg);
496         readl((u32)clk_base + emc->reg);/* completes prev write */
497
498         err = wait_for_update(EMC_INTSTATUS,
499                               EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
500         if (err) {
501                 pr_err("%s: clock change completion error: %d\n", __func__, err);
502                 BUG();
503         }
504 }
505
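/*
 * Editor's note: emc_set_clock() follows the numbered DFS sequence below:
 * pre-set DQS VREF and disable dynamic self-refresh, program the shadow
 * (burst) and per-channel trimmer registers, queue DRAM mode-register and
 * self-refresh commands through the CCFIFO, switch the clock source, then
 * restore auto-calibration, refresh and self-refresh for the new timing.
 */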
506 static noinline void emc_set_clock(const struct tegra11_emc_table *next_timing,
507                                    const struct tegra11_emc_table *last_timing,
508                                    u32 clk_setting)
509 {
510 #ifndef EMULATE_CLOCK_SWITCH
511         int i, dll_change, pre_wait;
512         bool dyn_sref_enabled, vref_cal_toggle, zcal_long;
513
514         u32 emc_cfg_reg = emc_readl(EMC_CFG);
515
516         dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
517         dll_change = get_dll_change(next_timing, last_timing);
518         zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
519                 (last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);
520
521         /* FIXME: remove steps enumeration below? */
522
523         /* 1. clear clkchange_complete interrupts */
524         emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
525
526         /* 2. disable dynamic self-refresh and preset dqs vref, then wait for
527            a possible self-refresh entry/exit and/or dqs vref to settle - waiting
528            before the clock change decreases the worst case change stall time */
529         pre_wait = 0;
530         if (dyn_sref_enabled) {
531                 emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
532                 emc_writel(emc_cfg_reg, EMC_CFG);
533                 pre_wait = 5;           /* 5us+ for self-refresh entry/exit */
534         }
535
536         /* 2.5 check dq/dqs vref delay */
537         if (dqs_preset(next_timing, last_timing)) {
538                 if (pre_wait < 3)
539                         pre_wait = 3;   /* 3us+ for dqs vref settled */
540         }
541         if (pre_wait) {
542                 emc_timing_update();
543                 udelay(pre_wait);
544         }
545
546         /* 3. disable auto-cal if vref mode is switching */
547         vref_cal_toggle = (next_timing->emc_acal_interval != 0) &&
548                 ((next_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX] ^
549                   last_timing->burst_regs[EMC_XM2COMPPADCTRL_INDEX]) &
550                  EMC_XM2COMPPADCTRL_VREF_CAL_ENABLE);
551         if (vref_cal_toggle)
552                 auto_cal_disable();
553
554         /* 4. program burst shadow registers */
555         for (i = 0; i < next_timing->burst_regs_num; i++) {
556                 if (!burst_reg_addr[i])
557                         continue;
558                 __raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
559         }
560         for (i = 0; i < next_timing->emc_trimmers_num; i++) {
561                 __raw_writel(next_timing->emc_trimmers_0[i],
562                         (u32)emc0_base + emc_trimmer_offs[i]);
563                 __raw_writel(next_timing->emc_trimmers_1[i],
564                         (u32)emc1_base + emc_trimmer_offs[i]);
565         }
566         wmb();
567         barrier();
568
569         /* 4.1 On DDR3, when the DLL is re-started, predict the MRS long wait
570            count and overwrite the DFS table setting */
571         if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
572                 overwrite_mrs_wait_cnt(next_timing, zcal_long);
573
574         /* 5.2 disable auto-refresh to save time after clock change */
575         emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);
576
577         /* 6. turn Off dll and enter self-refresh on DDR3 */
578         if (dram_type == DRAM_TYPE_DDR3) {
579                 if (dll_change == DLL_CHANGE_OFF)
580                         ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
581                 ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
582                               EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
583         }
584
585         /* 7. flow control marker 2 */
586         ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
587
588         /* 8. exit self-refresh on DDR3 */
589         if (dram_type == DRAM_TYPE_DDR3)
590                 ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);
591
592         /* 9. set dram mode registers */
593         set_dram_mode(next_timing, last_timing, dll_change);
594
595         /* 10. issue zcal command if turning zcal On */
596         if (zcal_long) {
597                 ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
598                 if (dram_dev_num > 1)
599                         ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
600         }
601
602         /* 11.5 program burst_up_down registers if emc rate is going down */
603         if (next_timing->rate < last_timing->rate) {
604                 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
605                         __raw_writel(next_timing->burst_up_down_regs[i],
606                                 burst_up_down_reg_addr[i]);
607                 wmb();
608         }
609
610         /* 12-14. read any MC register to ensure the programming is done,
611            change the EMC clock source register, wait for clk change completion */
612         do_clock_change(clk_setting);
613
614         /* 14.1 re-enable auto-refresh */
615         emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);
616
617         /* 14.2 program burst_up_down registers if emc rate is going up */
618         if (next_timing->rate > last_timing->rate) {
619                 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
620                         __raw_writel(next_timing->burst_up_down_regs[i],
621                                 burst_up_down_reg_addr[i]);
622                 wmb();
623         }
624
625         /* 15. restore auto-cal */
626         if (vref_cal_toggle)
627                 emc_writel(next_timing->emc_acal_interval,
628                            EMC_AUTO_CAL_INTERVAL);
629
630         /* 16. restore dynamic self-refresh */
631         if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
632                 emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
633                 emc_writel(emc_cfg_reg, EMC_CFG);
634         }
635
636         /* 17. set zcal wait count */
637         if (zcal_long)
638                 emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);
639
640         /* 18. update restored timing */
641         udelay(2);
642         emc_timing_update();
643 #else
644         /* FIXME: implement */
645         pr_info("tegra11_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
646                 next_timing->rate, clk_setting);
647 #endif
648 }
649
650 static inline void emc_get_timing(struct tegra11_emc_table *timing)
651 {
652         int i;
653
654         /* burst and trimmer updates depend on the previous state; burst_up_down
655            registers are stateless */
656         for (i = 0; i < timing->burst_regs_num; i++) {
657                 if (burst_reg_addr[i])
658                         timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
659                 else
660                         timing->burst_regs[i] = 0;
661         }
662         for (i = 0; i < timing->emc_trimmers_num; i++) {
663                 timing->emc_trimmers_0[i] =
664                         __raw_readl((u32)emc0_base + emc_trimmer_offs[i]);
665                 timing->emc_trimmers_1[i] =
666                         __raw_readl((u32)emc1_base + emc_trimmer_offs[i]);
667         }
668         timing->emc_acal_interval = 0;
669         timing->emc_zcal_cnt_long = 0;
670         timing->emc_mode_reset = 0;
671         timing->emc_mode_1 = 0;
672         timing->emc_mode_2 = 0;
673         timing->emc_mode_4 = 0;
674         timing->emc_cfg = emc_readl(EMC_CFG);
675         timing->rate = clk_get_rate_locked(emc);
676 }
677
678 /* The EMC registers have shadow registers. When the EMC clock is updated
679  * in the clock controller, the shadow registers are copied to the active
680  * registers, allowing glitchless memory bus frequency changes.
681  * This function updates the shadow registers for a new clock frequency,
682  * and relies on the clock lock on the emc clock to avoid races between
683  * multiple frequency changes. In addition, the access lock serializes EMC
684  * register access against the MRR register reads. */
685 int tegra_emc_set_rate(unsigned long rate)
686 {
687         int i;
688         u32 clk_setting;
689         const struct tegra11_emc_table *last_timing;
690         unsigned long flags;
691         s64 last_change_delay;
692
693         if (!tegra_emc_table)
694                 return -EINVAL;
695
696         /* Table entries specify rate in kHz */
697         rate = rate / 1000;
698
699         for (i = 0; i < tegra_emc_table_size; i++) {
700                 if (tegra_emc_clk_sel[i].input == NULL)
701                         continue;       /* invalid entry */
702
703                 if (tegra_emc_table[i].rate == rate)
704                         break;
705         }
706
707         if (i >= tegra_emc_table_size)
708                 return -EINVAL;
709
710         if (!emc_timing) {
711                 /* cannot assume that the boot timing matches the dfs table
712                    even if the boot frequency matches one of the table entries */
713                 emc_get_timing(&start_timing);
714                 last_timing = &start_timing;
715         } else {
716                 last_timing = emc_timing;
717         }
718
719         clk_setting = tegra_emc_clk_sel[i].value;
720
721         last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
722         if ((last_change_delay >= 0) && (last_change_delay < clkchange_delay))
723                 udelay(clkchange_delay - (int)last_change_delay);
724
725         spin_lock_irqsave(&emc_access_lock, flags);
726         emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
727         clkchange_time = ktime_get();
728         emc_timing = &tegra_emc_table[i];
729         spin_unlock_irqrestore(&emc_access_lock, flags);
730
731         emc_last_stats_update(i);
732
733         pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);
734
735         return 0;
736 }
737
738 long tegra_emc_round_rate(unsigned long rate)
739 {
740         int i;
741
742         if (!tegra_emc_table)
743                 return clk_get_rate_locked(emc); /* no table - no rate change */
744
745         if (!emc_enable)
746                 return -EINVAL;
747
748         pr_debug("%s: %lu\n", __func__, rate);
749
750         /* Table entries specify rate in kHz */
751         rate = rate / 1000;
752
753         for (i = 0; i < tegra_emc_table_size; i++) {
754                 if (tegra_emc_clk_sel[i].input == NULL)
755                         continue;       /* invalid entry */
756
757                 if (tegra_emc_table[i].rate >= rate) {
758                         pr_debug("%s: using %lu\n",
759                                  __func__, tegra_emc_table[i].rate);
760                         return tegra_emc_table[i].rate * 1000;
761                 }
762         }
763
764         return -EINVAL;
765 }
766
767 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
768 {
769         int i;
770
771         if (!tegra_emc_table) {
772                 if (rate == clk_get_rate_locked(emc)) {
773                         *div_value = emc->div - 2;
774                         return emc->parent;
775                 }
776                 return NULL;
777         }
778
779         pr_debug("%s: %lu\n", __func__, rate);
780
781         /* Table entries specify rate in kHz */
782         rate = rate / 1000;
783
784         for (i = 0; i < tegra_emc_table_size; i++) {
785                 if (tegra_emc_table[i].rate == rate) {
786                         struct clk *p = tegra_emc_clk_sel[i].input;
787
788                         if (p && (tegra_emc_clk_sel[i].input_rate ==
789                                   clk_get_rate(p))) {
790                                 *div_value = (tegra_emc_clk_sel[i].value &
791                                         EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
792                                 return p;
793                         }
794                 }
795         }
796         return NULL;
797 }
798
799 bool tegra_emc_is_parent_ready(unsigned long rate, struct clk **parent,
800                 unsigned long *parent_rate, unsigned long *backup_rate)
801 {
802
803         int i;
804         struct clk *p = NULL;
805         unsigned long p_rate = 0;
806
807         if (!tegra_emc_table || !emc_enable)
808                 return true;
809
810         pr_debug("%s: %lu\n", __func__, rate);
811
812         /* Table entries specify rate in kHz */
813         rate = rate / 1000;
814
815         for (i = 0; i < tegra_emc_table_size; i++) {
816                 if (tegra_emc_table[i].rate == rate) {
817                         p = tegra_emc_clk_sel[i].input;
818                         if (!p)
819                                 continue;       /* invalid entry */
820
821                         p_rate = tegra_emc_clk_sel[i].input_rate;
822                         if (p_rate == clk_get_rate(p))
823                                 return true;
824                         break;
825                 }
826         }
827
828         /* Table match not found - "non-existent parent" is ready */
829         if (!p)
830                 return true;
831
832         /*
833          * Table match found, but parent is not ready - continue search
834          * for backup rate: min rate above requested that has different
835          * parent source (since only pll_c is scaled and may not be ready,
836          * any other parent can provide backup)
837          */
838         *parent = p;
839         *parent_rate = p_rate;
840
841         for (i++; i < tegra_emc_table_size; i++) {
842                 p = tegra_emc_clk_sel[i].input;
843                 if (!p)
844                         continue;       /* invalid entry */
845
846                 if (p != (*parent)) {
847                         *backup_rate = tegra_emc_table[i].rate * 1000;
848                         return false;
849                 }
850         }
851
852         /* Parent is not ready, and no backup found */
853         *backup_rate = -EINVAL;
854         return false;
855 }
856
857 /* FIXME: take advantage of table->src_sel_reg */
858 static int find_matching_input(const struct tegra11_emc_table *table,
859                         struct clk *pll_c, struct emc_sel *emc_clk_sel)
860 {
861         u32 div_value = 0;
862         unsigned long input_rate = 0;
863         unsigned long table_rate = table->rate * 1000; /* table->rate is in kHz */
864         struct clk *src = tegra_get_clock_by_name(table->src_name);
865         const struct clk_mux_sel *sel;
866
867         for (sel = emc->inputs; sel->input != NULL; sel++) {
868                 if (sel->input != src)
869                         continue;
870                 /*
871                  * PLLC is a scalable source. For rates below PLL_C_DIRECT_FLOOR
872                  * configure PLLC at double rate and set 1:2 divider, otherwise
873                  * configure PLLC at target rate with divider 1:1.
874                  */
875                 if (src == pll_c) {
876 #ifdef CONFIG_TEGRA_DUAL_CBUS
877                         if (table_rate < PLL_C_DIRECT_FLOOR) {
878                                 input_rate = 2 * table_rate;
879                                 div_value = 2;
880                         } else {
881                                 input_rate = table_rate;
882                                 div_value = 0;
883                         }
884                         break;
885 #else
886                         continue;       /* pll_c is used for cbus - skip */
887 #endif
888                 }
889
890                 /*
891                  * All other clock sources are fixed rate sources, and must
892                  * run at rate that is an exact multiple of the target.
893                  */
894                 input_rate = clk_get_rate(src);
895
896                 if ((input_rate >= table_rate) &&
897                      (input_rate % table_rate == 0)) {
898                         div_value = 2 * input_rate / table_rate - 2;
899                         break;
900                 }
901         }
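        /*
         * Editor's note: the divider field uses an n+2 half-step encoding, as
         * implied by the formula above - the effective divisor is
         * (div_value + 2) / 2.  For example, a fixed 408 MHz source driving a
         * 204 MHz table rate gives div_value = 2 * 408 / 204 - 2 = 2,
         * i.e. divide-by-2.
         */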
902
903         if (!sel->input || (sel->value > EMC_CLK_SOURCE_MAX_VALUE) ||
904             (div_value > EMC_CLK_DIV_MAX_VALUE)) {
905                 pr_warn("tegra: no matching input found for EMC rate %lu\n",
906                         table_rate);
907                 return -EINVAL;
908         }
909
910         emc_clk_sel->input = sel->input;
911         emc_clk_sel->input_rate = input_rate;
912
913         /* Prepare the emc clock selection setting for this table rate */
914         emc_clk_sel->value = sel->value << EMC_CLK_SOURCE_SHIFT;
915         emc_clk_sel->value |= (div_value << EMC_CLK_DIV_SHIFT);
916         if ((div_value == 0) && (emc_clk_sel->input == emc->parent))
917                 emc_clk_sel->value |= EMC_CLK_LOW_JITTER_ENABLE;
918
919         if (MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
920             table->burst_regs[MC_EMEM_ARB_MISC0_INDEX])
921                 emc_clk_sel->value |= EMC_CLK_MC_SAME_FREQ;
922
923         return 0;
924 }
925
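/*
 * Editor's note: for each voltage step of the EMC dvfs ladder, pick the
 * highest table rate whose emc_min_mv fits under that voltage, so the dvfs
 * frequency ladder matches the validated DFS table (1 MHz is used as a
 * placeholder when no entry qualifies).
 */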
926 static void adjust_emc_dvfs_table(const struct tegra11_emc_table *table,
927                                   int table_size)
928 {
929         int i, j;
930         unsigned long rate;
931
932         for (i = 0; i < MAX_DVFS_FREQS; i++) {
933                 int mv = emc->dvfs->millivolts[i];
934                 if (!mv)
935                         break;
936
937                 /* For each dvfs voltage, find the maximum supported rate;
938                    use a 1MHz placeholder if none is found */
939                 for (rate = 1000, j = 0; j < table_size; j++) {
940                         if (tegra_emc_clk_sel[j].input == NULL)
941                                 continue;       /* invalid entry */
942
943                         if ((mv >= table[j].emc_min_mv) &&
944                             (rate < table[j].rate))
945                                 rate = table[j].rate;
946                 }
947                 /* Table entries specify rate in kHz */
948                 emc->dvfs->freqs[i] = rate * 1000;
949         }
950 }
951
952 static int init_emc_table(const struct tegra11_emc_table *table, int table_size)
953 {
954         int i, mv;
955         u32 reg;
956         bool max_entry = false;
957         unsigned long boot_rate, max_rate;
958         struct clk *pll_c = tegra_get_clock_by_name("pll_c");
959
960         emc_stats.clkchange_count = 0;
961         spin_lock_init(&emc_stats.spinlock);
962         emc_stats.last_update = get_jiffies_64();
963         emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;
964
965         boot_rate = clk_get_rate(emc) / 1000;
966         max_rate = clk_get_max_rate(emc) / 1000;
967
968         if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
969                 pr_err("tegra: unsupported DRAM type %u\n", dram_type);
970                 return -ENODATA;
971         }
972
973         if (emc->parent != tegra_get_clock_by_name("pll_m")) {
974                 pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
975                         emc->parent->name);
976                 return -ENODATA;
977         }
978
979         if (!table || !table_size) {
980                 pr_err("tegra: EMC DFS table is empty\n");
981                 return -ENODATA;
982         }
983
984         tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
985         switch (table[0].rev) {
986         case 0x40:
987                 start_timing.burst_regs_num = table[0].burst_regs_num;
988                 start_timing.emc_trimmers_num = table[0].emc_trimmers_num;
989                 break;
990         default:
991                 pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
992                         table[0].rev);
993                 return -ENODATA;
994         }
995
996         /* Match EMC source/divider settings with table entries */
997         for (i = 0; i < tegra_emc_table_size; i++) {
998                 unsigned long table_rate = table[i].rate;
999
1000                 /* Skip "no-rate" entry, or entry violating ascending order */
1001                 if (!table_rate ||
1002                     (i && (table_rate <= table[i-1].rate)))
1003                         continue;
1004
1005                 BUG_ON(table[i].rev != table[0].rev);
1006
1007                 if (find_matching_input(&table[i], pll_c,
1008                                         &tegra_emc_clk_sel[i]))
1009                         continue;
1010
1011                 if (table_rate == boot_rate)
1012                         emc_stats.last_sel = i;
1013
1014                 if (table_rate == max_rate)
1015                         max_entry = true;
1016         }
1017
1018         /* Validate EMC rate and voltage limits */
1019         if (!max_entry) {
1020                 pr_err("tegra: invalid EMC DFS table: entry for max rate"
1021                        " %lu kHz is not found\n", max_rate);
1022                 return -ENODATA;
1023         }
1024
1025         tegra_emc_table = table;
1026
1027         if (emc->dvfs) {
1028                 adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
1029                 mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
1030                 if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
1031                         tegra_emc_table = NULL;
1032                         pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
1033                                " kHz does not match nominal voltage %d\n",
1034                                max_rate, emc->dvfs->max_millivolts);
1035                         return -ENODATA;
1036                 }
1037         }
1038
1039         pr_info("tegra: validated EMC DFS table\n");
1040
1041         /* Configure clock change mode according to dram type */
1042         reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
1043         reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
1044                 EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
1045         emc_writel(reg, EMC_CFG_2);
1046         return 0;
1047 }
1048
1049 static int __devinit tegra11_emc_probe(struct platform_device *pdev)
1050 {
1051         struct tegra11_emc_pdata *pdata;
1052         struct resource *res;
1053
1054         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1055         if (!res) {
1056                 dev_err(&pdev->dev, "missing register base\n");
1057                 return -ENOMEM;
1058         }
1059
1060         pdata = pdev->dev.platform_data;
1061         if (!pdata) {
1062                 dev_err(&pdev->dev, "missing platform data\n");
1063                 return -ENODATA;
1064         }
1065
1066         return init_emc_table(pdata->tables, pdata->num_tables);
1067 }
1068
1069 static struct platform_driver tegra11_emc_driver = {
1070         .driver         = {
1071                 .name   = "tegra-emc",
1072                 .owner  = THIS_MODULE,
1073         },
1074         .probe          = tegra11_emc_probe,
1075 };
1076
1077 int __init tegra11_emc_init(void)
1078 {
1079         return platform_driver_register(&tegra11_emc_driver);
1080 }
1081
1082 void tegra_emc_timing_invalidate(void)
1083 {
1084         emc_timing = NULL;
1085 }
1086
1087 void tegra_emc_dram_type_init(struct clk *c)
1088 {
1089         emc = c;
1090
1091         dram_type = (emc_readl(EMC_FBIO_CFG5) &
1092                      EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;
1093
1094         dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
1095 }
1096
1097 int tegra_emc_get_dram_type(void)
1098 {
1099         return dram_type;
1100 }
1101
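/*
 * Editor's note: soc_to_dram_bit_swap() remaps bits read back on the SoC side
 * into DRAM bit positions using the optional dram_to_soc_bit_map (presumably
 * describing board-level data-line swizzling); with no map installed the
 * value is simply masked.
 */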
1102 static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
1103 {
1104         int bit;
1105         u32 dram_val = 0;
1106
1107         /* tegra clock definitions always use a shifted mask */
1108         if (!dram_to_soc_bit_map)
1109                 return soc_val & dram_mask;
1110
1111         for (bit = dram_shift; bit < 32; bit++) {
1112                 u32 dram_bit_mask = 0x1 << bit;
1113                 u32 soc_bit_mask = dram_to_soc_bit_map[bit];
1114
1115                 if (!(dram_bit_mask & dram_mask))
1116                         break;
1117
1118                 if (soc_bit_mask & soc_val)
1119                         dram_val |= dram_bit_mask;
1120         }
1121
1122         return dram_val;
1123 }
1124
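/*
 * Editor's note: emc_read_mrr() performs an LPDDR2 Mode Register Read - wait
 * for any previous MRR data to be consumed (DIVLD clear), issue the MRR
 * command with device select and register address, wait for DIVLD to be set,
 * and return the data field.  Called under emc_access_lock (see the comment
 * above tegra_emc_set_rate()).
 */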
1125 static int emc_read_mrr(int dev, int addr)
1126 {
1127         int ret;
1128         u32 val;
1129
1130         if (dram_type != DRAM_TYPE_LPDDR2)
1131                 return -ENODEV;
1132
1133         ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
1134         if (ret)
1135                 return ret;
1136
1137         val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
1138         val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
1139         emc_writel(val, EMC_MRR);
1140
1141         ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
1142         if (ret)
1143                 return ret;
1144
1145         val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
1146         return val;
1147 }
1148
1149 int tegra_emc_get_dram_temperature(void)
1150 {
1151         int mr4;
1152         unsigned long flags;
1153
1154         spin_lock_irqsave(&emc_access_lock, flags);
1155
1156         mr4 = emc_read_mrr(0, 4);
1157         if (IS_ERR_VALUE(mr4)) {
1158                 spin_unlock_irqrestore(&emc_access_lock, flags);
1159                 return mr4;
1160         }
1161         spin_unlock_irqrestore(&emc_access_lock, flags);
1162
1163         mr4 = soc_to_dram_bit_swap(
1164                 mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
1165         return mr4;
1166 }
1167
1168 #ifdef CONFIG_DEBUG_FS
1169
1170 static struct dentry *emc_debugfs_root;
1171
1172 static int emc_stats_show(struct seq_file *s, void *data)
1173 {
1174         int i;
1175
1176         emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
1177
1178         seq_printf(s, "%-10s %-10s\n", "rate kHz", "time");
1179         for (i = 0; i < tegra_emc_table_size; i++) {
1180                 if (tegra_emc_clk_sel[i].input == NULL)
1181                         continue;       /* invalid entry */
1182
1183                 seq_printf(s, "%-10lu %-10llu\n", tegra_emc_table[i].rate,
1184                            cputime64_to_clock_t(emc_stats.time_at_clock[i]));
1185         }
1186         seq_printf(s, "%-15s %llu\n", "transitions:",
1187                    emc_stats.clkchange_count);
1188         seq_printf(s, "%-15s %llu\n", "time-stamp:",
1189                    cputime64_to_clock_t(emc_stats.last_update));
1190
1191         return 0;
1192 }
1193
1194 static int emc_stats_open(struct inode *inode, struct file *file)
1195 {
1196         return single_open(file, emc_stats_show, inode->i_private);
1197 }
1198
1199 static const struct file_operations emc_stats_fops = {
1200         .open           = emc_stats_open,
1201         .read           = seq_read,
1202         .llseek         = seq_lseek,
1203         .release        = single_release,
1204 };
1205
1206 static int dram_temperature_get(void *data, u64 *val)
1207 {
1208         *val = tegra_emc_get_dram_temperature();
1209         return 0;
1210 }
1211 DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
1212                         NULL, "%lld\n");
1213
1214 static int efficiency_get(void *data, u64 *val)
1215 {
1216         *val = tegra_emc_bw_efficiency;
1217         return 0;
1218 }
1219 static int efficiency_set(void *data, u64 val)
1220 {
1221         tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
1222         if (emc)
1223                 tegra_clk_shared_bus_update(emc);
1224
1225         return 0;
1226 }
1227 DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
1228                         efficiency_set, "%llu\n");
1229
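/*
 * Editor's note: the debugfs entries below land under <debugfs>/tegra_emc/:
 * "stats" (time-at-rate table and transition count), "clkchange_delay"
 * (minimum spacing between rate changes, in us), "dram_temperature"
 * (LPDDR2 MR4 readout) and "efficiency" (tegra_emc_bw_efficiency, writable).
 */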
1230 static int __init tegra_emc_debug_init(void)
1231 {
1232         if (!tegra_emc_table)
1233                 return 0;
1234
1235         emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
1236         if (!emc_debugfs_root)
1237                 return -ENOMEM;
1238
1239         if (!debugfs_create_file(
1240                 "stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
1241                 goto err_out;
1242
1243         if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
1244                 emc_debugfs_root, (u32 *)&clkchange_delay))
1245                 goto err_out;
1246
1247         if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
1248                                  NULL, &dram_temperature_fops))
1249                 goto err_out;
1250
1251         if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
1252                                  emc_debugfs_root, NULL, &efficiency_fops))
1253                 goto err_out;
1254
1255         return 0;
1256
1257 err_out:
1258         debugfs_remove_recursive(emc_debugfs_root);
1259         return -ENOMEM;
1260 }
1261
1262 late_initcall(tegra_emc_debug_init);
1263 #endif