9926b01d5fca96c3331529bd4b6c1d52992d44a2
[linux-2.6.git] / arch / arm / mach-tegra / tegra11_emc.c
1 /*
2  * arch/arm/mach-tegra/tegra11_emc.c
3  *
4  * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
18  *
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include <linux/module.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/platform_data/tegra_emc.h>
29 #include <linux/debugfs.h>
30 #include <linux/seq_file.h>
31 #include <linux/hrtimer.h>
32
33 #include <asm/cputime.h>
34
35 #include <mach/iomap.h>
36
37 #include "clock.h"
38 #include "dvfs.h"
39 #include "board.h"
40 #include "tegra11_emc.h"
41
42 #ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
43 static bool emc_enable = true;
44 #else
45 static bool emc_enable;
46 #endif
47 module_param(emc_enable, bool, 0644);
48
49 u8 tegra_emc_bw_efficiency = 100;
50
51 #define PLL_C_DIRECT_FLOOR              333500000
52 #define EMC_STATUS_UPDATE_TIMEOUT       100
53 #define TEGRA_EMC_TABLE_MAX_SIZE        16
54
55 enum {
56         DLL_CHANGE_NONE = 0,
57         DLL_CHANGE_ON,
58         DLL_CHANGE_OFF,
59 };
60
61 #define EMC_CLK_DIV_SHIFT               0
62 #define EMC_CLK_DIV_MASK                (0xFF << EMC_CLK_DIV_SHIFT)
63 #define EMC_CLK_SOURCE_SHIFT            29
64 #define EMC_CLK_SOURCE_MASK             (0x7 << EMC_CLK_SOURCE_SHIFT)
65 #define EMC_CLK_LOW_JITTER_ENABLE       (0x1 << 31)
66 #define EMC_CLK_MC_SAME_FREQ            (0x1 << 16)
67
68 /* FIXME: actual Tegar11 list */
69 #define BURST_REG_LIST \
70         DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),                     \
71         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),                    \
72         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR),                \
73         DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),                    \
74         DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),                     \
75         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),                    \
76         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),                    \
77         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),                    \
78         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),                    \
79         DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),                 \
80         DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),                 \
81         DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),                    \
82         DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),                   \
83         DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),                   \
84         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),                    \
85         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK),               \
86         DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY),                  \
87         DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA),           \
88         DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_2),             \
89         DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),                   \
90         DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK),               \
91         DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),                \
92         DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),      \
93         DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),    \
94         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),                \
95         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),                \
96         DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),              \
97         DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),               \
98         DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),                \
99         DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),                \
100         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),                   \
101         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),                \
102         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),                   \
103         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR),                 \
104         DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD),                    \
105         DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),                   \
106         DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),                  \
107         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),             \
108         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),               \
109         DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),                 \
110         DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA),             \
111         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),              \
112         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),               \
113         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),              \
114         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),            \
115         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),     \
116         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),         \
117         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),         \
118         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),         \
119         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),         \
120         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),        \
121         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),        \
122         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),        \
123         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),        \
124         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),        \
125         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),        \
126         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),        \
127         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),        \
128         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),          \
129         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL4),         \
130         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),         \
131         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),          \
132         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL),          \
133         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),         \
134         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),       \
135         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),      \
136         DEFINE_REG(TEGRA_EMC_BASE, EMC_DSR_VTTGEN_DRV),         \
137         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN),            \
138         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),             \
139         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL),          \
140         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),          \
141         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),          \
142         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),           \
143         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2),          \
144         DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG2),       \
145         DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG3),       \
146         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),                    \
147         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),           \
148         DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),   \
149         DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL1),       \
150         DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL2),       \
151                                                                         \
152         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),             \
153         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
154         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),      \
155         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),       \
156         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),       \
157         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),      \
158         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),      \
159         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),      \
160         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),  \
161         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),  \
162         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),      \
163         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),      \
164         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),      \
165         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),      \
166         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),        \
167         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),       \
168         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),           \
169         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),  \
170         DEFINE_REG(TEGRA_EMC_BASE, EMC_SEL_DPD_CTRL),
171
172 #define BURST_UP_DOWN_REG_LIST \
173         DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT),     \
174         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_0),   \
175         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_1),   \
176         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_0),   \
177         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_0),  \
178         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_2),   \
179         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_1),   \
180         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_1),  \
181         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_3),   \
182         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_0),  \
183         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_1),
184
185 #define EMC_TRIMMERS_REG_LIST \
186         DEFINE_REG(0, EMC_CDB_CNTL_1),                          \
187         DEFINE_REG(0, EMC_FBIO_CFG6),                           \
188         DEFINE_REG(0, EMC_QUSE),                                \
189         DEFINE_REG(0, EMC_EINPUT),                              \
190         DEFINE_REG(0, EMC_EINPUT_DURATION),                     \
191         DEFINE_REG(0, EMC_DLL_XFORM_DQS0),                      \
192         DEFINE_REG(0, EMC_QSAFE),                               \
193         DEFINE_REG(0, EMC_DLL_XFORM_QUSE0),                     \
194         DEFINE_REG(0, EMC_RDV),                                 \
195         DEFINE_REG(0, EMC_XM2DQSPADCTRL4),                      \
196         DEFINE_REG(0, EMC_XM2DQSPADCTRL3),                      \
197         DEFINE_REG(0, EMC_DLL_XFORM_DQ0),                       \
198         DEFINE_REG(0, EMC_AUTO_CAL_CONFIG),                     \
199         DEFINE_REG(0, EMC_DLL_XFORM_ADDR0),                     \
200         DEFINE_REG(0, EMC_XM2CLKPADCTRL2),                      \
201         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS0),                     \
202         DEFINE_REG(0, EMC_DLL_XFORM_ADDR1),                     \
203         DEFINE_REG(0, EMC_DLL_XFORM_ADDR2),                     \
204         DEFINE_REG(0, EMC_DLL_XFORM_DQS1),                      \
205         DEFINE_REG(0, EMC_DLL_XFORM_DQS2),                      \
206         DEFINE_REG(0, EMC_DLL_XFORM_DQS3),                      \
207         DEFINE_REG(0, EMC_DLL_XFORM_DQ1),                       \
208         DEFINE_REG(0, EMC_DLL_XFORM_DQ2),                       \
209         DEFINE_REG(0, EMC_DLL_XFORM_DQ3),                       \
210         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS1),                     \
211         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS2),                     \
212         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS3),                     \
213         DEFINE_REG(0, EMC_DLL_XFORM_QUSE1),                     \
214         DEFINE_REG(0, EMC_DLL_XFORM_QUSE2),                     \
215         DEFINE_REG(0, EMC_DLL_XFORM_QUSE3),
216
217
218 #define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
219 static const void __iomem *burst_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
220         BURST_REG_LIST
221 };
222 #ifndef EMULATE_CLOCK_SWITCH
223 static const void __iomem *burst_up_down_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
224         BURST_UP_DOWN_REG_LIST
225 };
226 #endif
227 #undef DEFINE_REG
228
229
230 #define DEFINE_REG(base, reg) (reg)
231 #ifndef EMULATE_CLOCK_SWITCH
232 static const u32 emc_trimmer_offs[TEGRA11_EMC_MAX_NUM_REGS] = {
233         EMC_TRIMMERS_REG_LIST
234 };
235 #endif
236 #undef DEFINE_REG
237
238
239 #define DEFINE_REG(base, reg)   reg##_INDEX
240 enum {
241         BURST_REG_LIST
242 };
243 #undef DEFINE_REG
244
245 #define DEFINE_REG(base, reg)   reg##_TRIM_INDEX
246 enum {
247         EMC_TRIMMERS_REG_LIST
248 };
249 #undef DEFINE_REG
250
251
/* Maps one EMC DFS table entry to the clock source used to produce its rate. */
struct emc_sel {
        struct clk      *input;         /* parent clock; NULL marks an invalid entry */
        u32             value;          /* EMC clock source register setting */
        unsigned long   input_rate;     /* rate of the parent clock -- units match table (kHz) per usage; TODO confirm */
};
static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
/* Timing read back from hardware at the first rate change (boot timing may
   not match any DFS table entry). */
static struct tegra11_emc_table start_timing;
/* Currently applied table entry; NULL until the first successful switch. */
static const struct tegra11_emc_table *emc_timing;

static ktime_t clkchange_time;          /* timestamp of the last clock change */
static int clkchange_delay = 100;       /* min us between consecutive changes */

static const u32 *dram_to_soc_bit_map;
static const struct tegra11_emc_table *tegra_emc_table;
static int tegra_emc_table_size;

static u32 dram_dev_num;
static u32 dram_type = -1;              /* presumably set during probe; -1 = unknown -- TODO confirm */

static struct clk *emc;

/* Per-rate residency statistics; all fields guarded by .spinlock. */
static struct {
        cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
        int last_sel;
        u64 last_update;
        u64 clkchange_count;
        spinlock_t spinlock;
} emc_stats;

/* Serializes the actual EMC register sequence against concurrent access. */
static DEFINE_SPINLOCK(emc_access_lock);

static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
static void __iomem *emc0_base = IO_ADDRESS(TEGRA_EMC0_BASE);
static void __iomem *emc1_base = IO_ADDRESS(TEGRA_EMC1_BASE);
static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
288
/*
 * Thin MMIO accessors; @addr is a register offset within the respective
 * aperture (EMC common, EMC channel 0/1, MC).
 */
static inline void emc_writel(u32 val, unsigned long addr)
{
        writel(val, (u32)emc_base + addr);
}
static inline void emc0_writel(u32 val, unsigned long addr)
{
        writel(val, (u32)emc0_base + addr);
}
static inline void emc1_writel(u32 val, unsigned long addr)
{
        writel(val, (u32)emc1_base + addr);
}
static inline u32 emc_readl(unsigned long addr)
{
        return readl((u32)emc_base + addr);
}
static inline void mc_writel(u32 val, unsigned long addr)
{
        writel(val, (u32)mc_base + addr);
}
static inline u32 mc_readl(unsigned long addr)
{
        return readl((u32)mc_base + addr);
}

/*
 * Queue a register write into the EMC clock-change FIFO: @val goes to the
 * data register, @addr (the target register offset) to the address register.
 * Per its use in emc_set_clock, the queued writes are presumably executed by
 * hardware as part of the clock change sequence rather than immediately.
 */
static inline void ccfifo_writel(u32 val, unsigned long addr)
{
        writel(val, (u32)emc_base + EMC_CCFIFO_DATA);
        writel(addr, (u32)emc_base + EMC_CCFIFO_ADDR);
}
319
320 static int last_round_idx;
321 static inline int get_start_idx(unsigned long rate)
322 {
323         if (tegra_emc_table[last_round_idx].rate == rate)
324                 return last_round_idx;
325         return 0;
326 }
327
328 static void emc_last_stats_update(int last_sel)
329 {
330         unsigned long flags;
331         u64 cur_jiffies = get_jiffies_64();
332
333         spin_lock_irqsave(&emc_stats.spinlock, flags);
334
335         if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
336                 emc_stats.time_at_clock[emc_stats.last_sel] =
337                         emc_stats.time_at_clock[emc_stats.last_sel] +
338                         (cur_jiffies - emc_stats.last_update);
339
340         emc_stats.last_update = cur_jiffies;
341
342         if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
343                 emc_stats.clkchange_count++;
344                 emc_stats.last_sel = last_sel;
345         }
346         spin_unlock_irqrestore(&emc_stats.spinlock, flags);
347 }
348
349 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
350 {
351         int i;
352         for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
353                 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
354                         return 0;
355                 udelay(1);
356         }
357         return -ETIMEDOUT;
358 }
359
360 static inline void emc_timing_update(void)
361 {
362         int err;
363
364         emc_writel(0x1, EMC_TIMING_CONTROL);
365         err = wait_for_update(EMC_STATUS,
366                               EMC_STATUS_TIMING_UPDATE_STALLED, false);
367         if (err) {
368                 pr_err("%s: timing update error: %d", __func__, err);
369                 BUG();
370         }
371 }
372
373 static inline void auto_cal_disable(void)
374 {
375         int err;
376
377         emc_writel(0, EMC_AUTO_CAL_INTERVAL);
378         err = wait_for_update(EMC_AUTO_CAL_STATUS,
379                               EMC_AUTO_CAL_STATUS_ACTIVE, false);
380         if (err) {
381                 pr_err("%s: disable auto-cal error: %d", __func__, err);
382                 BUG();
383         }
384 }
385
/*
 * If the next timing turns a DQS/VREF enable bit on while the last timing
 * had it off, set the bit ahead of the clock change so it has time to
 * settle.  Returns true when a preset write was issued (caller then waits).
 */
static inline bool dqs_preset(const struct tegra11_emc_table *next_timing,
                              const struct tegra11_emc_table *last_timing)
{
        bool ret = false;

/* Preset @bit in burst register @reg when it transitions off -> on. */
#define DQS_SET(reg, bit)                                                     \
        do {                                                                  \
                if ((next_timing->burst_regs[EMC_##reg##_INDEX] &             \
                     EMC_##reg##_##bit##_ENABLE) &&                           \
                    (!(last_timing->burst_regs[EMC_##reg##_INDEX] &           \
                       EMC_##reg##_##bit##_ENABLE)))   {                      \
                        emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
                                   | EMC_##reg##_##bit##_ENABLE, EMC_##reg);  \
                        ret = true;                                           \
                }                                                             \
        } while (0)


/* Per-channel variant for trimmer registers.
 * NOTE(review): currently unused in this function -- kept, presumably, for
 * future per-channel VREF presets. */
#define DQS_SET_TRIM(reg, bit, ch)                                             \
        do {                                                                   \
                if ((next_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX]    \
                     & EMC_##reg##_##bit##_ENABLE) &&                          \
                    (!(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX]  \
                       & EMC_##reg##_##bit##_ENABLE)))   {                     \
                        emc##ch##_writel(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX] \
                                   | EMC_##reg##_##bit##_ENABLE, EMC_##reg);   \
                        ret = true;                                            \
                }                                                              \
        } while (0)

        DQS_SET(XM2DQSPADCTRL2, VREF);

        return ret;
}
420
421 static inline void overwrite_mrs_wait_cnt(
422         const struct tegra11_emc_table *next_timing,
423         bool zcal_long)
424 {
425         u32 reg;
426         u32 cnt = 512;
427
428         /* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
429            for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
430            expected operation length. Reduce the latter by the overlapping
431            zq-calibration, if any */
432         if (zcal_long)
433                 cnt -= dram_dev_num * 256;
434
435         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
436                 EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
437                 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
438         if (cnt < reg)
439                 cnt = reg;
440
441         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
442                 (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
443         reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
444                 EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
445
446         emc_writel(reg, EMC_MRS_WAIT_CNT);
447 }
448
449 static inline int get_dll_change(const struct tegra11_emc_table *next_timing,
450                                  const struct tegra11_emc_table *last_timing)
451 {
452         bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
453         bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
454
455         if (next_dll_enabled == last_dll_enabled)
456                 return DLL_CHANGE_NONE;
457         else if (next_dll_enabled)
458                 return DLL_CHANGE_ON;
459         else
460                 return DLL_CHANGE_OFF;
461 }
462
/*
 * Queue the DRAM mode-register updates into the clock-change FIFO.  Only
 * registers that actually change are written; the per-DRAM-type write order
 * below is mandatory and must not be rearranged.
 */
static inline void set_dram_mode(const struct tegra11_emc_table *next_timing,
                                 const struct tegra11_emc_table *last_timing,
                                 int dll_change)
{
        if (dram_type == DRAM_TYPE_DDR3) {
                /* first mode_1, then mode_2, then mode_reset*/
                if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
                        ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
                if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
                        ccfifo_writel(next_timing->emc_mode_2, EMC_EMRS2);

                /* MRS is also rewritten when the DLL is being turned on,
                   with the DLL-reset bit and long wait count forced */
                if ((next_timing->emc_mode_reset !=
                     last_timing->emc_mode_reset) ||
                    (dll_change == DLL_CHANGE_ON)) {
                        u32 reg = next_timing->emc_mode_reset &
                                (~EMC_MODE_SET_DLL_RESET);
                        if (dll_change == DLL_CHANGE_ON) {
                                reg |= EMC_MODE_SET_DLL_RESET;
                                reg |= EMC_MODE_SET_LONG_CNT;
                        }
                        ccfifo_writel(reg, EMC_MRS);
                }
        } else {
                /* first mode_2, then mode_1; mode_reset is not applicable */
                if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
                        ccfifo_writel(next_timing->emc_mode_2, EMC_MRW2);
                if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
                        ccfifo_writel(next_timing->emc_mode_1, EMC_MRW);
                if (next_timing->emc_mode_4 != last_timing->emc_mode_4)
                        ccfifo_writel(next_timing->emc_mode_4, EMC_MRW4);
        }
}
495
496 static inline void do_clock_change(u32 clk_setting)
497 {
498         int err;
499
500         mc_readl(MC_EMEM_ADR_CFG);      /* completes prev writes */
501         writel(clk_setting, (u32)clk_base + emc->reg);
502         readl((u32)clk_base + emc->reg);/* completes prev write */
503
504         err = wait_for_update(EMC_INTSTATUS,
505                               EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
506         if (err) {
507                 pr_err("%s: clock change completion error: %d", __func__, err);
508                 BUG();
509         }
510 }
511
/*
 * Execute the full EMC DFS sequence switching from @last_timing to
 * @next_timing, with @clk_setting applied to the EMC clock source register
 * at the stall point.  Called with emc_access_lock held.  The numbered
 * steps follow the hardware-required change sequence; do not reorder them.
 */
static noinline void emc_set_clock(const struct tegra11_emc_table *next_timing,
                                   const struct tegra11_emc_table *last_timing,
                                   u32 clk_setting)
{
#ifndef EMULATE_CLOCK_SWITCH
        int i, dll_change, pre_wait;
        bool dyn_sref_enabled, zcal_long;

        u32 emc_cfg_reg = emc_readl(EMC_CFG);

        dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
        dll_change = get_dll_change(next_timing, last_timing);
        /* zcal is "turning on" when the next timing has a non-zero interval
           and the last one had none */
        zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
                (last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

        /* FIXME: remove steps enumeration below? */

        /* 1. clear clkchange_complete interrupts */
        emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

        /* 2. disable dynamic self-refresh and preset dqs vref, then wait for
           possible self-refresh entry/exit and/or dqs vref settled - waiting
           before the clock change decreases worst case change stall time */
        pre_wait = 0;
        if (dyn_sref_enabled) {
                emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
                emc_writel(emc_cfg_reg, EMC_CFG);
                pre_wait = 5;           /* 5us+ for self-refresh entry/exit */
        }

        /* 2.5 check dq/dqs vref delay */
        if (dqs_preset(next_timing, last_timing)) {
                if (pre_wait < 3)
                        pre_wait = 3;   /* 3us+ for dqs vref settled */
        }
        if (pre_wait) {
                emc_timing_update();
                udelay(pre_wait);
        }

        /* 3. disable auto-cal if vref mode is switching - removed */

        /* 4. program burst shadow registers */
        for (i = 0; i < next_timing->burst_regs_num; i++) {
                if (!burst_reg_addr[i])
                        continue;
                __raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
        }
        /* trimmers are per-channel: same offset in both EMC apertures */
        for (i = 0; i < next_timing->emc_trimmers_num; i++) {
                __raw_writel(next_timing->emc_trimmers_0[i],
                        (u32)emc0_base + emc_trimmer_offs[i]);
                __raw_writel(next_timing->emc_trimmers_1[i],
                        (u32)emc1_base + emc_trimmer_offs[i]);
        }
        emc_cfg_reg &= ~EMC_CFG_UPDATE_MASK;
        emc_cfg_reg |= next_timing->emc_cfg & EMC_CFG_UPDATE_MASK;
        emc_writel(emc_cfg_reg, EMC_CFG);
        wmb();
        barrier();

        /* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
           overwrite DFS table setting */
        if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
                overwrite_mrs_wait_cnt(next_timing, zcal_long);

        /* 5.2 disable auto-refresh to save time after clock change */
        emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);

        /* 6. turn Off dll and enter self-refresh on DDR3 */
        if (dram_type == DRAM_TYPE_DDR3) {
                if (dll_change == DLL_CHANGE_OFF)
                        ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
                ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
                              EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
        }

        /* 7. flow control marker 2 */
        ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);

        /* 8. exit self-refresh on DDR3 */
        if (dram_type == DRAM_TYPE_DDR3)
                ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);

        /* 9. set dram mode registers */
        set_dram_mode(next_timing, last_timing, dll_change);

        /* 10. issue zcal command if turning zcal On */
        if (zcal_long) {
                ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
                if (dram_dev_num > 1)
                        ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
        }

        /* 10.1 dummy write to RO register to remove stall after change */
        ccfifo_writel(0, EMC_CCFIFO_STATUS);

        /* 11.5 program burst_up_down registers if emc rate is going down */
        if (next_timing->rate < last_timing->rate) {
                for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
                        __raw_writel(next_timing->burst_up_down_regs[i],
                                burst_up_down_reg_addr[i]);
                wmb();
        }

        /* 12-14. read any MC register to ensure the programming is done
           change EMC clock source register wait for clk change completion */
        do_clock_change(clk_setting);

        /* 14.1 re-enable auto-refresh */
        emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);

        /* 14.2 program burst_up_down registers if emc rate is going up */
        if (next_timing->rate > last_timing->rate) {
                for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
                        __raw_writel(next_timing->burst_up_down_regs[i],
                                burst_up_down_reg_addr[i]);
                wmb();
        }

        /* 15. restore auto-cal - removed */

        /* 16. restore dynamic self-refresh */
        if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
                emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
                emc_writel(emc_cfg_reg, EMC_CFG);
        }

        /* 17. set zcal wait count */
        if (zcal_long)
                emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

        /* 18. update restored timing */
        udelay(2);
        emc_timing_update();
#else
        /* FIXME: implement */
        pr_info("tegra11_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
                next_timing->rate, clk_setting);
#endif
}
652
653 static inline void emc_get_timing(struct tegra11_emc_table *timing)
654 {
655         int i;
656
657         /* burst and trimmers updates depends on previous state; burst_up_down
658            are stateless */
659         for (i = 0; i < timing->burst_regs_num; i++) {
660                 if (burst_reg_addr[i])
661                         timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
662                 else
663                         timing->burst_regs[i] = 0;
664         }
665         for (i = 0; i < timing->emc_trimmers_num; i++) {
666                 timing->emc_trimmers_0[i] =
667                         __raw_readl((u32)emc0_base + emc_trimmer_offs[i]);
668                 timing->emc_trimmers_1[i] =
669                         __raw_readl((u32)emc1_base + emc_trimmer_offs[i]);
670         }
671         timing->emc_acal_interval = 0;
672         timing->emc_zcal_cnt_long = 0;
673         timing->emc_mode_reset = 0;
674         timing->emc_mode_1 = 0;
675         timing->emc_mode_2 = 0;
676         timing->emc_mode_4 = 0;
677         timing->emc_cfg = emc_readl(EMC_CFG);
678         timing->rate = clk_get_rate_locked(emc) / 1000;
679 }
680
681 /* The EMC registers have shadow registers. When the EMC clock is updated
682  * in the clock controller, the shadow registers are copied to the active
683  * registers, allowing glitchless memory bus frequency changes.
684  * This function updates the shadow registers for a new clock frequency,
685  * and relies on the clock lock on the emc clock to avoid races between
686  * multiple frequency changes. In addition access lock prevents concurrent
687  * access to EMC registers from reading MRR registers */
int tegra_emc_set_rate(unsigned long rate)
{
	int i;
	u32 clk_setting;
	const struct tegra11_emc_table *last_timing;
	unsigned long flags;
	s64 last_change_delay;

	if (!tegra_emc_table)
		return -EINVAL;

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	/* Start from the index cached by the last round_rate() call;
	   falls back to scanning the rest of the table */
	i = get_start_idx(rate);
	for (; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		if (tegra_emc_table[i].rate == rate)
			break;
	}

	if (i >= tegra_emc_table_size)
		return -EINVAL;

	if (!emc_timing) {
		/* can not assume that boot timing matches dfs table even
		   if boot frequency matches one of the table nodes */
		emc_get_timing(&start_timing);
		last_timing = &start_timing;
	}
	else
		last_timing = emc_timing;

	clk_setting = tegra_emc_clk_sel[i].value;

	/* Enforce a minimum interval (clkchange_delay us) between
	   back-to-back clock changes: burn off the remainder of the
	   previous change window before starting a new one */
	last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
	if ((last_change_delay >= 0) && (last_change_delay < clkchange_delay))
		udelay(clkchange_delay - (int)last_change_delay);

	/* Lock out concurrent EMC register access (e.g. MRR temperature
	   reads) while shadow registers are programmed and the clock
	   change is triggered */
	spin_lock_irqsave(&emc_access_lock, flags);
	emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
	clkchange_time = ktime_get();
	emc_timing = &tegra_emc_table[i];
	spin_unlock_irqrestore(&emc_access_lock, flags);

	emc_last_stats_update(i);

	pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

	return 0;
}
741
742 long tegra_emc_round_rate_updown(unsigned long rate, bool up)
743 {
744         int i;
745         unsigned long table_rate;
746
747         if (!tegra_emc_table)
748                 return clk_get_rate_locked(emc); /* no table - no rate change */
749
750         if (!emc_enable)
751                 return -EINVAL;
752
753         pr_debug("%s: %lu\n", __func__, rate);
754
755         /* Table entries specify rate in kHz */
756         rate = rate / 1000;
757
758         i = get_start_idx(rate);
759         for (; i < tegra_emc_table_size; i++) {
760                 if (tegra_emc_clk_sel[i].input == NULL)
761                         continue;       /* invalid entry */
762
763                 table_rate = tegra_emc_table[i].rate;
764                 if (table_rate >= rate) {
765                         if (!up && i && (table_rate > rate)) {
766                                 i--;
767                                 table_rate = tegra_emc_table[i].rate;
768                         }
769                         pr_debug("%s: using %lu\n", __func__, table_rate);
770                         last_round_idx = i;
771                         return table_rate * 1000;
772                 }
773         }
774
775         return -EINVAL;
776 }
777
778 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
779 {
780         int i;
781
782         if (!tegra_emc_table) {
783                 if (rate == clk_get_rate_locked(emc)) {
784                         *div_value = emc->div - 2;
785                         return emc->parent;
786                 }
787                 return NULL;
788         }
789
790         pr_debug("%s: %lu\n", __func__, rate);
791
792         /* Table entries specify rate in kHz */
793         rate = rate / 1000;
794
795         i = get_start_idx(rate);
796         for (; i < tegra_emc_table_size; i++) {
797                 if (tegra_emc_table[i].rate == rate) {
798                         struct clk *p = tegra_emc_clk_sel[i].input;
799
800                         if (p && (tegra_emc_clk_sel[i].input_rate ==
801                                   clk_get_rate(p))) {
802                                 *div_value = (tegra_emc_clk_sel[i].value &
803                                         EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
804                                 return p;
805                         }
806                 }
807         }
808         return NULL;
809 }
810
/*
 * Check whether the clock source for the table entry matching @rate (Hz)
 * is already running at the expected input rate. Returns true when ready
 * (or when no table/entry constrains the rate). When not ready, fills in
 * *parent, *parent_rate and a *backup_rate to run at meanwhile (set to
 * -EINVAL when no backup is available).
 */
bool tegra_emc_is_parent_ready(unsigned long rate, struct clk **parent,
		unsigned long *parent_rate, unsigned long *backup_rate)
{

	int i;
	struct clk *p = NULL;
	unsigned long p_rate = 0;

	if (!tegra_emc_table)
		return true;

	pr_debug("%s: %lu\n", __func__, rate);

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	i = get_start_idx(rate);
	for (; i < tegra_emc_table_size; i++) {
		if (tegra_emc_table[i].rate == rate) {
			p = tegra_emc_clk_sel[i].input;
			if (!p)
				continue;	/* invalid entry */

			p_rate = tegra_emc_clk_sel[i].input_rate;
			if (p_rate == clk_get_rate(p))
				return true;
			break;
		}
	}

	/* Table match not found - "non existing parent" is ready */
	if (!p)
		return true;

#ifdef CONFIG_TEGRA_PLLM_SCALED
	/*
	 * Table match found, but parent is not ready - check if backup entry
	 * was found during initialization, and return the respective backup
	 * rate
	 */
	if (emc->shared_bus_backup.input &&
	    (emc->shared_bus_backup.input != p)) {
		*parent = p;
		*parent_rate = p_rate;
		*backup_rate = emc->shared_bus_backup.bus_rate;
		return false;
	}
#else
	/*
	 * Table match found, but parent is not ready - continue search
	 * for backup rate: min rate above requested that has different
	 * parent source (since only pll_c is scaled and may not be ready,
	 * any other parent can provide backup)
	 */
	*parent = p;
	*parent_rate = p_rate;

	for (i++; i < tegra_emc_table_size; i++) {
		p = tegra_emc_clk_sel[i].input;
		if (!p)
			continue;	/* invalid entry */

		if (p != (*parent)) {
			/* Table rates are kHz; backup rate reported in Hz */
			*backup_rate = tegra_emc_table[i].rate * 1000;
			return false;
		}
	}
#endif
	/* Parent is not ready, and no backup found */
	*backup_rate = -EINVAL;
	return false;
}
883
884 static inline const struct clk_mux_sel *get_emc_input(u32 val)
885 {
886         const struct clk_mux_sel *sel;
887
888         for (sel = emc->inputs; sel->input != NULL; sel++) {
889                 if (sel->value == val)
890                         break;
891         }
892         return sel;
893 }
894
/*
 * Validate one DFS table entry's clock source/divider settings and fill
 * in the corresponding emc_sel slot on success. Returns 0 when the entry
 * is usable, -EINVAL otherwise (the entry is then treated as invalid by
 * the callers).
 */
static int find_matching_input(const struct tegra11_emc_table *table,
			struct clk *pll_c, struct emc_sel *emc_clk_sel)
{
	u32 div_value = (table->src_sel_reg & EMC_CLK_DIV_MASK) >>
		EMC_CLK_DIV_SHIFT;
	u32 src_value = (table->src_sel_reg & EMC_CLK_SOURCE_MASK) >>
		EMC_CLK_SOURCE_SHIFT;
	unsigned long input_rate = 0;
	unsigned long table_rate = table->rate * 1000; /* table->rate is kHz */
	const struct clk_mux_sel *sel = get_emc_input(src_value);

#ifdef CONFIG_TEGRA_PLLM_SCALED
	struct clk *scalable_pll = emc->parent; /* pll_m is a boot parent */
#else
	struct clk *scalable_pll = pll_c;
#endif
	pr_info_once("tegra: %s is selected as scalable EMC clock source\n",
		     scalable_pll->name);

	/* Only even divider register values are accepted */
	if (div_value & 0x1) {
		pr_warn("tegra: invalid odd divider for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	/* get_emc_input() returned the terminator - unknown source value */
	if (!sel->input) {
		pr_warn("tegra: no matching input found for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	/* Low-jitter (LJ) path may not be combined with a non-zero divider */
	if (div_value && (table->src_sel_reg & EMC_CLK_LOW_JITTER_ENABLE)) {
		pr_warn("tegra: invalid LJ path for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	/* EMC/MC same-frequency flags in the source register and in the MC
	   arbiter shadow register must agree (both set or both clear) */
	if (!(table->src_sel_reg & EMC_CLK_MC_SAME_FREQ) !=
	    !(MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
	      table->burst_regs[MC_EMEM_ARB_MISC0_INDEX])) {
		pr_warn("tegra: ambiguous EMC to MC ratio for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}

#ifndef CONFIG_TEGRA_DUAL_CBUS
	/* With a single cbus, pll_c is a cbus source and cannot drive EMC */
	if (sel->input == pll_c) {
		pr_warn("tegra: %s is cbus source: no EMC rate %lu support\n",
			sel->input->name, table_rate);
		return -EINVAL;
	}
#endif

	if (sel->input == scalable_pll) {
		/* Scalable source will be re-programmed to match this rate */
		input_rate = table_rate * (1 + div_value / 2);
	} else {
		/* all other sources are fixed, must exactly match the rate */
		input_rate = clk_get_rate(sel->input);
		if (input_rate != (table_rate * (1 + div_value / 2))) {
			pr_warn("tegra: EMC rate %lu does not match %s rate %lu\n",
				table_rate, sel->input->name, input_rate);
			return -EINVAL;
		}
	}

#ifdef CONFIG_TEGRA_PLLM_SCALED
		if (sel->input == pll_c) {
			/* maybe overwritten in a loop - end up at max rate
			   from pll_c */
			emc->shared_bus_backup.input = pll_c;
			emc->shared_bus_backup.bus_rate = table_rate;
		}
#endif
	/* Get ready emc clock selection settings for this table rate */
	emc_clk_sel->input = sel->input;
	emc_clk_sel->input_rate = input_rate;
	emc_clk_sel->value = table->src_sel_reg;

	return 0;
}
972
973 static void adjust_emc_dvfs_table(const struct tegra11_emc_table *table,
974                                   int table_size)
975 {
976         int i, j;
977         unsigned long rate;
978
979         for (i = 0; i < MAX_DVFS_FREQS; i++) {
980                 int mv = emc->dvfs->millivolts[i];
981                 if (!mv)
982                         break;
983
984                 /* For each dvfs voltage find maximum supported rate;
985                    use 1MHz placeholder if not found */
986                 for (rate = 1000, j = 0; j < table_size; j++) {
987                         if (tegra_emc_clk_sel[j].input == NULL)
988                                 continue;       /* invalid entry */
989
990                         if ((mv >= table[j].emc_min_mv) &&
991                             (rate < table[j].rate))
992                                 rate = table[j].rate;
993                 }
994                 /* Table entries specify rate in kHz */
995                 emc->dvfs->freqs[i] = rate * 1000;
996         }
997 }
998
#ifdef CONFIG_TEGRA_PLLM_SCALED
/* When pll_m is scaled, pll_c must provide backup rate;
   if not - remove rates that require pll_m scaling */
static int purge_emc_table(unsigned long max_rate)
{
	int i;
	int ret = 0;

	/* A backup source was identified during table init - nothing to do */
	if (emc->shared_bus_backup.input)
		return ret;

	pr_warn("tegra: selected pll_m scaling option but no backup source:\n");
	pr_warn("       removed not supported entries from the table:\n");

	/* made all entries with non matching rate invalid */
	for (i = 0; i < tegra_emc_table_size; i++) {
		struct emc_sel *sel = &tegra_emc_clk_sel[i];
		if (sel->input) {
			if (clk_get_rate(sel->input) != sel->input_rate) {
				pr_warn("       EMC rate %lu\n",
					tegra_emc_table[i].rate * 1000);
				sel->input = NULL;
				sel->input_rate = 0;
				sel->value = 0;
				/* signal the caller when the maximum rate
				   entry itself had to be removed */
				if (max_rate == tegra_emc_table[i].rate)
					ret = -EINVAL;
			}
		}
	}
	return ret;
}
#else
/* When pll_m is fixed @ max EMC rate, it always provides backup for pll_c */
#define purge_emc_table(max_rate) (0)
#endif
1034
/*
 * Validate the platform-supplied DFS table and install it as
 * tegra_emc_table. Checks DRAM type, boot parent, table revision, and
 * per-entry source/divider settings; establishes the maximum EMC rate and
 * (when DVFS is present) re-derives the DVFS ladder and verifies the max
 * rate against nominal voltage. Returns 0 on success, -ENODATA otherwise.
 */
static int init_emc_table(const struct tegra11_emc_table *table, int table_size)
{
	int i, mv;
	u32 reg;
	bool max_entry = false;
	bool emc_max_dvfs_sel = get_emc_max_dvfs();
	unsigned long boot_rate, max_rate;
	struct clk *pll_c = tegra_get_clock_by_name("pll_c");

	/* Reset DFS statistics; last_sel stays out of range until the boot
	   rate is matched against a table entry below */
	emc_stats.clkchange_count = 0;
	spin_lock_init(&emc_stats.spinlock);
	emc_stats.last_update = get_jiffies_64();
	emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

	if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
		pr_err("tegra: not supported DRAM type %u\n", dram_type);
		return -ENODATA;
	}

	/* DFS assumes EMC boots on pll_m */
	if (emc->parent != tegra_get_clock_by_name("pll_m")) {
		pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
			emc->parent->name);
		return -ENODATA;
	}

	if (!table || !table_size) {
		pr_err("tegra: EMC DFS table is empty\n");
		return -ENODATA;
	}

	/* Rates below are handled in kHz */
	boot_rate = clk_get_rate(emc) / 1000;
	max_rate = clk_get_rate(emc->parent) / 1000;

	tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
	/* Only known table revisions are accepted */
	switch (table[0].rev) {
	case 0x40:
	case 0x41:
	case 0x42:
		start_timing.burst_regs_num = table[0].burst_regs_num;
		start_timing.emc_trimmers_num = table[0].emc_trimmers_num;
		break;
	default:
		pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
			table[0].rev);
		return -ENODATA;
	}

	/* Match EMC source/divider settings with table entries */
	for (i = 0; i < tegra_emc_table_size; i++) {
		unsigned long table_rate = table[i].rate;

		/* Skip "no-rate" entry, or entry violating ascending order */
		if (!table_rate ||
		    (i && (table_rate <= table[i-1].rate)))
			continue;

		/* Mixed revisions within one table are a generation error */
		BUG_ON(table[i].rev != table[0].rev);

		if (find_matching_input(&table[i], pll_c,
					&tegra_emc_clk_sel[i]))
			continue;

		if (table_rate == boot_rate)
			emc_stats.last_sel = i;

		if (emc_max_dvfs_sel) {
			/* EMC max rate = max table entry above boot pll_m */
			if (table_rate >= max_rate) {
				max_rate = table_rate;
				max_entry = true;
			}
		} else if (table_rate == max_rate) {
			/* EMC max rate = boot pll_m rate */
			max_entry = true;
			break;
		}
	}

	/* Validate EMC rate and voltage limits */
	if (!max_entry) {
		pr_err("tegra: invalid EMC DFS table: entry for max rate"
		       " %lu kHz is not found\n", max_rate);
		return -ENODATA;
	}

	tegra_emc_table = table;

	/*
	 * Purge rates that cannot be reached because table does not specify
	 * proper backup source. If maximum rate was purged, fall back on boot
	 * pll_m rate as maximum limit. In any case propagate new maximum limit
	 * down stream to shared users, and check it against nominal voltage.
	 */
	if (purge_emc_table(max_rate))
		max_rate = clk_get_rate(emc->parent) / 1000;
	tegra_init_max_rate(emc, max_rate * 1000);

	if (emc->dvfs) {
		adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
		mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
		if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
			/* Max rate not reachable at nominal voltage:
			   reject the whole table */
			tegra_emc_table = NULL;
			pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
			       " kHz does not match nominal voltage %d\n",
			       max_rate, emc->dvfs->max_millivolts);
			return -ENODATA;
		}
	}

	pr_info("tegra: validated EMC DFS table\n");

	/* Configure clock change mode according to dram type */
	reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
	reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
		EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
	emc_writel(reg, EMC_CFG_2);
	return 0;
}
1153
1154 static int __devinit tegra11_emc_probe(struct platform_device *pdev)
1155 {
1156         struct tegra11_emc_pdata *pdata;
1157         struct resource *res;
1158
1159         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1160         if (!res) {
1161                 dev_err(&pdev->dev, "missing register base\n");
1162                 return -ENOMEM;
1163         }
1164
1165         pdata = pdev->dev.platform_data;
1166         if (!pdata) {
1167                 dev_err(&pdev->dev, "missing platform data\n");
1168                 return -ENODATA;
1169         }
1170
1171         return init_emc_table(pdata->tables, pdata->num_tables);
1172 }
1173
/* Platform driver bound to the "tegra-emc" device; DFS tables arrive
   through its platform data in tegra11_emc_probe() */
static struct platform_driver tegra11_emc_driver = {
	.driver         = {
		.name   = "tegra-emc",
		.owner  = THIS_MODULE,
	},
	.probe          = tegra11_emc_probe,
};
1181
/* ISO client combinations and their bandwidth usage shares (values look
   like percentages - confirm against tegra_emc_iso_usage_table_init) */
static struct emc_iso_usage tegra11_emc_iso_usage[] = {
	{ BIT(EMC_USER_DC),                     80 },
	{ BIT(EMC_USER_DC) | BIT(EMC_USER_VI),  45 },
};
1186
1187 int __init tegra11_emc_init(void)
1188 {
1189         int ret = platform_driver_register(&tegra11_emc_driver);
1190         if (!ret) {
1191                 tegra_clk_preset_emc_monitor();
1192                 tegra_emc_iso_usage_table_init(tegra11_emc_iso_usage,
1193                         ARRAY_SIZE(tegra11_emc_iso_usage));
1194         }
1195         return ret;
1196 }
1197
/* Drop the cached timing state; the next tegra_emc_set_rate() call will
   re-read the current timing from hardware via emc_get_timing() */
void tegra_emc_timing_invalidate(void)
{
	emc_timing = NULL;
}
1202
/* Cache the emc clock handle and detect DRAM type and device count from
   the EMC/MC configuration registers */
void tegra_emc_dram_type_init(struct clk *c)
{
	emc = c;

	/* DRAM type field of the FBIO configuration register */
	dram_type = (emc_readl(EMC_FBIO_CFG5) &
		     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;

	dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
}
1212
/* Return the DRAM type detected in tegra_emc_dram_type_init() */
int tegra_emc_get_dram_type(void)
{
	return dram_type;
}
1217
1218 static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
1219 {
1220         int bit;
1221         u32 dram_val = 0;
1222
1223         /* tegra clocks definitions use shifted mask always */
1224         if (!dram_to_soc_bit_map)
1225                 return soc_val & dram_mask;
1226
1227         for (bit = dram_shift; bit < 32; bit++) {
1228                 u32 dram_bit_mask = 0x1 << bit;
1229                 u32 soc_bit_mask = dram_to_soc_bit_map[bit];
1230
1231                 if (!(dram_bit_mask & dram_mask))
1232                         break;
1233
1234                 if (soc_bit_mask & soc_val)
1235                         dram_val |= dram_bit_mask;
1236         }
1237
1238         return dram_val;
1239 }
1240
/*
 * Read LPDDR2 mode register @addr from DRAM device @dev.
 * Returns the MRR data field on success, negative errno on failure.
 * NOTE(review): callers appear responsible for holding emc_access_lock
 * (see tegra_emc_get_dram_temperature) - confirm before adding new users.
 */
static int emc_read_mrr(int dev, int addr)
{
	int ret;
	u32 val;

	/* Mode register reads are only supported for LPDDR2 */
	if (dram_type != DRAM_TYPE_LPDDR2)
		return -ENODEV;

	/* Wait for DIVLD (data valid) to clear before issuing a new read */
	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
	if (ret)
		return ret;

	/* Issue the read: select chip and mode register address */
	val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
	val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
	emc_writel(val, EMC_MRR);

	/* Wait for DIVLD to set, indicating data is available */
	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
	if (ret)
		return ret;

	val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
	return val;
}
1264
/*
 * Read the LPDDR2 MR4 temperature field from DRAM device 0.
 * Returns the temperature code in DRAM bit order, or a negative errno
 * propagated from the MRR read.
 */
int tegra_emc_get_dram_temperature(void)
{
	int mr4;
	unsigned long flags;

	/* Serialize MRR access against concurrent EMC clock changes */
	spin_lock_irqsave(&emc_access_lock, flags);

	mr4 = emc_read_mrr(0, 4);
	if (IS_ERR_VALUE(mr4)) {
		spin_unlock_irqrestore(&emc_access_lock, flags);
		return mr4;
	}
	spin_unlock_irqrestore(&emc_access_lock, flags);

	/* Translate SOC bit positions to DRAM order and isolate the
	   temperature field */
	mr4 = soc_to_dram_bit_swap(
		mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
	return mr4;
}
1283
1284 #ifdef CONFIG_DEBUG_FS
1285
/* Root dentry for the tegra_emc debugfs nodes created below */
static struct dentry *emc_debugfs_root;
1287
1288 static int emc_stats_show(struct seq_file *s, void *data)
1289 {
1290         int i;
1291
1292         emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
1293
1294         seq_printf(s, "%-10s %-10s \n", "rate kHz", "time");
1295         for (i = 0; i < tegra_emc_table_size; i++) {
1296                 if (tegra_emc_clk_sel[i].input == NULL)
1297                         continue;       /* invalid entry */
1298
1299                 seq_printf(s, "%-10lu %-10llu \n", tegra_emc_table[i].rate,
1300                            cputime64_to_clock_t(emc_stats.time_at_clock[i]));
1301         }
1302         seq_printf(s, "%-15s %llu\n", "transitions:",
1303                    emc_stats.clkchange_count);
1304         seq_printf(s, "%-15s %llu\n", "time-stamp:",
1305                    cputime64_to_clock_t(emc_stats.last_update));
1306
1307         return 0;
1308 }
1309
/* debugfs open: bind the seq_file to emc_stats_show() */
static int emc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_stats_show, inode->i_private);
}
1314
/* File operations for the "stats" debugfs node */
static const struct file_operations emc_stats_fops = {
	.open           = emc_stats_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
1321
/* debugfs getter for "dram_temperature".
   NOTE(review): a negative errno from tegra_emc_get_dram_temperature()
   is stored into the u64 and displayed as a large unsigned value */
static int dram_temperature_get(void *data, u64 *val)
{
	*val = tegra_emc_get_dram_temperature();
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
			NULL, "%lld\n");
1329
/* debugfs getter: current bandwidth efficiency value (0..100) */
static int efficiency_get(void *data, u64 *val)
{
	*val = tegra_emc_bw_efficiency;
	return 0;
}
1335 static int efficiency_set(void *data, u64 val)
1336 {
1337         tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
1338         if (emc)
1339                 tegra_clk_shared_bus_update(emc);
1340
1341         return 0;
1342 }
1343 DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
1344                         efficiency_set, "%llu\n");
1345
/* Create debugfs nodes under tegra_emc/ (skipped when no valid DFS
   table was installed); on any failure remove everything created */
static int __init tegra_emc_debug_init(void)
{
	if (!tegra_emc_table)
		return 0;

	emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
	if (!emc_debugfs_root)
		return -ENOMEM;

	/* Per-rate residency statistics (read-only) */
	if (!debugfs_create_file(
		"stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
		goto err_out;

	/* Minimum delay (us, see udelay use in tegra_emc_set_rate) between
	   consecutive clock changes (writable) */
	if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
		emc_debugfs_root, (u32 *)&clkchange_delay))
		goto err_out;

	if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
				 NULL, &dram_temperature_fops))
		goto err_out;

	if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
				 emc_debugfs_root, NULL, &efficiency_fops))
		goto err_out;

	if (tegra_emc_iso_usage_debugfs_init(emc_debugfs_root))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(emc_debugfs_root);
	return -ENOMEM;
}

late_initcall(tegra_emc_debug_init);
#endif