1 /*
2  * arch/arm/mach-tegra/tegra11_emc.c
3  *
4  * Copyright (c) 2011-2013, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
18  *
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include <linux/of.h>
26 #include <linux/module.h>
27 #include <linux/delay.h>
28 #include <linux/platform_device.h>
29 #include <linux/platform_data/tegra_emc.h>
30 #include <linux/debugfs.h>
31 #include <linux/seq_file.h>
32 #include <linux/hrtimer.h>
33
34 #include <asm/cputime.h>
35
36 #include <mach/iomap.h>
37
38 #include "clock.h"
39 #include "dvfs.h"
40 #include "board.h"
41 #include "tegra11_emc.h"
42 #include "fuse.h"
43
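/*
 * Master switch for EMC DFS: the default follows CONFIG_TEGRA_EMC_SCALING_ENABLE
 * and the value can be changed at run time through the writable module
 * parameter below; when cleared, tegra_emc_round_rate_updown() rejects rate
 * changes.
 */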
44 #ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
45 static bool emc_enable = true;
46 #else
47 static bool emc_enable;
48 #endif
49 module_param(emc_enable, bool, 0644);
50
51 static u32 bw_calc_freqs_lpddr3[] = {
52         20000, 40000, 60000, 80000, 100000,
53         120000, 140000, 160000, 180000, 200000,
54         220000, 240000, 260000, 280000, 300000
55 };
56
57 static u32 tegra11_lpddr3_emc_usage_share_default[] = {
58         /* TODO. May need to be more conservative */
59         28, 35, 38, 40, 41, 42, 43, 43, 45, 45, 45, 46, 47, 48, 48,
60         /* When >300MHz BW is requested, assume 50% for default */
61         50
62 };
63
64 static u32 tegra11_lpddr3_emc_usage_share_dc[] = {
65         /* TODO. May need to be more conservative */
66         35, 47, 52, 55, 57, 58, 59, 60, 62, 62, 63, 64, 66, 67, 68,
67         /* When >300MHz BW is requested, assume 70% for dc only */
68         70
69 };
70
71 static u8 iso_share_calc_t114_lpddr3_default(unsigned long iso_bw);
72 static u8 iso_share_calc_t114_lpddr3_dc(unsigned long iso_bw);
73
74 static u32 bw_calc_freqs_ddr3[] = {
75         20000, 40000, 60000, 80000, 100000,
76         120000, 140000, 160000, 180000, 200000,
77         220000, 240000, 260000, 280000, 300000
78 };
79
80 static u32 tegra11_ddr3_emc_usage_share_default[] = {
81         28, 35, 38, 40, 41, 42, 43, 43, 45, 45, 45, 46, 47, 48, 48,
82         /* When >300MHz BW is requested, assume 50% for default */
83         50
84 };
85
86 static u32 tegra11_ddr3_emc_usage_share_dc[] = {
87         35, 47, 52, 55, 57, 58, 59, 60, 62, 62, 63, 64, 66, 67, 68,
88         /* When >300MHz BW is requested, assume 70% for dc only */
89         70
90 };
91
92 static u8 iso_share_calc_t114_ddr3_default(unsigned long iso_bw);
93 static u8 iso_share_calc_t114_ddr3_dc(unsigned long iso_bw);
94
95 u8 tegra_emc_bw_efficiency = 80;
96
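/*
 * ISO bandwidth guarantees: each emc_iso_usage entry maps a combination of
 * active ISO clients (bit mask of EMC_USER_*) to an ISO usage share in
 * percent, together with a callback that picks the share as a function of
 * the requested ISO bandwidth using the bw_calc_freqs_* breakpoints above.
 */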
97 static struct emc_iso_usage tegra11_lpddr3_emc_iso_usage[] = {
98         {
99                 BIT(EMC_USER_DC),
100                 80, iso_share_calc_t114_lpddr3_dc
101         },
102         {
103                 BIT(EMC_USER_DC) | BIT(EMC_USER_VI),
104                 45, iso_share_calc_t114_lpddr3_default
105         },
106         {
107                 BIT(EMC_USER_DC) | BIT(EMC_USER_MSENC),
108                 50, iso_share_calc_t114_lpddr3_default
109         },
110         {
111                 BIT(EMC_USER_DC) | BIT(EMC_USER_3D),
112                 50, iso_share_calc_t114_lpddr3_default
113         },
114         {
115                 BIT(EMC_USER_DC) | BIT(EMC_USER_VDE),
116                 45, iso_share_calc_t114_lpddr3_default
117         },
118 };
119
120 static struct emc_iso_usage tegra11_ddr3_emc_iso_usage[] = {
121         {
122                 BIT(EMC_USER_DC),
123                 80, iso_share_calc_t114_ddr3_dc
124         },
125         {
126                 BIT(EMC_USER_DC) | BIT(EMC_USER_VI),
127                 45, iso_share_calc_t114_ddr3_default
128         },
129         {
130                 BIT(EMC_USER_DC) | BIT(EMC_USER_MSENC),
131                 50, iso_share_calc_t114_ddr3_default
132         },
133         {
134                 BIT(EMC_USER_DC) | BIT(EMC_USER_3D),
135                 50, iso_share_calc_t114_ddr3_default
136         },
137         {
138                 BIT(EMC_USER_DC) | BIT(EMC_USER_VDE),
139                 45, iso_share_calc_t114_ddr3_default
140         },
141 };
142
143 #define KHZ     1000
144
145 #define PLL_C_DIRECT_FLOOR              333500000
146 #define EMC_STATUS_UPDATE_TIMEOUT       100
147 #define TEGRA_EMC_TABLE_MAX_SIZE        16
148
149 enum {
150         DLL_CHANGE_NONE = 0,
151         DLL_CHANGE_ON,
152         DLL_CHANGE_OFF,
153 };
154
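/*
 * Fields of the CAR EMC clock source register as used by this driver:
 * divider in bits 7:0 (the effective rate is source_rate / (1 + div / 2);
 * odd divider values are rejected in find_matching_input()), source select
 * in bits 31:29, low-jitter path enable in bit 31, and the EMC-to-MC
 * same-frequency request in bit 16.
 */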
155 #define EMC_CLK_DIV_SHIFT               0
156 #define EMC_CLK_DIV_MASK                (0xFF << EMC_CLK_DIV_SHIFT)
157 #define EMC_CLK_SOURCE_SHIFT            29
158 #define EMC_CLK_SOURCE_MASK             (0x7 << EMC_CLK_SOURCE_SHIFT)
159 #define EMC_CLK_LOW_JITTER_ENABLE       (0x1 << 31)
160 #define EMC_CLK_MC_SAME_FREQ            (0x1 << 16)
161
162 /* FIXME: actual Tegra11 list */
163 #define BURST_REG_LIST \
164         DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),                     \
165         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),                    \
166         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR),                \
167         DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),                    \
168         DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),                     \
169         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),                    \
170         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),                    \
171         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),                    \
172         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),                    \
173         DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),                 \
174         DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),                 \
175         DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),                    \
176         DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),                   \
177         DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),                   \
178         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),                    \
179         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK),               \
180         DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY),                  \
181         DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA),           \
182         DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_2),             \
183         DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),                   \
184         DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK),               \
185         DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),                \
186         DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),      \
187         DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),    \
188         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),                \
189         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),                \
190         DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),              \
191         DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),               \
192         DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),                \
193         DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),                \
194         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),                   \
195         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),                \
196         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),                   \
197         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR),                 \
198         DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD),                    \
199         DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),                   \
200         DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),                  \
201         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),             \
202         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),               \
203         DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),                 \
204         DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA),             \
205         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),              \
206         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),               \
207         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),              \
208         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),            \
209         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),     \
210         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),         \
211         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),         \
212         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),         \
213         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),         \
214         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),        \
215         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),        \
216         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),        \
217         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),        \
218         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),        \
219         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),        \
220         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),        \
221         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),        \
222         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),          \
223         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL4),         \
224         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),         \
225         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),          \
226         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL),          \
227         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),         \
228         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),       \
229         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),      \
230         DEFINE_REG(TEGRA_EMC_BASE, EMC_DSR_VTTGEN_DRV),         \
231         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN),            \
232         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),             \
233         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL),          \
234         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),          \
235         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),          \
236         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),           \
237         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2),          \
238         DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG2),       \
239         DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG3),       \
240         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),                    \
241         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),           \
242         DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),   \
243         DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL1),       \
244         DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL2),       \
245                                                                         \
246         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),             \
247         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
248         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),      \
249         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),       \
250         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),       \
251         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),      \
252         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),      \
253         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),      \
254         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),  \
255         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),  \
256         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),      \
257         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),      \
258         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),      \
259         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),      \
260         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),        \
261         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),       \
262         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),           \
263         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),  \
264         DEFINE_REG(TEGRA_EMC_BASE, EMC_SEL_DPD_CTRL),
265
266 #define BURST_UP_DOWN_REG_LIST \
267         DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT),     \
268         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_0),   \
269         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_1),   \
270         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_0),   \
271         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_0),  \
272         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_2),   \
273         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_1),   \
274         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_1),  \
275         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_3),   \
276         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_0),  \
277         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_1),
278
279 #define EMC_TRIMMERS_REG_LIST \
280         DEFINE_REG(0, EMC_CDB_CNTL_1),                          \
281         DEFINE_REG(0, EMC_FBIO_CFG6),                           \
282         DEFINE_REG(0, EMC_QUSE),                                \
283         DEFINE_REG(0, EMC_EINPUT),                              \
284         DEFINE_REG(0, EMC_EINPUT_DURATION),                     \
285         DEFINE_REG(0, EMC_DLL_XFORM_DQS0),                      \
286         DEFINE_REG(0, EMC_QSAFE),                               \
287         DEFINE_REG(0, EMC_DLL_XFORM_QUSE0),                     \
288         DEFINE_REG(0, EMC_RDV),                                 \
289         DEFINE_REG(0, EMC_XM2DQSPADCTRL4),                      \
290         DEFINE_REG(0, EMC_XM2DQSPADCTRL3),                      \
291         DEFINE_REG(0, EMC_DLL_XFORM_DQ0),                       \
292         DEFINE_REG(0, EMC_AUTO_CAL_CONFIG),                     \
293         DEFINE_REG(0, EMC_DLL_XFORM_ADDR0),                     \
294         DEFINE_REG(0, EMC_XM2CLKPADCTRL2),                      \
295         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS0),                     \
296         DEFINE_REG(0, EMC_DLL_XFORM_ADDR1),                     \
297         DEFINE_REG(0, EMC_DLL_XFORM_ADDR2),                     \
298         DEFINE_REG(0, EMC_DLL_XFORM_DQS1),                      \
299         DEFINE_REG(0, EMC_DLL_XFORM_DQS2),                      \
300         DEFINE_REG(0, EMC_DLL_XFORM_DQS3),                      \
301         DEFINE_REG(0, EMC_DLL_XFORM_DQ1),                       \
302         DEFINE_REG(0, EMC_DLL_XFORM_DQ2),                       \
303         DEFINE_REG(0, EMC_DLL_XFORM_DQ3),                       \
304         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS1),                     \
305         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS2),                     \
306         DEFINE_REG(0, EMC_DLI_TRIM_TXDQS3),                     \
307         DEFINE_REG(0, EMC_DLL_XFORM_QUSE1),                     \
308         DEFINE_REG(0, EMC_DLL_XFORM_QUSE2),                     \
309         DEFINE_REG(0, EMC_DLL_XFORM_QUSE3),
310
311
312 #define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
313 static const void __iomem *burst_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
314         BURST_REG_LIST
315 };
316 #ifndef EMULATE_CLOCK_SWITCH
317 static const void __iomem *burst_up_down_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
318         BURST_UP_DOWN_REG_LIST
319 };
320 #endif
321 #undef DEFINE_REG
322
323
324 #define DEFINE_REG(base, reg) (reg)
325 #ifndef EMULATE_CLOCK_SWITCH
326 static const u32 emc_trimmer_offs[TEGRA11_EMC_MAX_NUM_REGS] = {
327         EMC_TRIMMERS_REG_LIST
328 };
329 #endif
330 #undef DEFINE_REG
331
332
333 #define DEFINE_REG(base, reg)   reg##_INDEX
334 enum {
335         BURST_REG_LIST
336 };
337 #undef DEFINE_REG
338
339 #define DEFINE_REG(base, reg)   reg##_TRIM_INDEX
340 enum {
341         EMC_TRIMMERS_REG_LIST
342 };
343 #undef DEFINE_REG
344
345
346 struct emc_sel {
347         struct clk      *input;
348         u32             value;
349         unsigned long   input_rate;
350 };
351 static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
352 static struct tegra11_emc_table start_timing;
353 static const struct tegra11_emc_table *emc_timing;
354
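/*
 * Time of the last EMC clock change and the minimum spacing enforced between
 * consecutive changes, in microseconds (see tegra_emc_set_rate()).
 */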
355 static ktime_t clkchange_time;
356 static int clkchange_delay = 100;
357
358 static const u32 *dram_to_soc_bit_map;
359 static const struct tegra11_emc_table *tegra_emc_table;
360 static int tegra_emc_table_size;
361
362 static u32 dram_dev_num;
363 static u32 dram_type = -1;
364
365 static struct clk *emc;
366
367 static struct {
368         cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
369         int last_sel;
370         u64 last_update;
371         u64 clkchange_count;
372         spinlock_t spinlock;
373 } emc_stats;
374
375 static DEFINE_SPINLOCK(emc_access_lock);
376
377 static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
378 static void __iomem *emc0_base = IO_ADDRESS(TEGRA_EMC0_BASE);
379 static void __iomem *emc1_base = IO_ADDRESS(TEGRA_EMC1_BASE);
380 static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
381 static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
382
383 static inline void emc_writel(u32 val, unsigned long addr)
384 {
385         writel(val, (u32)emc_base + addr);
386 }
387 static inline void emc0_writel(u32 val, unsigned long addr)
388 {
389         writel(val, (u32)emc0_base + addr);
390 }
391 static inline void emc1_writel(u32 val, unsigned long addr)
392 {
393         writel(val, (u32)emc1_base + addr);
394 }
395 static inline u32 emc_readl(unsigned long addr)
396 {
397         return readl((u32)emc_base + addr);
398 }
399 static inline void mc_writel(u32 val, unsigned long addr)
400 {
401         writel(val, (u32)mc_base + addr);
402 }
403 static inline u32 mc_readl(unsigned long addr)
404 {
405         return readl((u32)mc_base + addr);
406 }
407
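/*
 * Queue a register write into the EMC clock-change FIFO: writes pushed here
 * are not applied immediately, but executed by the EMC as part of the clock
 * change sequence (see the ccfifo_writel() calls in emc_set_clock()).
 */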
408 static inline void ccfifo_writel(u32 val, unsigned long addr)
409 {
410         writel(val, (u32)emc_base + EMC_CCFIFO_DATA);
411         writel(addr, (u32)emc_base + EMC_CCFIFO_ADDR);
412 }
413
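/*
 * tegra_emc_round_rate_updown() caches the index of the last matched table
 * entry in last_round_idx, so subsequent lookups in set_rate/predict_parent
 * can start scanning the table there instead of from entry 0.
 */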
414 static int last_round_idx;
415 static inline int get_start_idx(unsigned long rate)
416 {
417         if (tegra_emc_table[last_round_idx].rate == rate)
418                 return last_round_idx;
419         return 0;
420 }
421
422 static void emc_last_stats_update(int last_sel)
423 {
424         unsigned long flags;
425         u64 cur_jiffies = get_jiffies_64();
426
427         spin_lock_irqsave(&emc_stats.spinlock, flags);
428
429         if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
430                 emc_stats.time_at_clock[emc_stats.last_sel] =
431                         emc_stats.time_at_clock[emc_stats.last_sel] +
432                         (cur_jiffies - emc_stats.last_update);
433
434         emc_stats.last_update = cur_jiffies;
435
436         if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
437                 emc_stats.clkchange_count++;
438                 emc_stats.last_sel = last_sel;
439         }
440         spin_unlock_irqrestore(&emc_stats.spinlock, flags);
441 }
442
443 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
444 {
445         int i;
446         for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
447                 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
448                         return 0;
449                 udelay(1);
450         }
451         return -ETIMEDOUT;
452 }
453
454 static inline void emc_timing_update(void)
455 {
456         int err;
457
458         emc_writel(0x1, EMC_TIMING_CONTROL);
459         err = wait_for_update(EMC_STATUS,
460                               EMC_STATUS_TIMING_UPDATE_STALLED, false);
461         if (err) {
462                 pr_err("%s: timing update error: %d", __func__, err);
463                 BUG();
464         }
465 }
466
467 static inline void auto_cal_disable(void)
468 {
469         int err;
470
471         emc_writel(0, EMC_AUTO_CAL_INTERVAL);
472         err = wait_for_update(EMC_AUTO_CAL_STATUS,
473                               EMC_AUTO_CAL_STATUS_ACTIVE, false);
474         if (err) {
475                 pr_err("%s: disable auto-cal error: %d", __func__, err);
476                 BUG();
477         }
478 }
479
480 static inline bool dqs_preset(const struct tegra11_emc_table *next_timing,
481                               const struct tegra11_emc_table *last_timing)
482 {
483         bool ret = false;
484
485 #define DQS_SET(reg, bit)                                                     \
486         do {                                                                  \
487                 if ((next_timing->burst_regs[EMC_##reg##_INDEX] &             \
488                      EMC_##reg##_##bit##_ENABLE) &&                           \
489                     (!(last_timing->burst_regs[EMC_##reg##_INDEX] &           \
490                        EMC_##reg##_##bit##_ENABLE)))   {                      \
491                         emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
492                                    | EMC_##reg##_##bit##_ENABLE, EMC_##reg);  \
493                         ret = true;                                           \
494                 }                                                             \
495         } while (0)
496
497
498 #define DQS_SET_TRIM(reg, bit, ch)                                             \
499         do {                                                                   \
500                 if ((next_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX]    \
501                      & EMC_##reg##_##bit##_ENABLE) &&                          \
502                     (!(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX]  \
503                        & EMC_##reg##_##bit##_ENABLE)))   {                     \
504                         emc##ch##_writel(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX] \
505                                    | EMC_##reg##_##bit##_ENABLE, EMC_##reg);   \
506                         ret = true;                                            \
507                 }                                                              \
508         } while (0)
509
510         DQS_SET(XM2DQSPADCTRL2, VREF);
511
512         return ret;
513 }
514
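/*
 * Worked example for the long wait count computed below (illustrative
 * numbers): with two DRAM devices and an overlapping long ZQ calibration,
 * cnt = 512 - 2 * 256 = 0, so the MRS_WAIT_SHORT field from the table
 * becomes the effective MRS_WAIT_LONG value.
 */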
515 static inline void overwrite_mrs_wait_cnt(
516         const struct tegra11_emc_table *next_timing,
517         bool zcal_long)
518 {
519         u32 reg;
520         u32 cnt = 512;
521
522         /* For DDR3, when the DLL is re-started: overwrite the EMC DFS table
523            setting for MRS_WAIT_LONG with the maximum of the MRS_WAIT_SHORT
524            setting and the expected operation length. Reduce the latter by the
525            overlapping zq-calibration, if any */
526         if (zcal_long)
527                 cnt -= dram_dev_num * 256;
528
529         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
530                 EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
531                 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
532         if (cnt < reg)
533                 cnt = reg;
534
535         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
536                 (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
537         reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
538                 EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
539
540         emc_writel(reg, EMC_MRS_WAIT_CNT);
541 }
542
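/* MR1 bit 0 is the DDR3 DLL-disable bit, hence the DLL is on when it is clear. */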
543 static inline int get_dll_change(const struct tegra11_emc_table *next_timing,
544                                  const struct tegra11_emc_table *last_timing)
545 {
546         bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
547         bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
548
549         if (next_dll_enabled == last_dll_enabled)
550                 return DLL_CHANGE_NONE;
551         else if (next_dll_enabled)
552                 return DLL_CHANGE_ON;
553         else
554                 return DLL_CHANGE_OFF;
555 }
556
557 static inline void set_dram_mode(const struct tegra11_emc_table *next_timing,
558                                  const struct tegra11_emc_table *last_timing,
559                                  int dll_change)
560 {
561         if (dram_type == DRAM_TYPE_DDR3) {
562                 /* first mode_1, then mode_2, then mode_reset*/
563                 if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
564                         ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
565                 if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
566                         ccfifo_writel(next_timing->emc_mode_2, EMC_EMRS2);
567
568                 if ((next_timing->emc_mode_reset !=
569                      last_timing->emc_mode_reset) ||
570                     (dll_change == DLL_CHANGE_ON)) {
571                         u32 reg = next_timing->emc_mode_reset &
572                                 (~EMC_MODE_SET_DLL_RESET);
573                         if (dll_change == DLL_CHANGE_ON) {
574                                 reg |= EMC_MODE_SET_DLL_RESET;
575                                 reg |= EMC_MODE_SET_LONG_CNT;
576                         }
577                         ccfifo_writel(reg, EMC_MRS);
578                 }
579         } else {
580                 /* first mode_2, then mode_1; mode_reset is not applicable */
581                 if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
582                         ccfifo_writel(next_timing->emc_mode_2, EMC_MRW2);
583                 if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
584                         ccfifo_writel(next_timing->emc_mode_1, EMC_MRW);
585                 if (next_timing->emc_mode_4 != last_timing->emc_mode_4)
586                         ccfifo_writel(next_timing->emc_mode_4, EMC_MRW4);
587         }
588 }
589
590 static inline void do_clock_change(u32 clk_setting)
591 {
592         int err;
593
594         mc_readl(MC_EMEM_ADR_CFG);      /* completes prev writes */
595         writel(clk_setting, (u32)clk_base + emc->reg);
596         readl((u32)clk_base + emc->reg);/* completes prev write */
597         readl((u32)clk_base + emc->reg); /* completes prev write */
598         err = wait_for_update(EMC_INTSTATUS,
599                               EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
600         if (err) {
601                 pr_err("%s: clock change completion error: %d", __func__, err);
602                 BUG();
603         }
604 }
605
606 static noinline void emc_set_clock(const struct tegra11_emc_table *next_timing,
607                                    const struct tegra11_emc_table *last_timing,
608                                    u32 clk_setting)
609 {
610 #ifndef EMULATE_CLOCK_SWITCH
611         int i, dll_change, pre_wait;
612         bool dyn_sref_enabled, zcal_long;
613
614         u32 emc_cfg_reg = emc_readl(EMC_CFG);
615
616         dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
617         dll_change = get_dll_change(next_timing, last_timing);
618         zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
619                 (last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);
620
621         /* FIXME: remove steps enumeration below? */
622
623         /* 1. clear clkchange_complete interrupts */
624         emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
625
626         /* 2. disable dynamic self-refresh and preset dqs vref, then wait for
627            possible self-refresh entry/exit and/or dqs vref to settle - waiting
628            before the clock change decreases worst case change stall time */
629         pre_wait = 0;
630         if (dyn_sref_enabled) {
631                 emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
632                 emc_writel(emc_cfg_reg, EMC_CFG);
633                 pre_wait = 5;           /* 5us+ for self-refresh entry/exit */
634         }
635
636         /* 2.5 check dq/dqs vref delay */
637         if (dqs_preset(next_timing, last_timing)) {
638                 if (pre_wait < 3)
639                         pre_wait = 3;   /* 3us+ for dqs vref settled */
640         }
641         if (pre_wait) {
642                 emc_timing_update();
643                 udelay(pre_wait);
644         }
645
646         /* 3. disable auto-cal if vref mode is switching - removed */
647
648         /* 4. program burst shadow registers */
649         for (i = 0; i < next_timing->burst_regs_num; i++) {
650                 if (!burst_reg_addr[i])
651                         continue;
652                 __raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
653         }
654         for (i = 0; i < next_timing->emc_trimmers_num; i++) {
655                 __raw_writel(next_timing->emc_trimmers_0[i],
656                         (u32)emc0_base + emc_trimmer_offs[i]);
657                 __raw_writel(next_timing->emc_trimmers_1[i],
658                         (u32)emc1_base + emc_trimmer_offs[i]);
659         }
660         emc_cfg_reg &= ~EMC_CFG_UPDATE_MASK;
661         emc_cfg_reg |= next_timing->emc_cfg & EMC_CFG_UPDATE_MASK;
662         emc_writel(emc_cfg_reg, EMC_CFG);
663         wmb();
664         barrier();
665
666         /* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
667            overwrite DFS table setting */
668         if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
669                 overwrite_mrs_wait_cnt(next_timing, zcal_long);
670
671         /* 5.2 disable auto-refresh to save time after clock change */
672         ccfifo_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);
673
674         /* 6. turn Off dll and enter self-refresh on DDR3 */
675         if (dram_type == DRAM_TYPE_DDR3) {
676                 if (dll_change == DLL_CHANGE_OFF)
677                         ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
678                 ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
679                               EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
680         }
681
682         /* 7. flow control marker 2 */
683         ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
684
685         /* 8. exit self-refresh on DDR3 */
686         if (dram_type == DRAM_TYPE_DDR3)
687                 ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);
688
689         /* 8.1 re-enable auto-refresh */
690         ccfifo_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);
691
692         /* 9. set dram mode registers */
693         set_dram_mode(next_timing, last_timing, dll_change);
694
695         /* 10. issue zcal command if turning zcal On */
696         if (zcal_long) {
697                 ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
698                 if (dram_dev_num > 1)
699                         ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
700         }
701
702         /* 10.1 dummy write to RO register to remove stall after change */
703         ccfifo_writel(0, EMC_CCFIFO_STATUS);
704
705         /* 11.5 program burst_up_down registers if emc rate is going down */
706         if (next_timing->rate < last_timing->rate) {
707                 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
708                         __raw_writel(next_timing->burst_up_down_regs[i],
709                                 burst_up_down_reg_addr[i]);
710                 wmb();
711         }
712
713         /* 12-14. read any MC register to ensure the programming is done,
714            change EMC clock source register, wait for clk change completion */
715         do_clock_change(clk_setting);
716
717         /* 14.1 re-enable auto-refresh - moved to ccfifo in 8.1 */
718
719         /* 14.2 program burst_up_down registers if emc rate is going up */
720         if (next_timing->rate > last_timing->rate) {
721                 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
722                         __raw_writel(next_timing->burst_up_down_regs[i],
723                                 burst_up_down_reg_addr[i]);
724                 wmb();
725         }
726
727         /* 15. set auto-cal interval */
728         if (next_timing->rev >= 0x42)
729                 emc_writel(next_timing->emc_acal_interval,
730                            EMC_AUTO_CAL_INTERVAL);
731
732         /* 16. restore dynamic self-refresh */
733         if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
734                 emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
735                 emc_writel(emc_cfg_reg, EMC_CFG);
736         }
737
738         /* 17. set zcal wait count */
739         emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);
740
741         /* 18. update restored timing */
742         udelay(2);
743         emc_timing_update();
744 #else
745         /* FIXME: implement */
746         pr_info("tegra11_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
747                 next_timing->rate, clk_setting);
748 #endif
749 }
750
751 static inline void emc_get_timing(struct tegra11_emc_table *timing)
752 {
753         int i;
754
755         /* burst and trimmer updates depend on previous state; burst_up_down
756            registers are stateless */
757         for (i = 0; i < timing->burst_regs_num; i++) {
758                 if (burst_reg_addr[i])
759                         timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
760                 else
761                         timing->burst_regs[i] = 0;
762         }
763         for (i = 0; i < timing->emc_trimmers_num; i++) {
764                 timing->emc_trimmers_0[i] =
765                         __raw_readl((u32)emc0_base + emc_trimmer_offs[i]);
766                 timing->emc_trimmers_1[i] =
767                         __raw_readl((u32)emc1_base + emc_trimmer_offs[i]);
768         }
769         timing->emc_acal_interval = 0;
770         timing->emc_zcal_cnt_long = 0;
771         timing->emc_mode_reset = 0;
772         timing->emc_mode_1 = 0;
773         timing->emc_mode_2 = 0;
774         timing->emc_mode_4 = 0;
775         timing->emc_cfg = emc_readl(EMC_CFG);
776         timing->rate = clk_get_rate_locked(emc) / 1000;
777 }
778
779 /* The EMC registers have shadow registers. When the EMC clock is updated
780  * in the clock controller, the shadow registers are copied to the active
781  * registers, allowing glitchless memory bus frequency changes.
782  * This function updates the shadow registers for a new clock frequency,
783  * and relies on the clock lock on the emc clock to avoid races between
784  * multiple frequency changes. In addition, the access lock prevents the
785  * MRR register read path from accessing EMC registers concurrently. */
786 int tegra_emc_set_rate(unsigned long rate)
787 {
788         int i;
789         u32 clk_setting;
790         const struct tegra11_emc_table *last_timing;
791         unsigned long flags;
792         s64 last_change_delay;
793
794         if (!tegra_emc_table)
795                 return -EINVAL;
796
797         /* Table entries specify rate in kHz */
798         rate = rate / 1000;
799
800         i = get_start_idx(rate);
801         for (; i < tegra_emc_table_size; i++) {
802                 if (tegra_emc_clk_sel[i].input == NULL)
803                         continue;       /* invalid entry */
804
805                 if (tegra_emc_table[i].rate == rate)
806                         break;
807         }
808
809         if (i >= tegra_emc_table_size)
810                 return -EINVAL;
811
812         if (!emc_timing) {
813                 /* cannot assume that boot timing matches the DFS table even
814                    if the boot frequency matches one of the table entries */
815                 emc_get_timing(&start_timing);
816                 last_timing = &start_timing;
817         } else
818                 last_timing = emc_timing;
820
821         clk_setting = tegra_emc_clk_sel[i].value;
822
823         last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
824         if ((last_change_delay >= 0) && (last_change_delay < clkchange_delay))
825                 udelay(clkchange_delay - (int)last_change_delay);
826
827         spin_lock_irqsave(&emc_access_lock, flags);
828         emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
829         clkchange_time = ktime_get();
830         emc_timing = &tegra_emc_table[i];
831         spin_unlock_irqrestore(&emc_access_lock, flags);
832
833         emc_last_stats_update(i);
834
835         pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);
836
837         return 0;
838 }
839
840 long tegra_emc_round_rate_updown(unsigned long rate, bool up)
841 {
842         int i;
843         unsigned long table_rate;
844
845         if (!tegra_emc_table)
846                 return clk_get_rate_locked(emc); /* no table - no rate change */
847
848         if (!emc_enable)
849                 return -EINVAL;
850
851         pr_debug("%s: %lu\n", __func__, rate);
852
853         /* Table entries specify rate in kHz */
854         rate = rate / 1000;
855
856         i = get_start_idx(rate);
857         for (; i < tegra_emc_table_size; i++) {
858                 if (tegra_emc_clk_sel[i].input == NULL)
859                         continue;       /* invalid entry */
860
861                 table_rate = tegra_emc_table[i].rate;
862                 if (table_rate >= rate) {
863                         if (!up && i && (table_rate > rate)) {
864                                 i--;
865                                 table_rate = tegra_emc_table[i].rate;
866                         }
867                         pr_debug("%s: using %lu\n", __func__, table_rate);
868                         last_round_idx = i;
869                         return table_rate * 1000;
870                 }
871         }
872
873         return -EINVAL;
874 }
875
876 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
877 {
878         int i;
879
880         if (!tegra_emc_table) {
881                 if (rate == clk_get_rate_locked(emc)) {
882                         *div_value = emc->div - 2;
883                         return emc->parent;
884                 }
885                 return NULL;
886         }
887
888         pr_debug("%s: %lu\n", __func__, rate);
889
890         /* Table entries specify rate in kHz */
891         rate = rate / 1000;
892
893         i = get_start_idx(rate);
894         for (; i < tegra_emc_table_size; i++) {
895                 if (tegra_emc_table[i].rate == rate) {
896                         struct clk *p = tegra_emc_clk_sel[i].input;
897
898                         if (p && (tegra_emc_clk_sel[i].input_rate ==
899                                   clk_get_rate(p))) {
900                                 *div_value = (tegra_emc_clk_sel[i].value &
901                                         EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
902                                 return p;
903                         }
904                 }
905         }
906         return NULL;
907 }
908
909 bool tegra_emc_is_parent_ready(unsigned long rate, struct clk **parent,
910                 unsigned long *parent_rate, unsigned long *backup_rate)
911 {
912
913         int i;
914         struct clk *p = NULL;
915         unsigned long p_rate = 0;
916
917         if (!tegra_emc_table)
918                 return true;
919
920         pr_debug("%s: %lu\n", __func__, rate);
921
922         /* Table entries specify rate in kHz */
923         rate = rate / 1000;
924
925         i = get_start_idx(rate);
926         for (; i < tegra_emc_table_size; i++) {
927                 if (tegra_emc_table[i].rate == rate) {
928                         p = tegra_emc_clk_sel[i].input;
929                         if (!p)
930                                 continue;       /* invalid entry */
931
932                         p_rate = tegra_emc_clk_sel[i].input_rate;
933                         if (p_rate == clk_get_rate(p))
934                                 return true;
935                         break;
936                 }
937         }
938
939         /* Table match not found - "non existing parent" is ready */
940         if (!p)
941                 return true;
942
943 #ifdef CONFIG_TEGRA_PLLM_SCALED
944         /*
945          * Table match found, but parent is not ready - check if backup entry
946          * was found during initialization, and return the respective backup
947          * rate
948          */
949         if (emc->shared_bus_backup.input &&
950             (emc->shared_bus_backup.input != p)) {
951                 *parent = p;
952                 *parent_rate = p_rate;
953                 *backup_rate = emc->shared_bus_backup.bus_rate;
954                 return false;
955         }
956 #else
957         /*
958          * Table match found, but parent is not ready - continue search
959          * for backup rate: min rate above requested that has different
960          * parent source (since only pll_c is scaled and may not be ready,
961          * any other parent can provide backup)
962          */
963         *parent = p;
964         *parent_rate = p_rate;
965
966         for (i++; i < tegra_emc_table_size; i++) {
967                 p = tegra_emc_clk_sel[i].input;
968                 if (!p)
969                         continue;       /* invalid entry */
970
971                 if (p != (*parent)) {
972                         *backup_rate = tegra_emc_table[i].rate * 1000;
973                         return false;
974                 }
975         }
976 #endif
977         /* Parent is not ready, and no backup found */
978         *backup_rate = -EINVAL;
979         return false;
980 }
981
982 static inline const struct clk_mux_sel *get_emc_input(u32 val)
983 {
984         const struct clk_mux_sel *sel;
985
986         for (sel = emc->inputs; sel->input != NULL; sel++) {
987                 if (sel->value == val)
988                         break;
989         }
990         return sel;
991 }
992
993 static int find_matching_input(const struct tegra11_emc_table *table,
994                         struct clk *pll_c, struct emc_sel *emc_clk_sel)
995 {
996         u32 div_value = (table->src_sel_reg & EMC_CLK_DIV_MASK) >>
997                 EMC_CLK_DIV_SHIFT;
998         u32 src_value = (table->src_sel_reg & EMC_CLK_SOURCE_MASK) >>
999                 EMC_CLK_SOURCE_SHIFT;
1000         unsigned long input_rate = 0;
1001         unsigned long table_rate = table->rate * 1000; /* table->rate is in kHz */
1002         const struct clk_mux_sel *sel = get_emc_input(src_value);
1003
1004 #ifdef CONFIG_TEGRA_PLLM_SCALED
1005         struct clk *scalable_pll = emc->parent; /* pll_m is a boot parent */
1006 #else
1007         struct clk *scalable_pll = pll_c;
1008 #endif
1009         pr_info_once("tegra: %s is selected as scalable EMC clock source\n",
1010                      scalable_pll->name);
1011
1012         if (div_value & 0x1) {
1013                 pr_warn("tegra: invalid odd divider for EMC rate %lu\n",
1014                         table_rate);
1015                 return -EINVAL;
1016         }
1017         if (!sel->input) {
1018                 pr_warn("tegra: no matching input found for EMC rate %lu\n",
1019                         table_rate);
1020                 return -EINVAL;
1021         }
1022         if (div_value && (table->src_sel_reg & EMC_CLK_LOW_JITTER_ENABLE)) {
1023                 pr_warn("tegra: invalid LJ path for EMC rate %lu\n",
1024                         table_rate);
1025                 return -EINVAL;
1026         }
1027         if (!(table->src_sel_reg & EMC_CLK_MC_SAME_FREQ) !=
1028             !(MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
1029               table->burst_regs[MC_EMEM_ARB_MISC0_INDEX])) {
1030                 pr_warn("tegra: ambiguous EMC to MC ratio for EMC rate %lu\n",
1031                         table_rate);
1032                 return -EINVAL;
1033         }
1034
1035 #ifndef CONFIG_TEGRA_DUAL_CBUS
1036         if (sel->input == pll_c) {
1037                 pr_warn("tegra: %s is cbus source: no EMC rate %lu support\n",
1038                         sel->input->name, table_rate);
1039                 return -EINVAL;
1040         }
1041 #endif
1042
1043         if (sel->input == scalable_pll) {
1044                 input_rate = table_rate * (1 + div_value / 2);
1045         } else {
1046                 /* all other sources are fixed, must exactly match the rate */
1047                 input_rate = clk_get_rate(sel->input);
1048                 if (input_rate != (table_rate * (1 + div_value / 2))) {
1049                         pr_warn("tegra: EMC rate %lu does not match %s rate %lu\n",
1050                                 table_rate, sel->input->name, input_rate);
1051                         return -EINVAL;
1052                 }
1053         }
1054
1055 #ifdef CONFIG_TEGRA_PLLM_SCALED
1056                 if (sel->input == pll_c) {
1057                         /* may be overwritten on a later iteration - ends up
1058                            at the max rate sourced from pll_c */
1059                         emc->shared_bus_backup.input = pll_c;
1060                         emc->shared_bus_backup.bus_rate = table_rate;
1061                 }
1062 #endif
1063         /* Prepare emc clock selection settings for this table rate */
1064         emc_clk_sel->input = sel->input;
1065         emc_clk_sel->input_rate = input_rate;
1066         emc_clk_sel->value = table->src_sel_reg;
1067
1068         return 0;
1069 }
1070
1071 static void adjust_emc_dvfs_table(const struct tegra11_emc_table *table,
1072                                   int table_size)
1073 {
1074         int i, j;
1075         unsigned long rate;
1076
1077         for (i = 0; i < MAX_DVFS_FREQS; i++) {
1078                 int mv = emc->dvfs->millivolts[i];
1079                 if (!mv)
1080                         break;
1081
1082                 /* For each dvfs voltage find maximum supported rate;
1083                    use 1MHz placeholder if not found */
1084                 for (rate = 1000, j = 0; j < table_size; j++) {
1085                         if (tegra_emc_clk_sel[j].input == NULL)
1086                                 continue;       /* invalid entry */
1087
1088                         if ((mv >= table[j].emc_min_mv) &&
1089                             (rate < table[j].rate))
1090                                 rate = table[j].rate;
1091                 }
1092                 /* Table entries specify rate in kHz */
1093                 emc->dvfs->freqs[i] = rate * 1000;
1094         }
1095 }
1096
1097 #ifdef CONFIG_TEGRA_PLLM_SCALED
1098 /* When pll_m is scaled, pll_c must provide backup rate;
1099    if not - remove rates that require pll_m scaling */
1100 static int purge_emc_table(unsigned long max_rate)
1101 {
1102         int i;
1103         int ret = 0;
1104
1105         if (emc->shared_bus_backup.input)
1106                 return ret;
1107
1108         pr_warn("tegra: selected pll_m scaling option but no backup source:\n");
1109         pr_warn("       removing unsupported entries from the table:\n");
1110
1111         /* invalidate all entries whose parent clock rate does not match */
1112         for (i = 0; i < tegra_emc_table_size; i++) {
1113                 struct emc_sel *sel = &tegra_emc_clk_sel[i];
1114                 if (sel->input) {
1115                         if (clk_get_rate(sel->input) != sel->input_rate) {
1116                                 pr_warn("       EMC rate %lu\n",
1117                                         tegra_emc_table[i].rate * 1000);
1118                                 sel->input = NULL;
1119                                 sel->input_rate = 0;
1120                                 sel->value = 0;
1121                                 if (max_rate == tegra_emc_table[i].rate)
1122                                         ret = -EINVAL;
1123                         }
1124                 }
1125         }
1126         return ret;
1127 }
1128 #else
1129 /* When pll_m is fixed @ max EMC rate, it always provides backup for pll_c */
1130 #define purge_emc_table(max_rate) (0)
1131 #endif
1132
1133 #ifdef CONFIG_OF
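/*
 * When the "nvidia,use-ram-code" property is present, EMC tables are grouped
 * under child nodes keyed by "nvidia,ram-code"; pick the child that matches
 * the RAM code strapped on this board (tegra_bct_strapping).
 */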
1134 static struct device_node *tegra_emc_ramcode_devnode(struct device_node *np)
1135 {
1136         struct device_node *iter;
1137         u32 reg;
1138
1139         for_each_child_of_node(np, iter) {
1140                 if (of_property_read_u32(iter, "nvidia,ram-code", &reg))
1141                         continue;
1142                 if (reg == tegra_bct_strapping)
1143                         return of_node_get(iter);
1144         }
1145
1146         return NULL;
1147 }
1148
1149 static struct tegra11_emc_pdata *tegra_emc_dt_parse_pdata(
1150                 struct platform_device *pdev)
1151 {
1152         struct device_node *np = pdev->dev.of_node;
1153         struct device_node *tnp, *iter;
1154         struct tegra11_emc_pdata *pdata;
1155         int ret, i, num_tables;
1156
1157         if (!np)
1158                 return NULL;
1159
1160         if (of_find_property(np, "nvidia,use-ram-code", NULL)) {
1161                 tnp = tegra_emc_ramcode_devnode(np);
1162                 if (!tnp)
1163                         dev_warn(&pdev->dev,
1164                                 "can't find emc table for ram-code 0x%02x\n",
1165                                         tegra_bct_strapping);
1166         } else
1167                 tnp = of_node_get(np);
1168
1169         if (!tnp)
1170                 return NULL;
1171
1172         num_tables = 0;
1173         for_each_child_of_node(tnp, iter)
1174                 if (of_device_is_compatible(iter, "nvidia,tegra11-emc-table"))
1175                         num_tables++;
1176
1177         if (!num_tables) {
1178                 pdata = NULL;
1179                 goto out;
1180         }
1181
1182         pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
1183         pdata->tables = devm_kzalloc(&pdev->dev,
1184                                 sizeof(*pdata->tables) * num_tables,
1185                                         GFP_KERNEL);
1186
1187         i = 0;
1188         for_each_child_of_node(tnp, iter) {
1189                 u32 u;
1190                 const char *source_name;
1191
1192                 ret = of_property_read_u32(iter, "nvidia,revision", &u);
1193                 if (ret) {
1194                         dev_err(&pdev->dev, "no revision in %s\n",
1195                                 iter->full_name);
1196                         continue;
1197                 }
1198                 pdata->tables[i].rev = u;
1199
1200                 ret = of_property_read_u32(iter, "clock-frequency", &u);
1201                 if (ret) {
1202                         dev_err(&pdev->dev, "no clock-frequency in %s\n",
1203                                 iter->full_name);
1204                         continue;
1205                 }
1206                 pdata->tables[i].rate = u;
1207
1208                 ret = of_property_read_u32(iter, "nvidia,emc-min-mv", &u);
1209                 if (ret) {
1210                         dev_err(&pdev->dev, "no emc-min-mv in %s\n",
1211                                 iter->full_name);
1212                         continue;
1213                 }
1214                 pdata->tables[i].emc_min_mv = u;
1215
1216                 ret = of_property_read_string(iter,
1217                                         "nvidia,source", &source_name);
1218                 if (ret) {
1219                         dev_err(&pdev->dev, "no source name in %s\n",
1220                                 iter->full_name);
1221                         continue;
1222                 }
1223                 pdata->tables[i].src_name = source_name;
1224
1225                 ret = of_property_read_u32(iter, "nvidia,src-sel-reg", &u);
1226                 if (ret) {
1227                         dev_err(&pdev->dev, "no src-sel-reg in %s\n",
1228                                 iter->full_name);
1229                         continue;
1230                 }
1231                 pdata->tables[i].src_sel_reg = u;
1232
1233                 ret = of_property_read_u32(iter, "nvidia,burst-regs-num", &u);
1234                 if (ret) {
1235                         dev_err(&pdev->dev, "no burst-regs-num in %s\n",
1236                                 iter->full_name);
1237                         continue;
1238                 }
1239                 pdata->tables[i].burst_regs_num = u;
1240
1241                 ret = of_property_read_u32(iter, "nvidia,emc-trimmers-num", &u);
1242                 if (ret) {
1243                         dev_err(&pdev->dev, "no emc-trimmers-num in %s\n",
1244                                 iter->full_name);
1245                         continue;
1246                 }
1247                 pdata->tables[i].emc_trimmers_num = u;
1248
1249                 ret = of_property_read_u32(iter,
1250                                         "nvidia,burst-up-down-regs-num", &u);
1251                 if (ret) {
1252                         dev_err(&pdev->dev, "no burst-up-down-regs-num in %s\n",
1253                                 iter->full_name);
1254                         continue;
1255                 }
1256                 pdata->tables[i].burst_up_down_regs_num = u;
1257
1258                 ret = of_property_read_u32_array(iter, "nvidia,emc-registers",
1259                                         pdata->tables[i].burst_regs,
1260                                         pdata->tables[i].burst_regs_num);
1261                 if (ret) {
1262                         dev_err(&pdev->dev,
1263                                 "malformed emc-registers property in %s\n",
1264                                 iter->full_name);
1265                         continue;
1266                 }
1267
1268                 ret = of_property_read_u32_array(iter, "nvidia,emc-trimmers-0",
1269                                         pdata->tables[i].emc_trimmers_0,
1270                                         pdata->tables[i].emc_trimmers_num);
1271                 if (ret) {
1272                         dev_err(&pdev->dev,
1273                                 "malformed emc-trimmers-0 property in %s\n",
1274                                 iter->full_name);
1275                         continue;
1276                 }
1277
1278                 ret = of_property_read_u32_array(iter, "nvidia,emc-trimmers-1",
1279                                         pdata->tables[i].emc_trimmers_1,
1280                                         pdata->tables[i].emc_trimmers_num);
1281                 if (ret) {
1282                         dev_err(&pdev->dev,
1283                                 "malformed emc-trimmers-1 property in %s\n",
1284                                 iter->full_name);
1285                         continue;
1286                 }
1287
1288                 ret = of_property_read_u32_array(iter,
1289                                 "nvidia,emc-burst-up-down-regs",
1290                                 pdata->tables[i].burst_up_down_regs,
1291                                 pdata->tables[i].burst_up_down_regs_num);
1292                 if (ret) {
1293                         dev_err(&pdev->dev,
1294                                 "malformed emc-burst-up-down-regs property in %s\n",
1295                                 iter->full_name);
1296                         continue;
1297                 }
1298
1299                 ret = of_property_read_u32(iter, "nvidia,emc-zcal-cnt-long",
1300                                         &pdata->tables[i].emc_zcal_cnt_long);
1301                 if (ret) {
1302                         dev_err(&pdev->dev,
1303                                 "malformed emc-zcal-cnt-long property in %s\n",
1304                                 iter->full_name);
1305                         continue;
1306                 }
1307
1308                 ret = of_property_read_u32(iter, "nvidia,emc-acal-interval",
1309                                         &pdata->tables[i].emc_acal_interval);
1310                 if (ret) {
1311                         dev_err(&pdev->dev,
1312                                 "malformed emc-acal-interval property in %s\n",
1313                                 iter->full_name);
1314                         continue;
1315                 }
1316
1317                 ret = of_property_read_u32(iter, "nvidia,emc-cfg",
1318                                         &pdata->tables[i].emc_cfg);
1319                 if (ret) {
1320                         dev_err(&pdev->dev,
1321                                 "malformed emc-cfg property in %s\n",
1322                                 iter->full_name);
1323                         continue;
1324                 }
1325
1326                 ret = of_property_read_u32(iter, "nvidia,emc-mode-reset",
1327                                         &pdata->tables[i].emc_mode_reset);
1328                 if (ret) {
1329                         dev_err(&pdev->dev,
1330                                 "malformed emc-mode-reset property in %s\n",
1331                                 iter->full_name);
1332                         continue;
1333                 }
1334
1335                 ret = of_property_read_u32(iter, "nvidia,emc-mode-1",
1336                                         &pdata->tables[i].emc_mode_1);
1337                 if (ret) {
1338                         dev_err(&pdev->dev,
1339                                 "malformed emc-mode-1 property in %s\n",
1340                                 iter->full_name);
1341                         continue;
1342                 }
1343
1344                 ret = of_property_read_u32(iter, "nvidia,emc-mode-2",
1345                                         &pdata->tables[i].emc_mode_2);
1346                 if (ret) {
1347                         dev_err(&pdev->dev,
1348                                 "malformed emc-mode-2 property in %s\n",
1349                                 iter->full_name);
1350                         continue;
1351                 }
1352
1353                 ret = of_property_read_u32(iter, "nvidia,emc-mode-4",
1354                                         &pdata->tables[i].emc_mode_4);
1355                 if (ret) {
1356                         dev_err(&pdev->dev,
1357                                 "malformed emc-mode-4 property in %s\n",
1358                                 iter->full_name);
1359                         continue;
1360                 }
1361
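                /*
                 * Optional entry: read without error checking, so a table node
                 * that omits "nvidia,emc-clock-latency-change" is still
                 * accepted and clock_change_latency keeps its prior
                 * (presumably zero-initialized) value.
                 */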
1362                 of_property_read_u32(iter, "nvidia,emc-clock-latency-change",
1363                                         &pdata->tables[i].clock_change_latency);
1364                 i++;
1365         }
1366         pdata->num_tables = i;
1367
1368 out:
1369         of_node_put(tnp);
1370         return pdata;
1371 }
1372 #else
1373 static struct tegra_emc_pdata *tegra_emc_dt_parse_pdata(
1374                                         struct platform_device *pdev)
1375 {
1376         return NULL;
1377 }
1378 #endif
1379
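/*
 * init_emc_table - validate and install the EMC DFS table.
 *
 * Checks the DRAM type and the boot clock parent, matches every table entry
 * against an available clock source/divider, picks the maximum rate (either
 * the highest usable table entry or the boot pll_m rate, depending on the
 * max-DVFS selection), and cross-checks that rate against the nominal EMC
 * voltage before the table is accepted.
 */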
1380 static int init_emc_table(const struct tegra11_emc_table *table, int table_size)
1381 {
1382         int i, mv;
1383         u32 reg;
1384         bool max_entry = false;
1385         bool emc_max_dvfs_sel = get_emc_max_dvfs();
1386         unsigned long boot_rate, max_rate;
1387         struct clk *pll_c = tegra_get_clock_by_name("pll_c");
1388
1389         emc_stats.clkchange_count = 0;
1390         spin_lock_init(&emc_stats.spinlock);
1391         emc_stats.last_update = get_jiffies_64();
1392         emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;
1393
1394         if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
1395                 pr_err("tegra: unsupported DRAM type %u\n", dram_type);
1396                 return -ENODATA;
1397         }
1398
1399         if (emc->parent != tegra_get_clock_by_name("pll_m")) {
1400                 pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
1401                         emc->parent->name);
1402                 return -ENODATA;
1403         }
1404
1405         if (!table || !table_size) {
1406                 pr_err("tegra: EMC DFS table is empty\n");
1407                 return -ENODATA;
1408         }
1409
1410         boot_rate = clk_get_rate(emc) / 1000;
1411         max_rate = clk_get_rate(emc->parent) / 1000;
1412
1413         tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
1414         switch (table[0].rev) {
1415         case 0x40:
1416         case 0x41:
1417         case 0x42:
1418                 start_timing.burst_regs_num = table[0].burst_regs_num;
1419                 start_timing.emc_trimmers_num = table[0].emc_trimmers_num;
1420                 break;
1421         default:
1422                 pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
1423                         table[0].rev);
1424                 return -ENODATA;
1425         }
1426
1427         /* Match EMC source/divider settings with table entries */
1428         for (i = 0; i < tegra_emc_table_size; i++) {
1429                 unsigned long table_rate = table[i].rate;
1430
1431                 /* Skip "no-rate" entry, or entry violating ascending order */
1432                 if (!table_rate ||
1433                     (i && (table_rate <= table[i-1].rate)))
1434                         continue;
1435
1436                 BUG_ON(table[i].rev != table[0].rev);
1437
1438                 if (find_matching_input(&table[i], pll_c,
1439                                         &tegra_emc_clk_sel[i]))
1440                         continue;
1441
1442                 if (table_rate == boot_rate)
1443                         emc_stats.last_sel = i;
1444
1445                 if (emc_max_dvfs_sel) {
1446                         /* EMC max rate = highest table entry at/above boot pll_m */
1447                         if (table_rate >= max_rate) {
1448                                 max_rate = table_rate;
1449                                 max_entry = true;
1450                         }
1451                 } else if (table_rate == max_rate) {
1452                         /* EMC max rate = boot pll_m rate */
1453                         max_entry = true;
1454                         break;
1455                 }
1456         }
1457
1458         /* Validate EMC rate and voltage limits */
1459         if (!max_entry) {
1460                 pr_err("tegra: invalid EMC DFS table: entry for max rate"
1461                        " %lu kHz not found\n", max_rate);
1462                 return -ENODATA;
1463         }
1464
1465         tegra_emc_table = table;
1466
1467         /*
1468          * Purge rates that cannot be reached because table does not specify
1469          * proper backup source. If maximum rate was purged, fall back on boot
1470          * pll_m rate as maximum limit. In any case propagate new maximum limit
1471          * downstream to shared users, and check it against nominal voltage.
1472          */
1473         if (purge_emc_table(max_rate))
1474                 max_rate = clk_get_rate(emc->parent) / 1000;
1475         tegra_init_max_rate(emc, max_rate * 1000);
1476
1477         if (emc->dvfs) {
1478                 adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
1479                 mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
1480                 if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
1481                         tegra_emc_table = NULL;
1482                         pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
1483                                " kHz does not match nominal voltage %d\n",
1484                                max_rate, emc->dvfs->max_millivolts);
1485                         return -ENODATA;
1486                 }
1487         }
1488
1489         pr_info("tegra: validated EMC DFS table\n");
1490
1491         /* Configure clock change mode according to dram type */
1492         reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
1493         reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
1494                 EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
1495         emc_writel(reg, EMC_CFG_2);
1496         return 0;
1497 }
1498
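/*
 * The driver binds only once: if an EMC table is already installed, probe
 * bails out. Platform data takes precedence; otherwise the table set is
 * parsed from the device tree.
 */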
1499 static int __devinit tegra11_emc_probe(struct platform_device *pdev)
1500 {
1501         struct tegra11_emc_pdata *pdata;
1502         struct resource *res;
1503
1504         if (tegra_emc_table)
1505                 return -EINVAL;
1506
1507         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1508         if (!res) {
1509                 dev_err(&pdev->dev, "missing register base\n");
1510                 return -ENXIO;
1511         }
1512
1513         pdata = pdev->dev.platform_data;
1514
1515         if (!pdata)
1516                 pdata = (struct tegra11_emc_pdata *)tegra_emc_dt_parse_pdata(pdev);
1517
1518         if (!pdata) {
1519                 dev_err(&pdev->dev, "missing platform data\n");
1520                 return -ENODATA;
1521         }
1522
1523         return init_emc_table(pdata->tables, pdata->num_tables);
1524 }
1525
1526 static struct of_device_id tegra11_emc_of_match[] __devinitdata = {
1527         { .compatible = "nvidia,tegra11-emc", },
1528         { },
1529 };
1530
1531 static struct platform_driver tegra11_emc_driver = {
1532         .driver         = {
1533                 .name   = "tegra-emc",
1534                 .owner  = THIS_MODULE,
1535                 .of_match_table = tegra11_emc_of_match,
1536         },
1537         .probe          = tegra11_emc_probe,
1538 };
1539
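/*
 * Registers the platform driver, then installs the ISO usage table for the
 * detected DRAM type; the LPDDR3 table is selected for DRAM_TYPE_LPDDR2,
 * presumably because LPDDR3 parts report that type on this SoC. If scaling is
 * enabled, the EMC monitor is preset to the rounded boot rate.
 */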
1540 int __init tegra11_emc_init(void)
1541 {
1542         int ret = platform_driver_register(&tegra11_emc_driver);
1543         if (!ret) {
1544                 if (dram_type == DRAM_TYPE_LPDDR2)
1545                         tegra_emc_iso_usage_table_init(
1546                                 tegra11_lpddr3_emc_iso_usage,
1547                                 ARRAY_SIZE(tegra11_lpddr3_emc_iso_usage));
1548                 else if (dram_type == DRAM_TYPE_DDR3)
1549                         tegra_emc_iso_usage_table_init(
1550                                 tegra11_ddr3_emc_iso_usage,
1551                                 ARRAY_SIZE(tegra11_ddr3_emc_iso_usage));
1552
1553                 if (emc_enable) {
1554                         unsigned long rate = tegra_emc_round_rate_updown(
1555                                 emc->boot_rate, false);
1556                         if (!IS_ERR_VALUE(rate))
1557                                 tegra_clk_preset_emc_monitor(rate);
1558                 }
1559         }
1560         return ret;
1561 }
1562
1563 void tegra_emc_timing_invalidate(void)
1564 {
1565         emc_timing = NULL;
1566 }
1567
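/*
 * Latches the EMC clock handle, the DRAM type from EMC_FBIO_CFG5 and the
 * device count from MC_EMEM_ADR_CFG; this must run before init_emc_table(),
 * which relies on dram_type being set.
 */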
1568 void tegra_emc_dram_type_init(struct clk *c)
1569 {
1570         emc = c;
1571
1572         dram_type = (emc_readl(EMC_FBIO_CFG5) &
1573                      EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;
1574
1575         dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
1576 }
1577
1578 int tegra_emc_get_dram_type(void)
1579 {
1580         return dram_type;
1581 }
1582
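/*
 * Translate a SoC-side register value into DRAM bit positions. Without a
 * dram_to_soc_bit_map the value is simply masked; otherwise each DRAM bit of
 * the mask is set when its mapped SoC bit is set in soc_val.
 */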
1583 static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
1584 {
1585         int bit;
1586         u32 dram_val = 0;
1587
1588         /* Tegra clock definitions always use a shifted mask */
1589         if (!dram_to_soc_bit_map)
1590                 return soc_val & dram_mask;
1591
1592         for (bit = dram_shift; bit < 32; bit++) {
1593                 u32 dram_bit_mask = 0x1 << bit;
1594                 u32 soc_bit_mask = dram_to_soc_bit_map[bit];
1595
1596                 if (!(dram_bit_mask & dram_mask))
1597                         break;
1598
1599                 if (soc_bit_mask & soc_val)
1600                         dram_val |= dram_bit_mask;
1601         }
1602
1603         return dram_val;
1604 }
1605
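/*
 * Issue an LPDDR2 Mode Register Read: wait for stale MRR data to drain,
 * temporarily disable auto clock-power-down (ACPD) if it is enabled, trigger
 * the read via EMC_MRR, poll EMC_STATUS for MRR_DIVLD, restore ACPD and
 * return the data field. Callers (e.g. tegra_emc_get_dram_temperature())
 * serialize access with emc_access_lock.
 */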
1606 static int emc_read_mrr(int dev, int addr)
1607 {
1608         int ret;
1609         u32 val, emc_cfg;
1610
1611         if (dram_type != DRAM_TYPE_LPDDR2)
1612                 return -ENODEV;
1613
1614         ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
1615         if (ret)
1616                 return ret;
1617
1618         emc_cfg = emc_readl(EMC_CFG);
1619         if (emc_cfg & EMC_CFG_DRAM_ACPD) {
1620                 emc_writel(emc_cfg & ~EMC_CFG_DRAM_ACPD, EMC_CFG);
1621                 emc_timing_update();
1622         }
1623
1624         val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
1625         val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
1626         emc_writel(val, EMC_MRR);
1627
1628         ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
1629         if (emc_cfg & EMC_CFG_DRAM_ACPD) {
1630                 emc_writel(emc_cfg, EMC_CFG);
1631                 emc_timing_update();
1632         }
1633         if (ret)
1634                 return ret;
1635
1636         val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
1637         return val;
1638 }
1639
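/*
 * Read LPDDR2 MR4 from device 0 under emc_access_lock and remap the raw value
 * to the DRAM temperature field via soc_to_dram_bit_swap(); a negative MRR
 * error code is returned unchanged.
 */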
1640 int tegra_emc_get_dram_temperature(void)
1641 {
1642         int mr4;
1643         unsigned long flags;
1644
1645         spin_lock_irqsave(&emc_access_lock, flags);
1646
1647         mr4 = emc_read_mrr(0, 4);
1648         if (IS_ERR_VALUE(mr4)) {
1649                 spin_unlock_irqrestore(&emc_access_lock, flags);
1650                 return mr4;
1651         }
1652         spin_unlock_irqrestore(&emc_access_lock, flags);
1653
1654         mr4 = soc_to_dram_bit_swap(
1655                 mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
1656         return mr4;
1657 }
1658
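/*
 * Map an ISO bandwidth request onto an index into the usage-share tables:
 * returns the index of the largest listed frequency that does not exceed bw
 * (0 if bw is below the first entry), or freq_max_num when bw is above the
 * last entry; the share tables carry one extra element for that case.
 */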
1659 static inline int bw_calc_get_freq_idx(u32 *bw_calc_freqs,
1660                 u32 freq_max_num, unsigned long bw)
1661 {
1662         int idx = 0;
1663
1664         if (!bw_calc_freqs)
1665                 return -EINVAL;
1666
1667         if (!freq_max_num)
1668                 return -EINVAL;
1669
1670         if (bw > bw_calc_freqs[freq_max_num-1] * KHZ)
1671                 idx = freq_max_num;
1672
1673         for (; idx < freq_max_num; idx++) {
1674                 u32 freq = bw_calc_freqs[idx] * KHZ;
1675                 if (bw < freq) {
1676                         if (idx)
1677                                 idx--;
1678                         break;
1679                 } else if (bw == freq)
1680                         break;
1681         }
1682
1683         return idx;
1684 }
1685
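/*
 * Usage-share lookup helpers: convert the requested ISO bandwidth into a
 * share percentage via the per-DRAM-type tables, for the default client mix
 * and for display-controller-only traffic respectively.
 */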
1686 static u8 iso_share_calc_t114_lpddr3_default(unsigned long iso_bw)
1687 {
1688         int freq_idx = bw_calc_get_freq_idx(bw_calc_freqs_lpddr3,
1689                 ARRAY_SIZE(bw_calc_freqs_lpddr3), iso_bw);
1690         return tegra11_lpddr3_emc_usage_share_default[freq_idx];
1691 }
1692
1693 static u8 iso_share_calc_t114_lpddr3_dc(unsigned long iso_bw)
1694 {
1695         int freq_idx = bw_calc_get_freq_idx(bw_calc_freqs_lpddr3,
1696                 ARRAY_SIZE(bw_calc_freqs_lpddr3), iso_bw);
1697         return tegra11_lpddr3_emc_usage_share_dc[freq_idx];
1698 }
1699
1700 static u8 iso_share_calc_t114_ddr3_default(unsigned long iso_bw)
1701 {
1702         int freq_idx = bw_calc_get_freq_idx(bw_calc_freqs_ddr3,
1703                 ARRAY_SIZE(bw_calc_freqs_ddr3), iso_bw);
1704         return tegra11_ddr3_emc_usage_share_default[freq_idx];
1705 }
1706
1707 static u8 iso_share_calc_t114_ddr3_dc(unsigned long iso_bw)
1708 {
1709         int freq_idx = bw_calc_get_freq_idx(bw_calc_freqs_ddr3,
1710                 ARRAY_SIZE(bw_calc_freqs_ddr3), iso_bw);
1711         return tegra11_ddr3_emc_usage_share_dc[freq_idx];
1712 }
1713
1714 #ifdef CONFIG_DEBUG_FS
1715
1716 static struct dentry *emc_debugfs_root;
1717
1718 #define INFO_CALC_REV_OFFSET 1
1719 #define INFO_SCRIPT_REV_OFFSET 2
1720 #define INFO_FREQ_OFFSET 3
1721
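/*
 * The metadata words (tag, calc revision, script revision, frequency) are
 * expected right after the burst_up_down_regs array of each table entry; the
 * offsets above index into that trailing block.
 */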
1722 static int emc_table_info_show(struct seq_file *s, void *data)
1723 {
1724         int i;
1725         const u32 *info;
1726         u32 freq, calc_rev, script_rev;
1727         const struct tegra11_emc_table *entry;
1728         bool found = false;
1729
1730         if (!tegra_emc_table) {
1731                 seq_printf(s, "EMC DFS table is not installed\n");
1732                 return 0;
1733         }
1734
1735         for (i = 0; i < tegra_emc_table_size; i++) {
1736                 entry = &tegra_emc_table[i];
1737                 info =
1738                 &entry->burst_up_down_regs[entry->burst_up_down_regs_num];
1739
1740                 seq_printf(s, "%s: ", tegra_emc_clk_sel[i].input != NULL ?
1741                            "accepted" : "rejected");
1742
1743                 /* system validation metadata tag: ASCII "NVDA" */
1744                 if (*info != 0x4E564441) {
1745                         seq_printf(s, "emc dvfs frequency %6lu\n", entry->rate);
1746                         continue;
1747                 }
1748
1749                 found = true;
1750
1751                 calc_rev = *(info + INFO_CALC_REV_OFFSET);
1752                 script_rev = *(info + INFO_SCRIPT_REV_OFFSET);
1753                 freq = *(info + INFO_FREQ_OFFSET);
1754
1755                 seq_printf(s, "emc dvfs frequency %6u: ", freq);
1756                 seq_printf(s, "calc_rev: %02u.%02u.%02u.%02u ",
1757                            (calc_rev >> 24) & 0xff,
1758                            (calc_rev >> 16) & 0xff,
1759                            (calc_rev >>  8) & 0xff,
1760                            (calc_rev >>  0) & 0xff);
1761                 seq_printf(s, "script_rev: %02u.%02u.%02u.%02u\n",
1762                            (script_rev >> 24) & 0xff,
1763                            (script_rev >> 16) & 0xff,
1764                            (script_rev >>  8) & 0xff,
1765                            (script_rev >>  0) & 0xff);
1766         }
1767
1768         if (!found)
1769                 seq_printf(s, "no metadata in EMC DFS table\n");
1770
1771         return 0;
1772 }
1773
1774 static int emc_table_info_open(struct inode *inode, struct file *file)
1775 {
1776         return single_open(file, emc_table_info_show, inode->i_private);
1777 }
1778
1779 static const struct file_operations emc_table_info_fops = {
1780         .open           = emc_table_info_open,
1781         .read           = seq_read,
1782         .llseek         = seq_lseek,
1783         .release        = single_release,
1784 };
1785
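/*
 * Debugfs "stats": dump the time spent at each valid table rate (refreshed by
 * emc_last_stats_update()), the total number of clock changes and the time
 * stamp of the last update.
 */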
1786 static int emc_stats_show(struct seq_file *s, void *data)
1787 {
1788         int i;
1789
1790         emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
1791
1792         seq_printf(s, "%-10s %-10s \n", "rate kHz", "time");
1793         for (i = 0; i < tegra_emc_table_size; i++) {
1794                 if (tegra_emc_clk_sel[i].input == NULL)
1795                         continue;       /* invalid entry */
1796
1797                 seq_printf(s, "%-10lu %-10llu \n", tegra_emc_table[i].rate,
1798                            cputime64_to_clock_t(emc_stats.time_at_clock[i]));
1799         }
1800         seq_printf(s, "%-15s %llu\n", "transitions:",
1801                    emc_stats.clkchange_count);
1802         seq_printf(s, "%-15s %llu\n", "time-stamp:",
1803                    cputime64_to_clock_t(emc_stats.last_update));
1804
1805         return 0;
1806 }
1807
1808 static int emc_stats_open(struct inode *inode, struct file *file)
1809 {
1810         return single_open(file, emc_stats_show, inode->i_private);
1811 }
1812
1813 static const struct file_operations emc_stats_fops = {
1814         .open           = emc_stats_open,
1815         .read           = seq_read,
1816         .llseek         = seq_lseek,
1817         .release        = single_release,
1818 };
1819
1820 static int dram_temperature_get(void *data, u64 *val)
1821 {
1822         *val = tegra_emc_get_dram_temperature();
1823         return 0;
1824 }
1825 DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
1826                         NULL, "%lld\n");
1827
1828 static int efficiency_get(void *data, u64 *val)
1829 {
1830         *val = tegra_emc_bw_efficiency;
1831         return 0;
1832 }
1833 static int efficiency_set(void *data, u64 val)
1834 {
1835         tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
1836         if (emc)
1837                 tegra_clk_shared_bus_update(emc);
1838
1839         return 0;
1840 }
1841 DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
1842                         efficiency_set, "%llu\n");
1843
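/*
 * Debugfs layout under the "tegra_emc" directory: "table_info" is always
 * created; "stats", "clkchange_delay", "dram_temperature", "efficiency" and
 * the ISO usage nodes are added only when a valid EMC table is installed.
 */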
1844 static int __init tegra_emc_debug_init(void)
1845 {
1846         emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
1847         if (!emc_debugfs_root)
1848                 return -ENOMEM;
1849
1850         if (!debugfs_create_file(
1851                 "table_info", S_IRUGO, emc_debugfs_root, NULL,
1852                 &emc_table_info_fops))
1853                 goto err_out;
1854
1855         if (!tegra_emc_table)
1856                 return 0;
1857
1858         if (!debugfs_create_file(
1859                 "stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
1860                 goto err_out;
1861
1862         if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
1863                 emc_debugfs_root, (u32 *)&clkchange_delay))
1864                 goto err_out;
1865
1866         if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
1867                                  NULL, &dram_temperature_fops))
1868                 goto err_out;
1869
1870         if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
1871                                  emc_debugfs_root, NULL, &efficiency_fops))
1872                 goto err_out;
1873
1874         if (tegra_emc_iso_usage_debugfs_init(emc_debugfs_root))
1875                 goto err_out;
1876
1877         return 0;
1878
1879 err_out:
1880         debugfs_remove_recursive(emc_debugfs_root);
1881         return -ENOMEM;
1882 }
1883
1884 late_initcall(tegra_emc_debug_init);
1885 #endif