2 * arch/arm/mach-tegra/tegra11_emc.c
4 * Copyright (c) 2011-2013, NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along
16 * with this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/err.h>
26 #include <linux/module.h>
27 #include <linux/delay.h>
28 #include <linux/platform_device.h>
29 #include <linux/platform_data/tegra_emc.h>
30 #include <linux/debugfs.h>
31 #include <linux/seq_file.h>
32 #include <linux/hrtimer.h>
33 #include <linux/pasr.h>
35 #include <asm/cputime.h>
41 #include "tegra11_emc.h"
42 #include "tegra_emc_dt_parse.h"
/*
 * Runtime switch for EMC frequency scaling, exposed as a writable (0644)
 * module parameter; the default follows CONFIG_TEGRA_EMC_SCALING_ENABLE.
 * NOTE(review): pasr_enable presumably gates Partial Array Self-Refresh
 * (linux/pasr.h is included above) -- confirm against its users.
 */
44 #ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
45 static bool emc_enable = true;
47 static bool emc_enable;
49 module_param(emc_enable, bool, 0644);
51 static int pasr_enable;
/*
 * Breakpoint frequencies (MHz) for the iso-bandwidth share lookup below;
 * the usage-share tables have one extra entry for "above the last breakpoint".
 */
53 static u32 bw_calc_freqs[] = {
54 40, 60, 80, 100, 120, 140, 160, 180, 200, 220, 240, 260, 280, 300
/* Percent of total bandwidth usable for iso clients on LPDDR3, indexed by
 * the bw_calc_freqs bucket: default mix vs. display-controller-only mix. */
57 static u32 tegra11_lpddr3_emc_usage_share_default[] = {
58 35, 38, 40, 41, 42, 43, 43, 45, 45, 45, 46, 47, 48, 48, 50
61 static u32 tegra11_lpddr3_emc_usage_share_dc[] = {
62 47, 52, 55, 57, 58, 59, 60, 62, 62, 63, 64, 66, 67, 68, 70
65 static u8 iso_share_calc_t114_lpddr3_default(unsigned long iso_bw);
66 static u8 iso_share_calc_t114_lpddr3_dc(unsigned long iso_bw);
/* Overall EMC bandwidth efficiency in percent (non-static: used elsewhere). */
68 u8 tegra_emc_bw_efficiency = 80;
/*
 * Iso (latency-sensitive) client usage tables: for each combination of
 * active iso users, the guaranteed share (percent) of EMC bandwidth.
 * DDR3 uses fixed shares; LPDDR3 entries also carry a per-frequency
 * share-calculation callback (declared above).
 */
70 static struct emc_iso_usage tegra11_ddr3_emc_iso_usage[] = {
71 { BIT(EMC_USER_DC1), 80},
72 { BIT(EMC_USER_DC2), 80},
73 { BIT(EMC_USER_DC1) | BIT(EMC_USER_DC2), 45},
74 { BIT(EMC_USER_DC1) | BIT(EMC_USER_VI), 45},
75 { BIT(EMC_USER_DC2) | BIT(EMC_USER_VI), 45},
78 static struct emc_iso_usage tegra11_lpddr3_emc_iso_usage[] = {
81 80, iso_share_calc_t114_lpddr3_dc
85 80, iso_share_calc_t114_lpddr3_dc
88 BIT(EMC_USER_DC1) | BIT(EMC_USER_DC2),
89 45, iso_share_calc_t114_lpddr3_default
92 BIT(EMC_USER_DC1) | BIT(EMC_USER_VI),
93 45, iso_share_calc_t114_lpddr3_default
96 BIT(EMC_USER_DC1) | BIT(EMC_USER_MSENC),
97 50, iso_share_calc_t114_lpddr3_default
100 BIT(EMC_USER_DC1) | BIT(EMC_USER_3D),
101 50, iso_share_calc_t114_lpddr3_default
104 BIT(EMC_USER_DC1) | BIT(EMC_USER_VDE),
105 45, iso_share_calc_t114_lpddr3_default
108 BIT(EMC_USER_DC2) | BIT(EMC_USER_VI),
109 45, iso_share_calc_t114_lpddr3_default
112 BIT(EMC_USER_DC2) | BIT(EMC_USER_MSENC),
113 50, iso_share_calc_t114_lpddr3_default
116 BIT(EMC_USER_DC2) | BIT(EMC_USER_3D),
117 50, iso_share_calc_t114_lpddr3_default
120 BIT(EMC_USER_DC2) | BIT(EMC_USER_VDE),
121 45, iso_share_calc_t114_lpddr3_default
/* Driver-wide limits, MRW (mode-register-write) encoding, and the field
 * layout of the EMC clock source register in the clock controller. */
126 #define TEGRA_EMC_ISO_USE_FREQ_MAX_NUM 14
127 #define PLL_C_DIRECT_FLOOR 333500000
128 #define EMC_STATUS_UPDATE_TIMEOUT 100
129 #define TEGRA_EMC_TABLE_MAX_SIZE 16
131 #define TEGRA_EMC_MODE_REG_17 0x00110000
132 #define TEGRA_EMC_MRW_DEV_SHIFT 30
133 #define TEGRA_EMC_MRW_DEV1 2
134 #define TEGRA_EMC_MRW_DEV2 1
/* EMC clock source register bit fields (divider, mux select, flags). */
142 #define EMC_CLK_DIV_SHIFT 0
143 #define EMC_CLK_DIV_MASK (0xFF << EMC_CLK_DIV_SHIFT)
144 #define EMC_CLK_SOURCE_SHIFT 29
145 #define EMC_CLK_SOURCE_MASK (0x7 << EMC_CLK_SOURCE_SHIFT)
146 #define EMC_CLK_LOW_JITTER_ENABLE (0x1 << 31)
147 #define EMC_CLK_MC_SAME_FREQ (0x1 << 16)
149 /* FIXME: actual Tegra11 list */
/*
 * X-macro list of EMC and MC shadow ("burst") registers that are
 * reprogrammed on every clock change; expanded under several DEFINE_REG
 * definitions below to build address tables and index enums.
 */
150 #define BURST_REG_LIST \
151 DEFINE_REG(TEGRA_EMC_BASE, EMC_RC), \
152 DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC), \
153 DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR), \
154 DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS), \
155 DEFINE_REG(TEGRA_EMC_BASE, EMC_RP), \
156 DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W), \
157 DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R), \
158 DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P), \
159 DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P), \
160 DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD), \
161 DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD), \
162 DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD), \
163 DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT), \
164 DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT), \
165 DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV), \
166 DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK), \
167 DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY), \
168 DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA), \
169 DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_2), \
170 DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST), \
171 DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK), \
172 DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH), \
173 DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM), \
174 DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT), \
175 DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR), \
176 DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD), \
177 DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN), \
178 DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN), \
179 DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN), \
180 DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN), \
181 DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR), \
182 DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL), \
183 DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE), \
184 DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR), \
185 DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD), \
186 DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW), \
187 DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB), \
188 DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE), \
189 DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP), \
190 DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW), \
191 DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_EXTRA), \
192 DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE), \
193 DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ), \
194 DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5), \
195 DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL), \
196 DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD), \
197 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4), \
198 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5), \
199 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6), \
200 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7), \
201 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4), \
202 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5), \
203 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6), \
204 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7), \
205 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4), \
206 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5), \
207 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6), \
208 DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7), \
209 DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL), \
210 DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL4), \
211 DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2), \
212 DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2), \
213 DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL), \
214 DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL), \
215 DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL), \
216 DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2), \
217 DEFINE_REG(TEGRA_EMC_BASE, EMC_DSR_VTTGEN_DRV), \
218 DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN), \
219 DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE), \
220 DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_TERM_CTRL), \
221 DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL), \
222 DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT), \
223 DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT), \
224 DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2), \
225 DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG2), \
226 DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG3), \
227 DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT), \
228 DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION), \
229 DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL), \
230 DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL1), \
231 DEFINE_REG(TEGRA_EMC_BASE, EMC_CA_TRAINING_TIMING_CNTL2), \
233 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG), \
234 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
235 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD), \
236 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP), \
237 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC), \
238 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS), \
239 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW), \
240 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD), \
241 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE), \
242 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE), \
243 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R), \
244 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W), \
245 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W), \
246 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R), \
247 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS), \
248 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS), \
249 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0), \
250 DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE), \
251 DEFINE_REG(TEGRA_EMC_BASE, EMC_SEL_DPD_CTRL),
/*
 * X-macro list of MC latency-allowance/PTSA registers whose programming
 * order depends on the direction of the rate change (written before the
 * switch when going down, after when going up -- see emc_set_clock).
 */
253 #define BURST_UP_DOWN_REG_LIST \
254 DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT), \
255 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_0), \
256 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_G2_1), \
257 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_0), \
258 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_0), \
259 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_2), \
260 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_1), \
261 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV2_1), \
262 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NV_3), \
263 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_0), \
264 DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_EPP_1),
/*
 * X-macro list of per-channel trimmer registers; these exist separately in
 * the EMC0 and EMC1 apertures (base 0 here: only the offset is meaningful,
 * the channel base is added at write time -- see emc_set_clock step 4).
 */
266 #define EMC_TRIMMERS_REG_LIST \
267 DEFINE_REG(0, EMC_CDB_CNTL_1), \
268 DEFINE_REG(0, EMC_FBIO_CFG6), \
269 DEFINE_REG(0, EMC_QUSE), \
270 DEFINE_REG(0, EMC_EINPUT), \
271 DEFINE_REG(0, EMC_EINPUT_DURATION), \
272 DEFINE_REG(0, EMC_DLL_XFORM_DQS0), \
273 DEFINE_REG(0, EMC_QSAFE), \
274 DEFINE_REG(0, EMC_DLL_XFORM_QUSE0), \
275 DEFINE_REG(0, EMC_RDV), \
276 DEFINE_REG(0, EMC_XM2DQSPADCTRL4), \
277 DEFINE_REG(0, EMC_XM2DQSPADCTRL3), \
278 DEFINE_REG(0, EMC_DLL_XFORM_DQ0), \
279 DEFINE_REG(0, EMC_AUTO_CAL_CONFIG), \
280 DEFINE_REG(0, EMC_DLL_XFORM_ADDR0), \
281 DEFINE_REG(0, EMC_XM2CLKPADCTRL2), \
282 DEFINE_REG(0, EMC_DLI_TRIM_TXDQS0), \
283 DEFINE_REG(0, EMC_DLL_XFORM_ADDR1), \
284 DEFINE_REG(0, EMC_DLL_XFORM_ADDR2), \
285 DEFINE_REG(0, EMC_DLL_XFORM_DQS1), \
286 DEFINE_REG(0, EMC_DLL_XFORM_DQS2), \
287 DEFINE_REG(0, EMC_DLL_XFORM_DQS3), \
288 DEFINE_REG(0, EMC_DLL_XFORM_DQ1), \
289 DEFINE_REG(0, EMC_DLL_XFORM_DQ2), \
290 DEFINE_REG(0, EMC_DLL_XFORM_DQ3), \
291 DEFINE_REG(0, EMC_DLI_TRIM_TXDQS1), \
292 DEFINE_REG(0, EMC_DLI_TRIM_TXDQS2), \
293 DEFINE_REG(0, EMC_DLI_TRIM_TXDQS3), \
294 DEFINE_REG(0, EMC_DLL_XFORM_QUSE1), \
295 DEFINE_REG(0, EMC_DLL_XFORM_QUSE2), \
296 DEFINE_REG(0, EMC_DLL_XFORM_QUSE3),
/*
 * Expand the X-macro lists above under successive DEFINE_REG definitions:
 * first as absolute MMIO addresses (burst/up-down address tables), then as
 * raw offsets (per-channel trimmers), then as _INDEX/_TRIM_INDEX enums.
 */
299 #define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
300 static void __iomem *burst_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
303 #ifndef EMULATE_CLOCK_SWITCH
304 static void __iomem *burst_up_down_reg_addr[TEGRA11_EMC_MAX_NUM_REGS] = {
305 BURST_UP_DOWN_REG_LIST
311 #define DEFINE_REG(base, reg) (reg)
312 #ifndef EMULATE_CLOCK_SWITCH
313 static u32 emc_trimmer_offs[TEGRA11_EMC_MAX_NUM_REGS] = {
314 EMC_TRIMMERS_REG_LIST
320 #define DEFINE_REG(base, reg) reg##_INDEX
326 #define DEFINE_REG(base, reg) reg##_TRIM_INDEX
328 EMC_TRIMMERS_REG_LIST
/* Per-table-entry clock selection, current timing state, DFS table handle,
 * DRAM topology, stats, and cached MMIO bases for the EMC/MC/CAR blocks. */
336 unsigned long input_rate;
338 static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
339 static struct tegra11_emc_table start_timing;
340 static const struct tegra11_emc_table *emc_timing;
341 static unsigned long dram_over_temp_state = DRAM_OVER_TEMP_NONE;
/* Minimum spacing (us) enforced between consecutive clock changes. */
343 static ktime_t clkchange_time;
344 static int clkchange_delay = 100;
346 static const struct tegra11_emc_table *tegra_emc_table;
347 static int tegra_emc_table_size;
349 static u32 dram_dev_num;
350 static u32 dram_type = -1;
352 static struct clk *emc;
355 cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
/* Serializes EMC register access between rate changes and MRR readers. */
362 static DEFINE_SPINLOCK(emc_access_lock);
364 static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
365 static void __iomem *emc0_base = IO_ADDRESS(TEGRA_EMC0_BASE);
366 static void __iomem *emc1_base = IO_ADDRESS(TEGRA_EMC1_BASE);
367 static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
368 static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
/* Write @val to the common (broadcast) EMC aperture at offset @addr. */
370 static inline void emc_writel(u32 val, unsigned long addr)
372 writel(val, emc_base + addr);
/* Write @val to channel-0 EMC aperture at offset @addr. */
374 static inline void emc0_writel(u32 val, unsigned long addr)
376 writel(val, emc0_base + addr);
/* Write @val to channel-1 EMC aperture at offset @addr. */
378 static inline void emc1_writel(u32 val, unsigned long addr)
380 writel(val, emc1_base + addr);
/* Read from the common EMC aperture at offset @addr. */
382 static inline u32 emc_readl(unsigned long addr)
384 return readl(emc_base + addr);
/* Write @val to the memory controller (MC) at offset @addr. */
386 static inline void mc_writel(u32 val, unsigned long addr)
388 writel(val, mc_base + addr);
/* Read from the memory controller (MC) at offset @addr. */
390 static inline u32 mc_readl(unsigned long addr)
392 return readl(mc_base + addr);
/*
 * Queue a deferred register write into the EMC clock-change FIFO: the
 * hardware applies the queued (@addr, @val) pairs during the clock switch.
 * Data must be written before the address, which triggers the enqueue.
 */
395 static inline void ccfifo_writel(u32 val, unsigned long addr)
397 writel(val, emc_base + EMC_CCFIFO_DATA);
398 writel(addr, emc_base + EMC_CCFIFO_ADDR);
/*
 * Index of the table entry matched by the most recent rounding operation;
 * lets the rate-lookup loops below start at the likely hit instead of 0.
 */
401 static int last_round_idx;
/* Return the cached index if its rate matches, else start scanning from 0. */
402 static inline int get_start_idx(unsigned long rate)
404 if (tegra_emc_table[last_round_idx].rate == rate)
405 return last_round_idx;
/*
 * Account the jiffies spent at the previous rate into the time_at_clock
 * histogram and record @last_sel as the new current table index.
 * TEGRA_EMC_TABLE_MAX_SIZE is used as the "no valid selection" sentinel.
 */
409 static void emc_last_stats_update(int last_sel)
412 u64 cur_jiffies = get_jiffies_64();
414 spin_lock_irqsave(&emc_stats.spinlock, flags);
416 if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
417 emc_stats.time_at_clock[emc_stats.last_sel] =
418 emc_stats.time_at_clock[emc_stats.last_sel] +
419 (cur_jiffies - emc_stats.last_update);
421 emc_stats.last_update = cur_jiffies;
423 if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
424 emc_stats.clkchange_count++;
425 emc_stats.last_sel = last_sel;
427 spin_unlock_irqrestore(&emc_stats.spinlock, flags);
/*
 * Poll @status_reg until the @bit_mask field equals @updated_state, bounded
 * by EMC_STATUS_UPDATE_TIMEOUT iterations. Returns 0 on success, negative
 * on timeout (error path not visible in this view).
 */
430 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
433 for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
434 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
/*
 * Trigger a shadow-to-active timing register copy and wait for the
 * TIMING_UPDATE_STALLED bit to clear.
 * NOTE(review): the pr_err format string lacks a trailing '\n'.
 */
441 static inline void emc_timing_update(void)
445 emc_writel(0x1, EMC_TIMING_CONTROL);
446 err = wait_for_update(EMC_STATUS,
447 EMC_STATUS_TIMING_UPDATE_STALLED, false);
449 pr_err("%s: timing update error: %d", __func__, err);
/*
 * Turn off periodic pad auto-calibration (interval = 0) and wait for any
 * in-flight calibration to finish.
 * NOTE(review): the pr_err format string lacks a trailing '\n'.
 */
454 static inline void auto_cal_disable(void)
458 emc_writel(0, EMC_AUTO_CAL_INTERVAL);
459 err = wait_for_update(EMC_AUTO_CAL_STATUS,
460 EMC_AUTO_CAL_STATUS_ACTIVE, false);
462 pr_err("%s: disable auto-cal error: %d", __func__, err);
/*
 * Apply over-temperature derating to the refresh-related shadow registers:
 * quarter the 16-bit refresh interval field (low half) while preserving the
 * upper half, then write the derated values directly to the shadow regs.
 */
467 static inline void set_over_temp_timing(
468 const struct tegra11_emc_table *next_timing, unsigned long state)
470 #define REFRESH_SPEEDUP(val) \
472 val = ((val) & 0xFFFF0000) | (((val) & 0xFFFF) >> 2); \
475 u32 ref = next_timing->burst_regs[EMC_REFRESH_INDEX];
476 u32 pre_ref = next_timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
477 u32 dsr_cntrl = next_timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];
480 case DRAM_OVER_TEMP_NONE:
482 case DRAM_OVER_TEMP_REFRESH_X2:
483 case DRAM_OVER_TEMP_REFRESH_X4:
484 case DRAM_OVER_TEMP_THROTTLE:
485 REFRESH_SPEEDUP(ref);
486 REFRESH_SPEEDUP(pre_ref);
487 REFRESH_SPEEDUP(dsr_cntrl);
490 WARN(1, "%s: Failed to set dram over temp state %lu\n",
495 __raw_writel(ref, burst_reg_addr[EMC_REFRESH_INDEX]);
496 __raw_writel(pre_ref, burst_reg_addr[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
497 __raw_writel(dsr_cntrl, burst_reg_addr[EMC_DYN_SELF_REF_CONTROL_INDEX]);
/*
 * If the next timing enables a DQS/VREF pad bit that the last timing had
 * disabled, pre-enable it now (before the clock switch) so the pad has time
 * to settle; returns whether any preset write was issued (per the DQS_SET
 * macros' visible enable-compare-then-write pattern).
 */
500 static inline bool dqs_preset(const struct tegra11_emc_table *next_timing,
501 const struct tegra11_emc_table *last_timing)
505 #define DQS_SET(reg, bit) \
507 if ((next_timing->burst_regs[EMC_##reg##_INDEX] & \
508 EMC_##reg##_##bit##_ENABLE) && \
509 (!(last_timing->burst_regs[EMC_##reg##_INDEX] & \
510 EMC_##reg##_##bit##_ENABLE))) { \
511 emc_writel(last_timing->burst_regs[EMC_##reg##_INDEX] \
512 | EMC_##reg##_##bit##_ENABLE, EMC_##reg); \
518 #define DQS_SET_TRIM(reg, bit, ch) \
520 if ((next_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX] \
521 & EMC_##reg##_##bit##_ENABLE) && \
522 (!(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX] \
523 & EMC_##reg##_##bit##_ENABLE))) { \
524 emc##ch##_writel(last_timing->emc_trimmers_##ch[EMC_##reg##_TRIM_INDEX] \
525 | EMC_##reg##_##bit##_ENABLE, EMC_##reg); \
530 DQS_SET(XM2DQSPADCTRL2, VREF);
/*
 * DDR3 DLL-restart path: replace the table's MRS long-wait count with a
 * value derived from the short-wait setting plus the expected operation
 * length, subtracting any overlapping per-device zq-calibration time,
 * and write the result to EMC_MRS_WAIT_CNT.
 */
535 static inline void overwrite_mrs_wait_cnt(
536 const struct tegra11_emc_table *next_timing,
542 /* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
543 for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
544 expected operation length. Reduce the latter by the overlapping
545 zq-calibration, if any */
547 cnt -= dram_dev_num * 256;
549 reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
550 EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
551 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
555 reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
556 (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
557 reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
558 EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
560 emc_writel(reg, EMC_MRS_WAIT_CNT);
/*
 * Compare the DLL-disable bit (bit 0 of emc_mode_1, active low for "enabled")
 * between timings and classify the transition as DLL_CHANGE_NONE/ON/OFF.
 */
563 static inline int get_dll_change(const struct tegra11_emc_table *next_timing,
564 const struct tegra11_emc_table *last_timing)
566 bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
567 bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
569 if (next_dll_enabled == last_dll_enabled)
570 return DLL_CHANGE_NONE;
571 else if (next_dll_enabled)
572 return DLL_CHANGE_ON;
574 return DLL_CHANGE_OFF;
/*
 * Queue DRAM mode-register updates into the clock-change FIFO so they take
 * effect with the new clock. DDR3 writes EMRS/EMRS2/MRS (mode_1, mode_2,
 * reset) and forces a DLL-reset MRS when the DLL is being turned on; the
 * LPDDR path writes MRW2/MRW/MRW4 in the opposite mode_2-first order.
 */
577 static inline void set_dram_mode(const struct tegra11_emc_table *next_timing,
578 const struct tegra11_emc_table *last_timing,
581 if (dram_type == DRAM_TYPE_DDR3) {
582 /* first mode_1, then mode_2, then mode_reset*/
583 if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
584 ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
585 if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
586 ccfifo_writel(next_timing->emc_mode_2, EMC_EMRS2);
588 if ((next_timing->emc_mode_reset !=
589 last_timing->emc_mode_reset) ||
590 (dll_change == DLL_CHANGE_ON)) {
591 u32 reg = next_timing->emc_mode_reset &
592 (~EMC_MODE_SET_DLL_RESET);
593 if (dll_change == DLL_CHANGE_ON) {
594 reg |= EMC_MODE_SET_DLL_RESET;
595 reg |= EMC_MODE_SET_LONG_CNT;
597 ccfifo_writel(reg, EMC_MRS);
600 /* first mode_2, then mode_1; mode_reset is not applicable */
601 if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
602 ccfifo_writel(next_timing->emc_mode_2, EMC_MRW2);
603 if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
604 ccfifo_writel(next_timing->emc_mode_1, EMC_MRW);
605 if (next_timing->emc_mode_4 != last_timing->emc_mode_4)
606 ccfifo_writel(next_timing->emc_mode_4, EMC_MRW4);
/*
 * Perform the actual EMC clock source switch: an MC read barrier flushes
 * prior posted writes, the new mux/divider setting is written to the clock
 * controller (read back to post it), then we wait for the EMC to signal
 * CLKCHANGE_COMPLETE.
 * NOTE(review): the pr_err format string lacks a trailing '\n'.
 */
610 static inline void do_clock_change(u32 clk_setting)
614 mc_readl(MC_EMEM_ADR_CFG); /* completes prev writes */
615 writel(clk_setting, clk_base + emc->reg);
616 readl(clk_base + emc->reg);/* completes prev write */
618 err = wait_for_update(EMC_INTSTATUS,
619 EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
621 pr_err("%s: clock change completion error: %d", __func__, err);
/*
 * Core reprogramming sequence for an EMC rate change: program the shadow
 * (burst) registers and per-channel trimmers for @next_timing, queue the
 * order-sensitive operations (refresh disable, self-refresh, mode regs,
 * ZQ cal) into the clock-change FIFO, switch the clock source, then restore
 * auto-cal, dynamic self-refresh and zcal settings. Caller must hold
 * emc_access_lock (see tegra_emc_set_rate). The numbered step comments
 * follow the hardware-documented change sequence.
 */
626 static noinline void emc_set_clock(const struct tegra11_emc_table *next_timing,
627 const struct tegra11_emc_table *last_timing,
630 #ifndef EMULATE_CLOCK_SWITCH
631 int i, dll_change, pre_wait;
632 bool dyn_sref_enabled, zcal_long;
634 u32 emc_cfg_reg = emc_readl(EMC_CFG);
636 dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
637 dll_change = get_dll_change(next_timing, last_timing);
/* zcal_long: periodic ZQ calibration turns on with this change. */
638 zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
639 (last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);
641 /* FIXME: remove steps enumeration below? */
643 /* 1. clear clkchange_complete interrupts */
644 emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
646 /* 2. disable dynamic self-refresh and preset dqs vref, then wait for
647 possible self-refresh entry/exit and/or dqs vref settled - waiting
648 before the clock change decreases worst case change stall time */
650 if (dyn_sref_enabled) {
651 emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
652 emc_writel(emc_cfg_reg, EMC_CFG);
653 pre_wait = 5; /* 5us+ for self-refresh entry/exit */
656 /* 2.5 check dq/dqs vref delay */
657 if (dqs_preset(next_timing, last_timing)) {
659 pre_wait = 3; /* 3us+ for dqs vref settled */
666 /* 3. disable auto-cal if vref mode is switching - removed */
668 /* 4. program burst shadow registers */
669 for (i = 0; i < next_timing->burst_regs_num; i++) {
670 if (!burst_reg_addr[i])
672 __raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
/* Trimmers are per-channel: same offset written to both EMC apertures. */
674 for (i = 0; i < next_timing->emc_trimmers_num; i++) {
675 __raw_writel(next_timing->emc_trimmers_0[i],
676 emc0_base + emc_trimmer_offs[i]);
677 __raw_writel(next_timing->emc_trimmers_1[i],
678 emc1_base + emc_trimmer_offs[i]);
680 if ((dram_type == DRAM_TYPE_LPDDR2) &&
681 (dram_over_temp_state != DRAM_OVER_TEMP_NONE))
682 set_over_temp_timing(next_timing, dram_over_temp_state);
684 emc_cfg_reg &= ~EMC_CFG_UPDATE_MASK;
685 emc_cfg_reg |= next_timing->emc_cfg & EMC_CFG_UPDATE_MASK;
686 emc_writel(emc_cfg_reg, EMC_CFG);
690 /* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
691 overwrite DFS table setting */
692 if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
693 overwrite_mrs_wait_cnt(next_timing, zcal_long);
695 /* 5.2 disable auto-refresh to save time after clock change */
696 ccfifo_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);
698 /* 6. turn Off dll and enter self-refresh on DDR3 */
699 if (dram_type == DRAM_TYPE_DDR3) {
700 if (dll_change == DLL_CHANGE_OFF)
701 ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
702 ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
703 EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
706 /* 7. flow control marker 2 */
707 ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
709 /* 8. exit self-refresh on DDR3 */
710 if (dram_type == DRAM_TYPE_DDR3)
711 ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);
713 /* 8.1 re-enable auto-refresh */
714 ccfifo_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);
716 /* 9. set dram mode registers */
717 set_dram_mode(next_timing, last_timing, dll_change);
719 /* 10. issue zcal command if turning zcal On */
721 ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
722 if (dram_dev_num > 1)
723 ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
726 /* 10.1 dummy write to RO register to remove stall after change */
727 ccfifo_writel(0, EMC_CCFIFO_STATUS);
729 /* 11.5 program burst_up_down registers if emc rate is going down */
730 if (next_timing->rate < last_timing->rate) {
731 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
732 __raw_writel(next_timing->burst_up_down_regs[i],
733 burst_up_down_reg_addr[i]);
737 /* 12-14. read any MC register to ensure the programming is done
738 change EMC clock source register wait for clk change completion */
739 do_clock_change(clk_setting);
741 /* 14.1 re-enable auto-refresh - moved to ccfifo in 8.1 */
743 /* 14.2 program burst_up_down registers if emc rate is going up */
744 if (next_timing->rate > last_timing->rate) {
745 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
746 __raw_writel(next_timing->burst_up_down_regs[i],
747 burst_up_down_reg_addr[i]);
751 /* 15. set auto-cal interval */
752 if (next_timing->rev >= 0x42)
753 emc_writel(next_timing->emc_acal_interval,
754 EMC_AUTO_CAL_INTERVAL);
756 /* 16. restore dynamic self-refresh */
757 if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
758 emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
759 emc_writel(emc_cfg_reg, EMC_CFG);
762 /* 17. set zcal wait count */
763 emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);
765 /* 18. update restored timing */
769 /* FIXME: implement */
770 pr_info("tegra11_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
771 next_timing->rate, clk_setting);
/*
 * Snapshot the currently programmed hardware state into @timing: read back
 * all burst shadow registers and per-channel trimmers, zero the fields that
 * cannot be read back (mode registers, cal intervals), and record the
 * current EMC_CFG and rate (kHz). Used to seed start_timing at boot when
 * the boot timing may not match any DFS table entry.
 */
775 static inline void emc_get_timing(struct tegra11_emc_table *timing)
779 /* burst and trimmers updates depends on previous state; burst_up_down
781 for (i = 0; i < timing->burst_regs_num; i++) {
782 if (burst_reg_addr[i])
783 timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
785 timing->burst_regs[i] = 0;
787 for (i = 0; i < timing->emc_trimmers_num; i++) {
788 timing->emc_trimmers_0[i] =
789 __raw_readl(emc0_base + emc_trimmer_offs[i]);
790 timing->emc_trimmers_1[i] =
791 __raw_readl(emc1_base + emc_trimmer_offs[i]);
/* These have no read-back source; left 0 so any comparison treats them
 * as "changed" on the first real transition. */
793 timing->emc_acal_interval = 0;
794 timing->emc_zcal_cnt_long = 0;
795 timing->emc_mode_reset = 0;
796 timing->emc_mode_1 = 0;
797 timing->emc_mode_2 = 0;
798 timing->emc_mode_4 = 0;
799 timing->emc_cfg = emc_readl(EMC_CFG);
800 timing->rate = clk_get_rate_locked(emc) / 1000;
803 /* The EMC registers have shadow registers. When the EMC clock is updated
804 * in the clock controller, the shadow registers are copied to the active
805 * registers, allowing glitchless memory bus frequency changes.
806 * This function updates the shadow registers for a new clock frequency,
807 * and relies on the clock lock on the emc clock to avoid races between
808 * multiple frequency changes. In addition access lock prevents concurrent
809 * access to EMC registers from reading MRR registers */
/*
 * Switch the EMC to @rate (Hz): find the exact matching table entry
 * (starting from the cached index), enforce the minimum spacing between
 * consecutive changes (clkchange_delay us), and run emc_set_clock() under
 * emc_access_lock. Returns 0 on success (error returns on no-table /
 * no-match paths are outside this view).
 */
810 int tegra_emc_set_rate(unsigned long rate)
814 const struct tegra11_emc_table *last_timing;
816 s64 last_change_delay;
818 if (!tegra_emc_table)
821 /* Table entries specify rate in kHz */
824 i = get_start_idx(rate);
825 for (; i < tegra_emc_table_size; i++) {
826 if (tegra_emc_clk_sel[i].input == NULL)
827 continue; /* invalid entry */
829 if (tegra_emc_table[i].rate == rate)
833 if (i >= tegra_emc_table_size)
837 /* can not assume that boot timing matches dfs table even
838 if boot frequency matches one of the table nodes */
839 emc_get_timing(&start_timing);
840 last_timing = &start_timing;
843 last_timing = emc_timing;
845 clk_setting = tegra_emc_clk_sel[i].value;
/* Back-to-back changes are throttled to clkchange_delay microseconds. */
847 last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
848 if ((last_change_delay >= 0) && (last_change_delay < clkchange_delay))
849 udelay(clkchange_delay - (int)last_change_delay);
851 spin_lock_irqsave(&emc_access_lock, flags);
852 emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
853 clkchange_time = ktime_get();
854 emc_timing = &tegra_emc_table[i];
855 spin_unlock_irqrestore(&emc_access_lock, flags);
857 emc_last_stats_update(i);
859 pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);
/*
 * Round @rate (Hz) to a valid table rate. Scans upward for the first valid
 * entry >= rate; when @up is false and an exact match was not hit, it steps
 * back to the previous valid entry (rounding down). Returns the chosen rate
 * in Hz, or the current rate when no DFS table is loaded.
 */
864 long tegra_emc_round_rate_updown(unsigned long rate, bool up)
867 unsigned long table_rate;
869 if (!tegra_emc_table)
870 return clk_get_rate_locked(emc); /* no table - no rate change */
875 pr_debug("%s: %lu\n", __func__, rate);
877 /* Table entries specify rate in kHz */
880 i = get_start_idx(rate);
881 for (; i < tegra_emc_table_size; i++) {
882 if (tegra_emc_clk_sel[i].input == NULL)
883 continue; /* invalid entry */
885 table_rate = tegra_emc_table[i].rate;
886 if (table_rate >= rate) {
887 if (!up && i && (table_rate > rate)) {
889 table_rate = tegra_emc_table[i].rate;
891 pr_debug("%s: using %lu\n", __func__, table_rate);
893 return table_rate * 1000;
/*
 * For a target @rate (Hz), return the parent clock the matching table entry
 * selects and store its divider field in @div_value. Without a DFS table,
 * only the current rate is answerable (current parent, current divider).
 */
900 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
904 if (!tegra_emc_table) {
905 if (rate == clk_get_rate_locked(emc)) {
906 *div_value = emc->div - 2;
912 pr_debug("%s: %lu\n", __func__, rate);
914 /* Table entries specify rate in kHz */
917 i = get_start_idx(rate);
918 for (; i < tegra_emc_table_size; i++) {
919 if (tegra_emc_table[i].rate == rate) {
920 struct clk *p = tegra_emc_clk_sel[i].input;
922 if (p && (tegra_emc_clk_sel[i].input_rate ==
924 *div_value = (tegra_emc_clk_sel[i].value &
925 EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
/*
 * Check whether the parent clock required for @rate is already running at
 * the required input rate. On failure, report the needed parent and rate
 * through @parent/@parent_rate and suggest a backup rate via @backup_rate:
 * either the preconfigured shared-bus backup (pll_m-scaled builds) or the
 * next higher table rate served by a different parent.
 */
933 bool tegra_emc_is_parent_ready(unsigned long rate, struct clk **parent,
934 unsigned long *parent_rate, unsigned long *backup_rate)
938 struct clk *p = NULL;
939 unsigned long p_rate = 0;
941 if (!tegra_emc_table)
944 pr_debug("%s: %lu\n", __func__, rate);
946 /* Table entries specify rate in kHz */
949 i = get_start_idx(rate);
950 for (; i < tegra_emc_table_size; i++) {
951 if (tegra_emc_table[i].rate == rate) {
952 p = tegra_emc_clk_sel[i].input;
954 continue; /* invalid entry */
956 p_rate = tegra_emc_clk_sel[i].input_rate;
957 if (p_rate == clk_get_rate(p))
963 /* Table match not found - "non existing parent" is ready */
967 #ifdef CONFIG_TEGRA_PLLM_SCALED
969 * Table match found, but parent is not ready - check if backup entry
970 * was found during initialization, and return the respective backup
973 if (emc->shared_bus_backup.input &&
974 (emc->shared_bus_backup.input != p)) {
976 *parent_rate = p_rate;
977 *backup_rate = emc->shared_bus_backup.bus_rate;
982 * Table match found, but parent is not ready - continue search
983 * for backup rate: min rate above requested that has different
984 * parent source (since only pll_c is scaled and may not be ready,
985 * any other parent can provide backup)
988 *parent_rate = p_rate;
990 for (i++; i < tegra_emc_table_size; i++) {
991 p = tegra_emc_clk_sel[i].input;
993 continue; /* invalid entry */
995 if (p != (*parent)) {
996 *backup_rate = tegra_emc_table[i].rate * 1000;
1001 /* Parent is not ready, and no backup found */
1002 *backup_rate = -EINVAL;
/*
 * Look up the EMC clock mux entry whose selector value equals @val;
 * iterates the NULL-terminated emc->inputs array.
 */
1006 static inline const struct clk_mux_sel *get_emc_input(u32 val)
1008 const struct clk_mux_sel *sel;
1010 for (sel = emc->inputs; sel->input != NULL; sel++) {
1011 if (sel->value == val)
/*
 * Validate one DFS table entry's clock source encoding and fill in
 * @emc_clk_sel (input clock, required input rate, raw source register
 * value). Rejects odd dividers, unknown mux selectors, invalid low-jitter
 * paths, and EMC/MC frequency-ratio mismatches; fixed (non-scalable)
 * sources must match the computed input rate exactly.
 */
1017 static int find_matching_input(const struct tegra11_emc_table *table,
1018 struct clk *pll_c, struct emc_sel *emc_clk_sel)
1020 u32 div_value = (table->src_sel_reg & EMC_CLK_DIV_MASK) >>
1022 u32 src_value = (table->src_sel_reg & EMC_CLK_SOURCE_MASK) >>
1023 EMC_CLK_SOURCE_SHIFT;
1024 unsigned long input_rate = 0;
1025 unsigned long table_rate = table->rate * 1000; /* table rate in kHz */
1026 const struct clk_mux_sel *sel = get_emc_input(src_value);
1028 #ifdef CONFIG_TEGRA_PLLM_SCALED
1029 struct clk *scalable_pll = emc->parent; /* pll_m is a boot parent */
1031 struct clk *scalable_pll = pll_c;
1033 pr_info_once("tegra: %s is selected as scalable EMC clock source\n",
1034 scalable_pll->name);
1036 if (div_value & 0x1) {
1037 pr_warn("tegra: invalid odd divider for EMC rate %lu\n",
1042 pr_warn("tegra: no matching input found for EMC rate %lu\n",
1046 if (div_value && (table->src_sel_reg & EMC_CLK_LOW_JITTER_ENABLE)) {
1047 pr_warn("tegra: invalid LJ path for EMC rate %lu\n",
/* EMC_CLK_MC_SAME_FREQ in the source register must agree with the
 * EMC_SAME_FREQ bit programmed into MC_EMEM_ARB_MISC0. */
1051 if (!(table->src_sel_reg & EMC_CLK_MC_SAME_FREQ) !=
1052 !(MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
1053 table->burst_regs[MC_EMEM_ARB_MISC0_INDEX])) {
1054 pr_warn("tegra: ambiguous EMC to MC ratio for EMC rate %lu\n",
1059 #ifndef CONFIG_TEGRA_DUAL_CBUS
1060 if (sel->input == pll_c) {
1061 pr_warn("tegra: %s is cbus source: no EMC rate %lu support\n",
1062 sel->input->name, table_rate);
1067 if (sel->input == scalable_pll) {
1068 input_rate = table_rate * (1 + div_value / 2);
1070 /* all other sources are fixed, must exactly match the rate */
1071 input_rate = clk_get_rate(sel->input);
1072 if (input_rate != (table_rate * (1 + div_value / 2))) {
1073 pr_warn("tegra: EMC rate %lu does not match %s rate %lu\n",
1074 table_rate, sel->input->name, input_rate);
1079 #ifdef CONFIG_TEGRA_PLLM_SCALED
1080 if (sel->input == pll_c) {
1081 /* maybe overwritten in a loop - end up at max rate
1083 emc->shared_bus_backup.input = pll_c;
1084 emc->shared_bus_backup.bus_rate = table_rate;
1087 /* Get ready emc clock selection settings for this table rate */
1088 emc_clk_sel->input = sel->input;
1089 emc_clk_sel->input_rate = input_rate;
1090 emc_clk_sel->value = table->src_sel_reg;
/*
 * Rebuild the EMC DVFS frequency ladder from the DFS table: for each DVFS
 * voltage step, pick the highest table rate whose emc_min_mv fits within
 * that voltage (falling back to a 1 MHz placeholder when none qualifies).
 */
1095 static void adjust_emc_dvfs_table(const struct tegra11_emc_table *table,
1101 for (i = 0; i < MAX_DVFS_FREQS; i++) {
1102 int mv = emc->dvfs->millivolts[i];
1106 /* For each dvfs voltage find maximum supported rate;
1107 use 1MHz placeholder if not found */
1108 for (rate = 1000, j = 0; j < table_size; j++) {
1109 if (tegra_emc_clk_sel[j].input == NULL)
1110 continue; /* invalid entry */
1112 if ((mv >= table[j].emc_min_mv) &&
1113 (rate < table[j].rate))
1114 rate = table[j].rate;
1116 /* Table entries specify rate in kHz */
1117 emc->dvfs->freqs[i] = rate * 1000;
1121 #ifdef CONFIG_TEGRA_PLLM_SCALED
1122 /* When pll_m is scaled, pll_c must provide backup rate;
1123 if not - remove rates that require pll_m scaling */
/*
 * Invalidate table entries whose input clock is not already running at the
 * required rate (i.e. entries that would need pll_m scaling) when no backup
 * source exists; warns about each removed rate.
 */
1124 static int purge_emc_table(unsigned long max_rate)
1129 if (emc->shared_bus_backup.input)
1132 pr_warn("tegra: selected pll_m scaling option but no backup source:\n");
1133 pr_warn(" removed not supported entries from the table:\n");
1135 /* made all entries with non matching rate invalid */
1136 for (i = 0; i < tegra_emc_table_size; i++) {
1137 struct emc_sel *sel = &tegra_emc_clk_sel[i];
1139 if (clk_get_rate(sel->input) != sel->input_rate) {
1140 pr_warn(" EMC rate %lu\n",
1141 tegra_emc_table[i].rate * 1000);
1143 sel->input_rate = 0;
1145 if (max_rate == tegra_emc_table[i].rate)
1153 /* When pll_m is fixed @ max EMC rate, it always provides backup for pll_c */
1154 #define purge_emc_table(max_rate) (0)
/*
 * init_emc_table() - validate and install the EMC DFS table.
 *
 * Sanity-checks the platform (supported DRAM type, pll_m boot parent,
 * non-empty table, known table revision), matches every ascending-rate
 * table entry to an EMC clock source/divider via find_matching_input(),
 * determines the maximum EMC rate (either the highest table entry or
 * the boot pll_m rate, depending on get_emc_max_dvfs()), purges
 * unreachable rates, propagates the max-rate limit and validates it
 * against the nominal DVFS voltage, and finally programs the clock
 * change mode in EMC_CFG_2 according to DRAM type.
 *
 * Returns 0 on success, negative errno on validation failure (error
 * return lines are missing from this sampled view).
 */
1157 static int init_emc_table(const struct tegra11_emc_table *table, int table_size)
1161 	bool max_entry = false;
1162 	bool emc_max_dvfs_sel = get_emc_max_dvfs();
1163 	unsigned long boot_rate, max_rate;
1164 	struct clk *pll_c = tegra_get_clock_by_name("pll_c");
1166 	emc_stats.clkchange_count = 0;
1167 	spin_lock_init(&emc_stats.spinlock);
1168 	emc_stats.last_update = get_jiffies_64();
1169 	emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;
	/* Only DDR3 and LPDDR2/3 protocols are supported by this driver */
1171 	if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
1172 		pr_err("tegra: not supported DRAM type %u\n", dram_type);
1176 	if (emc->parent != tegra_get_clock_by_name("pll_m")) {
1177 		pr_err("tegra: boot parent %s is not supported by EMC DFS\n",
1182 	if (!table || !table_size) {
1183 		pr_err("tegra: EMC DFS table is empty\n");
	/* Rates below are tracked in kHz */
1187 	boot_rate = clk_get_rate(emc) / 1000;
1188 	max_rate = clk_get_rate(emc->parent) / 1000;
1190 	tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
1191 	switch (table[0].rev) {
1195 		start_timing.burst_regs_num = table[0].burst_regs_num;
1196 		start_timing.emc_trimmers_num = table[0].emc_trimmers_num;
1199 		pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
1204 	/* Match EMC source/divider settings with table entries */
1205 	for (i = 0; i < tegra_emc_table_size; i++) {
1206 		unsigned long table_rate = table[i].rate;
1208 		/* Skip "no-rate" entry, or entry violating ascending order */
1210 		    (i && (table_rate <= table[i-1].rate)))
	/* All entries must share the revision of entry 0 */
1213 		BUG_ON(table[i].rev != table[0].rev);
1215 		if (find_matching_input(&table[i], pll_c,
1216 					&tegra_emc_clk_sel[i]))
1219 		if (table_rate == boot_rate)
1220 			emc_stats.last_sel = i;
1222 		if (emc_max_dvfs_sel) {
1223 			/* EMC max rate = max table entry above boot pll_m */
1224 			if (table_rate >= max_rate) {
1225 				max_rate = table_rate;
1228 		} else if (table_rate == max_rate) {
1229 			/* EMC max rate = boot pll_m rate */
1235 	/* Validate EMC rate and voltage limits */
1237 		pr_err("tegra: invalid EMC DFS table: entry for max rate"
1238 		       " %lu kHz is not found\n", max_rate);
1242 	tegra_emc_table = table;
1245 	 * Purge rates that cannot be reached because table does not specify
1246 	 * proper backup source. If maximum rate was purged, fall back on boot
1247 	 * pll_m rate as maximum limit. In any case propagate new maximum limit
1248 	 * down stream to shared users, and check it against nominal voltage.
1250 	if (purge_emc_table(max_rate))
1251 		max_rate = clk_get_rate(emc->parent) / 1000;
1252 	tegra_init_max_rate(emc, max_rate * 1000);
1255 		adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
1256 		mv = tegra_dvfs_predict_peak_millivolts(emc, max_rate * 1000);
1257 		if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
	/* Reject the table: max rate needs more than nominal voltage */
1258 			tegra_emc_table = NULL;
1259 			pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
1260 			       " kHz does not match nominal voltage %d\n",
1261 			       max_rate, emc->dvfs->max_millivolts);
1266 	pr_info("tegra: validated EMC DFS table\n");
1268 	/* Configure clock change mode according to dram type */
1269 	reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
1270 	reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
1271 		EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
1272 	emc_writel(reg, EMC_CFG_2);
1277 /* Check if the attached memory device uses LPDDR3 protocol.
1278  * Bit 8 (enable LPDDR3 write preamble toggle) of EMC_FBIO_SPARE is enabled
 * (when set by the boot configuration) only for LPDDR3 parts.
 * Returns true when the bit is set.
1281 static bool tegra11_is_lpddr3(void)
1283 	return emc_readl(EMC_FBIO_SPARE) & BIT(8);
/*
 * tegra11_pasr_apply_mask() - write a PASR segment mask to DRAM MR17.
 *
 * @mem_reg: PASR segment mask to program.
 * @cookie:  target device number (cast to int), shifted into the MRW
 *           device-select field — see tegra11_pasr_enable() for values.
 *
 * Builds an EMC_MRW command addressing mode register 17 and issues it on
 * both EMC channels.  Called as a pasr framework mask callback.
 */
1286 static void tegra11_pasr_apply_mask(u16 *mem_reg, void *cookie)
1289 	int device = (int)cookie;
1291 	val = TEGRA_EMC_MODE_REG_17 | *mem_reg;
1292 	val |= device << TEGRA_EMC_MRW_DEV_SHIFT;
1294 	emc0_writel(val, EMC_MRW);
1295 	emc1_writel(val, EMC_MRW);
1297 	pr_debug("%s: cookie = %d mem_reg = 0x%04x val = 0x%08x\n", __func__,
1298 			(int)cookie, *mem_reg, val);
/*
 * tegra11_pasr_enable() - module-param setter toggling PASR support.
 *
 * Only effective on LPDDR3 memory (early-out otherwise — return line
 * missing from this view).  On a 0->1 transition it registers
 * tegra11_pasr_apply_mask() with the pasr framework for each 1 GB DRAM
 * device region; on 1->0 it unmasks both devices by applying an
 * all-enabled mask directly.  No-op when the value did not change.
 */
1301 static int tegra11_pasr_enable(const char *arg, const struct kernel_param *kp)
1303 	unsigned int old_pasr_enable;
1307 	if (!tegra11_is_lpddr3())
1310 	old_pasr_enable = pasr_enable;
1311 	param_set_int(arg, kp);
1313 	if (old_pasr_enable == pasr_enable)
1316 	/* Cookie represents the device number to write to MRW register.
1317 	 * 0x2 to for only dev0, 0x1 for dev1.
1319 	if (pasr_enable == 0) {
	/* Disabling: unmask (refresh all segments) on each device */
1322 		cookie = (void *)(int)TEGRA_EMC_MRW_DEV1;
1323 		if (!pasr_register_mask_function(TEGRA_DRAM_BASE,
1325 			tegra11_pasr_apply_mask(&mem_reg, cookie);
1327 		cookie = (void *)(int)TEGRA_EMC_MRW_DEV2;
1328 		if (!pasr_register_mask_function(TEGRA_DRAM_BASE + SZ_1G,
1330 			tegra11_pasr_apply_mask(&mem_reg, cookie);
	/* Enabling: hand mask programming over to the pasr framework */
1332 		cookie = (void *)(int)2;
1333 		pasr_register_mask_function(0x80000000,
1334 					&tegra11_pasr_apply_mask, cookie);
1336 		cookie = (void *)(int)1;
1337 		pasr_register_mask_function(0xC0000000,
1338 					&tegra11_pasr_apply_mask, cookie);
/* Expose pasr_enable as a writable module parameter with a custom setter
 * so toggling it at runtime (de)registers the PASR mask callbacks. */
1344 static struct kernel_param_ops tegra11_pasr_enable_ops = {
1345 	.set = tegra11_pasr_enable,
1346 	.get = param_get_int,
1348 module_param_cb(pasr_enable, &tegra11_pasr_enable_ops, &pasr_enable, 0644);
/*
 * tegra11_emc_probe() - platform driver probe.
 *
 * Skips re-initialization if a table is already installed, requires a
 * memory resource, takes platform data from the board file or falls back
 * to device-tree parsing, then validates/installs the EMC DFS table.
 * Returns 0 on success or a negative errno (error paths are missing
 * from this sampled view).
 */
1351 static int tegra11_emc_probe(struct platform_device *pdev)
1353 	struct tegra11_emc_pdata *pdata;
1354 	struct resource *res;
1358 	if (tegra_emc_table)
1361 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1363 		dev_err(&pdev->dev, "missing register base\n");
1367 	pdata = pdev->dev.platform_data;
	/* No board-file pdata: try to build it from the device tree */
1370 		pdata = tegra_emc_dt_parse_pdata(pdev);
1373 		dev_err(&pdev->dev, "missing platform data\n");
1377 	return init_emc_table(pdata->tables, pdata->num_tables);
/* Device-tree match table and platform driver registration data */
1380 static struct of_device_id tegra11_emc_of_match[] = {
1381 	{ .compatible = "nvidia,tegra11-emc", },
1385 static struct platform_driver tegra11_emc_driver = {
1387 		.name   = "tegra-emc",
1388 		.owner  = THIS_MODULE,
1389 		.of_match_table = tegra11_emc_of_match,
1391 	.probe          = tegra11_emc_probe,
/*
 * tegra11_emc_init() - driver entry point.
 *
 * Registers the platform driver, installs the ISO-bandwidth usage table
 * matching the detected DRAM type (LPDDR2/3 vs DDR3), and presets the
 * EMC clock monitor with the rounded boot rate.  Returns the driver
 * registration status.
 */
1394 int __init tegra11_emc_init(void)
1396 	int ret = platform_driver_register(&tegra11_emc_driver);
1398 		if (dram_type == DRAM_TYPE_LPDDR2)
1399 			tegra_emc_iso_usage_table_init(
1400 				tegra11_lpddr3_emc_iso_usage,
1401 				ARRAY_SIZE(tegra11_lpddr3_emc_iso_usage));
1402 		else if (dram_type == DRAM_TYPE_DDR3)
1403 			tegra_emc_iso_usage_table_init(
1404 				tegra11_ddr3_emc_iso_usage,
1405 				ARRAY_SIZE(tegra11_ddr3_emc_iso_usage));
	/* Seed the EMC monitor with the boot rate rounded to a table rate */
1407 			unsigned long rate = tegra_emc_round_rate_updown(
1408 				emc->boot_rate, false);
1409 			if (!IS_ERR_VALUE(rate))
1410 				tegra_clk_preset_emc_monitor(rate);
/* Drop the cached current-timing pointer so the next rate change performs
 * a full re-program.  NOTE(review): body lines are missing from this
 * sampled view — presumably clears emc_timing; confirm in full source. */
1416 void tegra_emc_timing_invalidate(void)
/*
 * tegra_emc_dram_type_init() - latch DRAM type and device count.
 *
 * Reads the DRAM protocol type from EMC_FBIO_CFG5 and the device count
 * (1 or 2) from the MC address configuration; both are cached in
 * file-scope state for later queries.
 */
1421 void tegra_emc_dram_type_init(struct clk *c)
1425 	dram_type = (emc_readl(EMC_FBIO_CFG5) &
1426 		     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;
1428 	dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
/* Accessor for the cached DRAM type latched by tegra_emc_dram_type_init().
 * NOTE(review): body line missing from this sampled view. */
1431 int tegra_emc_get_dram_type(void)
/*
 * emc_read_mrr() - issue an LPDDR2 Mode Register Read.
 *
 * @dev:  DRAM device to address (0 or 1).
 * @addr: mode register number.
 *
 * Only valid for LPDDR2/3 DRAM.  Waits for any pending MRR data to
 * drain, issues the read via EMC_MRR, waits for DIVLD, and returns the
 * read data (masked) or a negative error from wait_for_update().
 * Caller must hold emc_access_lock (see tegra_emc_get_dram_temperature).
 */
1436 static int emc_read_mrr(int dev, int addr)
1441 	if (dram_type != DRAM_TYPE_LPDDR2)
1444 	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
1448 	val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
1449 	val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
1450 	emc_writel(val, EMC_MRR);
1452 	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
1456 	val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
/*
 * tegra_emc_get_dram_temperature() - read the LPDDR2 MR4 temperature field.
 *
 * Reads mode register 4 of device 0 under emc_access_lock and extracts
 * the temperature/refresh-rate code.  Propagates a negative error from
 * emc_read_mrr() (error return line missing from this view).
 */
1460 int tegra_emc_get_dram_temperature(void)
1463 	unsigned long flags;
1465 	spin_lock_irqsave(&emc_access_lock, flags);
1467 	mr4 = emc_read_mrr(0, 4);
1468 	if (IS_ERR_VALUE(mr4)) {
1469 		spin_unlock_irqrestore(&emc_access_lock, flags);
1472 	spin_unlock_irqrestore(&emc_access_lock, flags);
1474 	mr4 = (mr4 & LPDDR2_MR4_TEMP_MASK) >> LPDDR2_MR4_TEMP_SHIFT;
/*
 * tegra_emc_set_over_temp_state() - apply DRAM over-temperature throttling.
 *
 * @state: one of the DRAM_OVER_TEMP_* levels (rejected above
 *         DRAM_OVER_TEMP_THROTTLE; LPDDR2/3 only).
 *
 * Under emc_access_lock, re-derives the refresh timing for the new state,
 * commits the timing update, and forces an immediate refresh command when
 * entering a non-normal state so the tightened refresh interval takes
 * effect right away.
 */
1478 int tegra_emc_set_over_temp_state(unsigned long state)
1480 	unsigned long flags;
1482 	if (dram_type != DRAM_TYPE_LPDDR2)
1485 	if (state > DRAM_OVER_TEMP_THROTTLE)
1488 	spin_lock_irqsave(&emc_access_lock, flags);
1490 	/* Update refresh timing if state changed */
1491 	if (emc_timing && (dram_over_temp_state != state)) {
1492 		set_over_temp_timing(emc_timing, state);
1493 		emc_timing_update();
1494 		if (state != DRAM_OVER_TEMP_NONE)
1495 			emc_writel(EMC_REF_FORCE_CMD, EMC_REF);
1496 		dram_over_temp_state = state;
1498 	spin_unlock_irqrestore(&emc_access_lock, flags);
/*
 * bw_calc_get_freq_idx() - map an ISO bandwidth to a bw_calc_freqs[] bin.
 *
 * Returns the index of the first frequency bin >= bw (exact-match logic
 * partially elided in this view); bandwidths above the last bin map to
 * TEGRA_EMC_ISO_USE_FREQ_MAX_NUM, i.e. the final entry of the usage-share
 * tables, which carry MAX_NUM + 1 elements.
 */
1502 static inline int bw_calc_get_freq_idx(unsigned long bw)
1506 	if (bw > bw_calc_freqs[TEGRA_EMC_ISO_USE_FREQ_MAX_NUM-1] * MHZ)
1507 		idx = TEGRA_EMC_ISO_USE_FREQ_MAX_NUM;
1509 	for (; idx < TEGRA_EMC_ISO_USE_FREQ_MAX_NUM; idx++) {
1510 		u32 freq = bw_calc_freqs[idx] * MHZ;
1515 		} else if (bw == freq)
/* Default LPDDR3 ISO usage share (percent) for the given ISO bandwidth,
 * looked up from the bandwidth-bin table. */
1522 static u8 iso_share_calc_t114_lpddr3_default(unsigned long iso_bw)
1524 	int freq_idx = bw_calc_get_freq_idx(iso_bw);
1525 	return tegra11_lpddr3_emc_usage_share_default[freq_idx];
/* Display-controller-only LPDDR3 ISO usage share (percent) for the given
 * ISO bandwidth — higher shares than the default mixed-client table. */
1528 static u8 iso_share_calc_t114_lpddr3_dc(unsigned long iso_bw)
1530 	int freq_idx = bw_calc_get_freq_idx(iso_bw);
1531 	return tegra11_lpddr3_emc_usage_share_dc[freq_idx];
1534 #ifdef CONFIG_DEBUG_FS
1536 static struct dentry *emc_debugfs_root;
/* Word offsets (relative to the metadata tag) of the revision/frequency
 * fields appended after each entry's burst_up_down_regs[] array. */
1538 #define INFO_CALC_REV_OFFSET	1
1539 #define INFO_SCRIPT_REV_OFFSET	2
1540 #define INFO_FREQ_OFFSET	3
/*
 * emc_table_info_show() - debugfs dump of per-entry EMC table metadata.
 *
 * For each table entry, prints whether the entry was accepted (matched a
 * clock source) and, when the 'NVDA' metadata tag (0x4E564441, ASCII
 * "NVDA") follows the burst_up_down_regs[] block, the embedded frequency
 * plus calculator/script revision words formatted as dotted quads.
 * Entries without the tag fall back to printing just the rate.
 */
1542 static int emc_table_info_show(struct seq_file *s, void *data)
1546 	u32 freq, calc_rev, script_rev;
1547 	const struct tegra11_emc_table *entry;
1550 	if (!tegra_emc_table) {
1551 		seq_printf(s, "EMC DFS table is not installed\n");
1555 	for (i = 0; i < tegra_emc_table_size; i++) {
1556 		entry = &tegra_emc_table[i];
	/* Metadata words live immediately past the last burst register */
1558 			&entry->burst_up_down_regs[entry->burst_up_down_regs_num];
1560 		seq_printf(s, "%s: ", tegra_emc_clk_sel[i].input != NULL ?
1561 			"accepted" : "rejected");
1563 		/* system validation tag for metadata */
1564 		if (*info != 0x4E564441) {
1565 			seq_printf(s, "emc dvfs frequency %6lu\n", entry->rate);
1571 		calc_rev = *(info + INFO_CALC_REV_OFFSET);
1572 		script_rev = *(info + INFO_SCRIPT_REV_OFFSET);
1573 		freq = *(info + INFO_FREQ_OFFSET);
1575 		seq_printf(s, "emc dvfs frequency %6u: ", freq);
1576 		seq_printf(s, "calc_rev: %02u.%02u.%02u.%02u ",
1577 			   (calc_rev >> 24) & 0xff,
1578 			   (calc_rev >> 16) & 0xff,
1579 			   (calc_rev >> 8) & 0xff,
1580 			   (calc_rev >> 0) & 0xff);
1581 		seq_printf(s, "script_rev: %02u.%02u.%02u.%02u\n",
1582 			   (script_rev >> 24) & 0xff,
1583 			   (script_rev >> 16) & 0xff,
1584 			   (script_rev >> 8) & 0xff,
1585 			   (script_rev >> 0) & 0xff);
	/* NOTE(review): "metdata" below is a typo for "metadata" in a
	 * user-visible debugfs string — fix when the full file is edited. */
1589 	seq_printf(s, "no metdata in EMC DFS table\n");
/* Standard single_open()/seq_file boilerplate for the table_info node */
1594 static int emc_table_info_open(struct inode *inode, struct file *file)
1596 	return single_open(file, emc_table_info_show, inode->i_private);
1599 static const struct file_operations emc_table_info_fops = {
1600 	.open		= emc_table_info_open,
1602 	.llseek		= seq_lseek,
1603 	.release	= single_release,
/*
 * emc_stats_show() - debugfs dump of time spent at each EMC rate.
 *
 * Flushes the running statistics first (TEGRA_EMC_TABLE_MAX_SIZE keeps
 * the current selection unchanged), then prints per-rate residency in
 * clock ticks, the total transition count, and the last-update stamp.
 */
1606 static int emc_stats_show(struct seq_file *s, void *data)
1610 	emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
1612 	seq_printf(s, "%-10s %-10s \n", "rate kHz", "time");
1613 	for (i = 0; i < tegra_emc_table_size; i++) {
1614 		if (tegra_emc_clk_sel[i].input == NULL)
1615 			continue;	/* invalid entry */
1617 		seq_printf(s, "%-10lu %-10llu \n", tegra_emc_table[i].rate,
1618 			   cputime64_to_clock_t(emc_stats.time_at_clock[i]));
1620 	seq_printf(s, "%-15s %llu\n", "transitions:",
1621 		   emc_stats.clkchange_count);
1622 	seq_printf(s, "%-15s %llu\n", "time-stamp:",
1623 		   cputime64_to_clock_t(emc_stats.last_update));
/* Standard single_open()/seq_file boilerplate for the stats node */
1628 static int emc_stats_open(struct inode *inode, struct file *file)
1630 	return single_open(file, emc_stats_show, inode->i_private);
1633 static const struct file_operations emc_stats_fops = {
1634 	.open		= emc_stats_open,
1636 	.llseek		= seq_lseek,
1637 	.release	= single_release,
/* debugfs read-only attribute: current DRAM MR4 temperature code */
1640 static int dram_temperature_get(void *data, u64 *val)
1642 	*val = tegra_emc_get_dram_temperature();
1645 DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
/* debugfs read/write attribute wrapping dram_over_temp_state; writes go
 * through tegra_emc_set_over_temp_state() (setter's return value is not
 * propagated to the attribute write). */
1648 static int over_temp_state_get(void *data, u64 *val)
1650 	*val = dram_over_temp_state;
1653 static int over_temp_state_set(void *data, u64 val)
1655 	tegra_emc_set_over_temp_state(val);
1658 DEFINE_SIMPLE_ATTRIBUTE(over_temp_state_fops, over_temp_state_get,
1659 			over_temp_state_set, "%llu\n");
/* debugfs read/write attribute for the EMC bandwidth-efficiency percent;
 * writes are clamped to 100 and trigger a shared-bus rate re-evaluation. */
1661 static int efficiency_get(void *data, u64 *val)
1663 	*val = tegra_emc_bw_efficiency;
1666 static int efficiency_set(void *data, u64 val)
1668 	tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
1670 		tegra_clk_shared_bus_update(emc);
1674 DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
1675 			efficiency_set, "%llu\n");
/*
 * tegra_emc_debug_init() - create the tegra_emc debugfs hierarchy.
 *
 * table_info is always exposed; the remaining nodes (stats,
 * clkchange_delay, dram_temperature, over_temp_state, efficiency, and
 * the ISO-usage files) are only created when a validated EMC table is
 * installed.  On any creation failure the whole directory is removed
 * (error return lines missing from this sampled view).
 */
1677 static int __init tegra_emc_debug_init(void)
1679 	emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
1680 	if (!emc_debugfs_root)
1683 	if (!debugfs_create_file(
1684 		"table_info", S_IRUGO, emc_debugfs_root, NULL,
1685 		&emc_table_info_fops))
1688 	if (!tegra_emc_table)
1691 	if (!debugfs_create_file(
1692 		"stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
1695 	if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
1696 		emc_debugfs_root, (u32 *)&clkchange_delay))
1699 	if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
1700 				 NULL, &dram_temperature_fops))
1703 	if (!debugfs_create_file("over_temp_state", S_IRUGO | S_IWUSR,
1704 				 emc_debugfs_root, NULL, &over_temp_state_fops))
1707 	if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
1708 				 emc_debugfs_root, NULL, &efficiency_fops))
1711 	if (tegra_emc_iso_usage_debugfs_init(emc_debugfs_root))
	/* Error path: tear down everything created so far */
1717 	debugfs_remove_recursive(emc_debugfs_root);
1721 late_initcall(tegra_emc_debug_init);