/* arm64: Tegra210: Cleanup tegra21_emc_init() */
/* [linux-3.10.git] / drivers / platform / tegra / mc / tegra21_emc.c */
1 /*
2  * drivers/platform/tegra/tegra21_emc.c
3  *
4  * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/clk.h>
18 #include <linux/err.h>
19 #include <linux/io.h>
20 #include <linux/of.h>
21 #include <linux/module.h>
22 #include <linux/delay.h>
23 #include <linux/debugfs.h>
24 #include <linux/seq_file.h>
25 #include <linux/hrtimer.h>
26 #include <linux/pasr.h>
27 #include <linux/slab.h>
28 #include <linux/platform_device.h>
29 #include <linux/tegra-soc.h>
30 #include <linux/platform_data/tegra_emc_pdata.h>
31
32 #include <asm/cputime.h>
33
34 #include <tegra/tegra21_emc.h>
35 #include <tegra/mc-regs-t21x.h>
36
37 #include <mach/nct.h>
38
39 #include <linux/platform/tegra/clock.h>
40 #include "board.h"
41 #include <linux/platform/tegra/dvfs.h>
42 #include "iomap.h"
43 #include "tegra_emc_dt_parse.h"
44 #include "devices.h"
45 #include <linux/platform/tegra/common.h>
46 #include "../nvdumper/nvdumper-footprint.h"
47
/*
 * Revision identifiers of the DVFS clock-change and prelock sequences
 * implemented by this driver.
 * NOTE(review): presumably checked against revision fields in the EMC
 * DVFS tables elsewhere in this file -- confirm against table users.
 */
#define DVFS_CLOCK_CHANGE_VERSION       2108
#define EMC_PRELOCK_VERSION             2101
50
/*
 * Verbosity classes for emc_cc_dbg(); OR any subset of these into
 * emc_dbg_mask to enable the corresponding message class.
 *
 * The constants are unsigned: the original "(1 << 31)" left-shifted a
 * signed int into its sign bit, which is undefined behavior in C
 * (C11 6.5.7).  Values are unchanged; emc_dbg_mask is unsigned int,
 * so all existing users are unaffected.
 */
#define INFO            (1U << 0)
#define STEPS           (1U << 1)
#define SUB_STEPS       (1U << 2)
#define PRELOCK         (1U << 3)
#define PRELOCK_STEPS   (1U << 4)
#define ACTIVE_EN       (1U << 5)
#define PRAMP_UP        (1U << 6)
#define PRAMP_DN        (1U << 7)
#define EMC_REGISTERS   (1U << 28)
#define CCFIFO          (1U << 29)
#define REGS            (1U << 30)
#define REG_LISTS       (1U << 31)
66
/*
 * Conditional clock-change debug print: emits a pr_info() prefixed with
 * the calling function's name when any bit of @mask is also set in
 * emc_dbg_mask.  Multi-statement macro wrapped in do { } while (0) so it
 * is safe in un-braced if/else bodies; ##__VA_ARGS__ allows calls with
 * no variadic arguments.
 */
#define emc_cc_dbg(mask, fmt, ...)                                      \
        do {                                                            \
                if (mask & emc_dbg_mask)                                \
                        pr_info("%s: " fmt, __func__, ##__VA_ARGS__);   \
        } while (0)
72
/*
 * Active debug mask for emc_cc_dbg().  Flip "#if 0" to "#if 1" at build
 * time to enable a near-fully-verbose mask (everything except
 * REG_LISTS); the shipping default is 0, i.e. all debug output off.
 */
#if 0
static unsigned int emc_dbg_mask = INFO | STEPS | SUB_STEPS | PRELOCK |
        PRELOCK_STEPS | ACTIVE_EN | PRAMP_UP | PRAMP_DN | EMC_REGISTERS |
        CCFIFO | REGS;
#else
static unsigned int emc_dbg_mask;
#endif
80
/*
 * Master switch for EMC frequency scaling.  Defaults to on only when the
 * kernel is built with CONFIG_TEGRA_EMC_SCALING_ENABLE; runtime-writable
 * through /sys/module/.../parameters/emc_enable (mode 0644).
 */
#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
static bool emc_enable;
#endif
module_param(emc_enable, bool, 0644);
87
/* TODO: cleanup to not use iomap.h */
/*
 * Statically mapped MMIO bases (fixed kernel VA via IO_ADDRESS()) for
 * the two EMC channels, the memory controller, and the clock-and-reset
 * controller.
 */
static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
static void __iomem *emc1_base = IO_ADDRESS(TEGRA_EMC1_BASE); /* Second chan. */
static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
93
#ifdef CONFIG_PASR
/*
 * Non-zero once partial-array self-refresh handling has been set up.
 * NOTE(review): set/read elsewhere in this file -- not visible here.
 */
static int pasr_enable;
#endif
97
/* Assumed DRAM bandwidth efficiency, in percent (non-static: exported). */
u8 tegra_emc_bw_efficiency = 80;
99
/*
 * ISO bandwidth breakpoints, in MHz (23 entries).  Index-matched with
 * the *_iso_efficiency_* tables below.
 */
static u32 iso_bw_table[] = {
        5, 10, 20, 30, 40, 60, 80, 100, 120, 140, 160, 180,
        200, 250, 300, 350, 400, 450, 500, 550, 600, 650, 700
};
104
/*
 * These tables list the ISO efficiency (in percent) at the corresponding entry
 * in the iso_bw_table. iso_bw_table is in MHz.
 *
 * One "os_idle" and one "general" table per supported DRAM type
 * (LPDDR3, LPDDR4, DDR3); each table has exactly one entry per
 * iso_bw_table breakpoint (23 entries) -- keep them in sync.
 */
static u32 tegra21_lpddr3_iso_efficiency_os_idle[] = {
        64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
        64, 63, 60, 54, 45, 45, 45, 45, 45, 45, 45
};
static u32 tegra21_lpddr3_iso_efficiency_general[] = {
        60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
        60, 59, 59, 58, 57, 56, 55, 54, 54, 54, 54
};

static u32 tegra21_lpddr4_iso_efficiency_os_idle[] = {
        56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56,
        56, 56, 56, 56, 56, 56, 56, 56, 56, 49, 45
};
static u32 tegra21_lpddr4_iso_efficiency_general[] = {
        56, 55, 55, 54, 54, 53, 51, 50, 49, 48, 47, 46,
        45, 45, 45, 45, 45, 45, 45, 45, 45, 45, 45
};

static u32 tegra21_ddr3_iso_efficiency_os_idle[] = {
        65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65,
        65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65
};
static u32 tegra21_ddr3_iso_efficiency_general[] = {
        60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60,
        60, 59, 59, 58, 57, 56, 55, 54, 54, 54, 54
};
135
/*
 * Look up the ISO efficiency (percent) for a requested ISO bandwidth;
 * implemented later in this file.
 */
static u8 get_iso_bw_os_idle(unsigned long iso_bw);
static u8 get_iso_bw_general(unsigned long iso_bw);

/*
 * Per-client-combination ISO usage: each entry gives a bitmask of active
 * ISO clients, the guaranteed bandwidth share (percent) for that
 * combination, and the efficiency-lookup callback to apply.
 */
static struct emc_iso_usage tegra21_emc_iso_usage[] = {
        {
                BIT(EMC_USER_DC1),
                80, get_iso_bw_os_idle
        },
        {
                BIT(EMC_USER_DC1) | BIT(EMC_USER_DC2),
                50, get_iso_bw_general
        },
        {
                BIT(EMC_USER_DC1) | BIT(EMC_USER_VI),
                50, get_iso_bw_general
        },
        {
                BIT(EMC_USER_DC1) | BIT(EMC_USER_DC2) | BIT(EMC_USER_VI),
                50, get_iso_bw_general
        },
};
157
/* Misc limits and conversion constants. */
#define MHZ 1000000
#define TEGRA_EMC_ISO_USE_FREQ_MAX_NUM  12
#define PLL_C_DIRECT_FLOOR              333500000
#define EMC_STATUS_UPDATE_TIMEOUT       1000
#define TEGRA_EMC_TABLE_MAX_SIZE        16

/* Mode-register write (MRW) encoding: device-select field and values. */
#define TEGRA_EMC_MODE_REG_17   0x00110000
#define TEGRA_EMC_MRW_DEV_SHIFT 30
#define TEGRA_EMC_MRW_DEV1      2
#define TEGRA_EMC_MRW_DEV2      1

/* Device-size field in the MC EMEM configuration register. */
#define MC_EMEM_DEV_SIZE_MASK   0xF
#define MC_EMEM_DEV_SIZE_SHIFT  16

/*
 * CLK_RST_CONTROLLER_CLK_SOURCE_EMC: EMC 2x clock source select,
 * divisor, and control bits.
 */
#define CLK_RST_CONTROLLER_CLK_SOURCE_EMC       0x19c
#define EMC_CLK_EMC_2X_CLK_SRC_SHIFT            29
#define EMC_CLK_EMC_2X_CLK_SRC_MASK             \
        (0x7 << EMC_CLK_EMC_2X_CLK_SRC_SHIFT)
#define EMC_CLK_SOURCE_PLLM                     0x0
#define EMC_CLK_SOURCE_PLLM_LJ                  0x4
#define EMC_CLK_SOURCE_PLLMB                    0x6
#define EMC_CLK_SOURCE_PLLMB_LJ                 0x5
#define EMC_CLK_FORCE_CC_TRIGGER                (0x1 << 27)
#define EMC_CLK_MC_EMC_SAME_FREQ                (0x1 << 16)
#define EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT        0
#define EMC_CLK_EMC_2X_CLK_DIVISOR_MASK         \
        (0xff << EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT)

/* Clock-enable set/clear registers for the X clock bank (EMC DLL bit). */
#define CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET    0x284
#define CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR    0x288

#define CLK_OUT_ENB_X_CLK_ENB_EMC_DLL           (1 << 14)

/* CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL: EMC DLL clock source fields. */
#define CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL   0x664
#define DLL_CLK_EMC_DLL_CLK_SRC_SHIFT           29
#define DLL_CLK_EMC_DLL_CLK_SRC_MASK            \
        (0x7 << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT)
#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT      10
#define DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK       \
        (0x3 << DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT)
#define PLLM_VCOA                               0
#define PLLM_VCOB                               1
#define EMC_DLL_SWITCH_OUT                      2
#define DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT       0
#define DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK        \
        (0xff << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT)
204
/*
 * Per-channel burst registers: MRW registers tracked separately for each
 * EMC channel (EMC0 / EMC1).
 *
 * NOTE(review): the final entry keeps a trailing ", \" so the expansion
 * ends with a comma and splices the following blank line.  Current users
 * presumably tolerate the trailing comma, but this differs from
 * BURST_REG_LIST, which ends cleanly -- verify before "fixing".
 */
#define BURST_PERCH_LIST                        \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_MRW10), \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_MRW10), \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_MRW11), \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_MRW11), \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_MRW12), \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_MRW12), \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_MRW13), \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_MRW13), \

215
/*
 * Broadcast burst register list: DRAM timing, pad-macro, and training
 * registers written as a group on each clock change.
 *
 * NOTE(review): the entry order presumably fixes the layout of arrays
 * generated by redefining DEFINE_REG over this list -- do not reorder
 * without checking every expansion site.
 */
#define BURST_REG_LIST                                                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),                             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RFCPB),                          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_REFCTRL2),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),                             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_R2R),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TPPD),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CCDMW),                          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),                         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),                         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_CHK),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WSV),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WEV),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WS_DURATION),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_WE_DURATION),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_WIDTH),                     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY),                          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_OBDLY),                          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_EINPUT),                         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_MRW6),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_EINPUT_DURATION),                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_WIDTH),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE),                          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_EARLY),                      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_EARLY_MASK),                 \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),              \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),                      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CKE2PDEN),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2CKE),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2MRR),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR),                         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),                          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),                     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_MRW7),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),                         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),                      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),                      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG7),                      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_RXRT),                 \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_PIPE_1),                     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_PIPE_2),                     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK0_4),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK0_5),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK1_4),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK1_5),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_MRW8),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_4), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_5), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_0), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_1), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_2), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_4), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK0_5), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_0), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_1), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_2), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_4), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQS_RANK1_5), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_LONG_CMD_0),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_LONG_CMD_1),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_LONG_CMD_2),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_LONG_CMD_3),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_LONG_CMD_4),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_SHORT_CMD_0),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_SHORT_CMD_1),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_SHORT_CMD_2),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_0), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_1), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_2), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD0_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_0), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_1), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_2), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD1_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_0), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_1), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_2), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD2_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_0), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_1), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_2), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_CMD3_3), \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_FDPD_CTRL_DQ),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_FDPD_CTRL_CMD),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),                     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CHANNEL),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_CFG_0),                      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_CFG_1),                      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_AUTOCAL_CFG_COMMON),      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_ZCTRL),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG),                            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_PIPE),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_QPOP),                           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DQS_BRLSHFT_0),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_DQS_BRLSHFT_1),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CMD_BRLSHFT_2),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CMD_BRLSHFT_3),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_PAD_CFG_CTRL),            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DATA_PAD_RX_CTRL),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_CMD_PAD_RX_CTRL),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DATA_RX_TERM_MODE),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_CMD_RX_TERM_MODE),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_CMD_PAD_TX_CTRL),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DATA_PAD_TX_CTRL),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_COMMON_PAD_TX_CTRL),      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_VTTGEN_CTRL_0),           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_VTTGEN_CTRL_1),           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_VTTGEN_CTRL_2),           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_BRICK_CTRL_RFU1),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_CMD_BRICK_CTRL_FDPD),     \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_BRICK_CTRL_RFU2),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DATA_BRICK_CTRL_FDPD),    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_BG_BIAS_CTRL_0),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_3),                          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_PWRD_0),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_PWRD_1),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_PWRD_2),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_PWRD_3),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_PWRD_4),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_PWRD_5),               \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_CONFIG_SAMPLE_DELAY),            \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_SEL_CLK_SRC_0),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_SEL_CLK_SRC_1),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_SEL_CLK_SRC_2),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_SEL_CLK_SRC_3),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_SEL_CLK_SRC_4),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_TX_SEL_CLK_SRC_5),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_BYPASS),             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_PWRD_0),             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_PWRD_1),             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_DDLL_PWRD_2),             \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_CMD_CTRL_0),              \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_CMD_CTRL_1),              \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_CMD_CTRL_2),              \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TR_TIMING_0),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TR_DVFS),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TR_CTRL_1),                      \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TR_RDV),                         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TR_QPOP),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TR_RDV_MASK),                    \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_MRW14),                          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TR_QSAFE),                       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TR_QRST),                        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_CTRL),                  \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_SETTLE),                \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_VREF_SETTLE),           \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_CA_FINE_CTRL),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_CA_CTRL_MISC),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_CA_CTRL_MISC1),         \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_CA_VREF_CTRL),          \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_QUSE_CORS_CTRL),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_QUSE_FINE_CTRL),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_QUSE_CTRL_MISC),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_QUSE_VREF_CTRL),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_READ_FINE_CTRL),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_READ_CTRL_MISC),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_READ_VREF_CTRL),        \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_WRITE_FINE_CTRL),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_WRITE_CTRL_MISC),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_WRITE_VREF_CTRL),       \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_TRAINING_MPC),                   \
        DEFINE_REG(TEGRA_EMC_BASE, EMC_MRW15)
438
439
/*
 * Per-channel trim registers (barrel-shifter settings), alternating
 * EMC0 / EMC1.
 * NOTE(review): the register names are intentionally asymmetric between
 * channels for CMD/QUSE (e.g. CMD_BRLSHFT_0 on EMC0 vs CMD_BRLSHFT_1 on
 * EMC1) while DATA uses the same register on both -- verify against the
 * T210 TRM before "normalizing".
 */
#define TRIM_PERCH_REG_LIST                                     \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_CMD_BRLSHFT_0),         \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_CMD_BRLSHFT_1),         \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_DATA_BRLSHFT_0),        \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_DATA_BRLSHFT_0),        \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_DATA_BRLSHFT_1),        \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_DATA_BRLSHFT_1),        \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_QUSE_BRLSHFT_0),        \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_QUSE_BRLSHFT_1),        \
        DEFINE_REG(TEGRA_EMC0_BASE, EMC_QUSE_BRLSHFT_2),        \
        DEFINE_REG(TEGRA_EMC1_BASE, EMC_QUSE_BRLSHFT_3)
451
452 #define TRIM_REG_LIST                                                   \
453         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_0), \
454         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_1), \
455         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_2), \
456         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_LONG_DQS_RANK0_3), \
457         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_0), \
458         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_1), \
459         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_2), \
460         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_LONG_DQS_RANK1_3), \
461         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_0), \
462         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_1), \
463         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE0_2), \
464         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_0), \
465         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_1), \
466         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE1_2), \
467         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_0), \
468         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_1), \
469         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE2_2), \
470         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_0), \
471         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_1), \
472         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE3_2), \
473         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_0), \
474         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_1), \
475         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE4_2), \
476         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_0), \
477         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_1), \
478         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE5_2), \
479         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_0), \
480         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_1), \
481         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE6_2), \
482         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_0), \
483         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_1), \
484         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK0_BYTE7_2), \
485         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_0), \
486         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_1), \
487         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE0_2), \
488         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_0), \
489         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_1), \
490         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE1_2), \
491         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_0), \
492         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_1), \
493         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE2_2), \
494         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_0), \
495         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_1), \
496         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE3_2), \
497         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_0), \
498         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_1), \
499         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE4_2), \
500         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_0), \
501         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_1), \
502         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE5_2), \
503         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_0), \
504         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_1), \
505         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE6_2), \
506         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_0), \
507         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_1), \
508         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_DDLL_SHORT_DQ_RANK1_BYTE7_2), \
509         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_VREF_DQS_0),           \
510         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_VREF_DQS_1),           \
511         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_VREF_DQ_0),            \
512         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_IB_VREF_DQ_1),            \
513         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0), \
514         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1), \
515         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2), \
516         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_3), \
517         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_4), \
518         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_5), \
519         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_0), \
520         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_1), \
521         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_2), \
522         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_LONG_DQ_RANK1_3), \
523         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_0), \
524         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_1), \
525         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE0_2), \
526         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_0), \
527         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_1), \
528         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE1_2), \
529         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_0), \
530         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_1), \
531         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE2_2), \
532         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_0), \
533         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_1), \
534         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE3_2), \
535         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_0), \
536         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_1), \
537         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE4_2), \
538         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_0), \
539         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_1), \
540         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE5_2), \
541         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_0), \
542         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_1), \
543         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE6_2), \
544         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_0), \
545         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_1), \
546         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_BYTE7_2), \
547         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_0), \
548         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_1), \
549         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD0_2), \
550         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_0), \
551         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_1), \
552         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD1_2), \
553         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_0), \
554         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_1), \
555         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD2_2), \
556         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_0), \
557         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_1), \
558         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK0_CMD3_2), \
559         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_0), \
560         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_1), \
561         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE0_2), \
562         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_0), \
563         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_1), \
564         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE1_2), \
565         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_0), \
566         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_1), \
567         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE2_2), \
568         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_0), \
569         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_1), \
570         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE3_2), \
571         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_0), \
572         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_1), \
573         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE4_2), \
574         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_0), \
575         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_1), \
576         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE5_2), \
577         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_0), \
578         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_1), \
579         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE6_2), \
580         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_0), \
581         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_1), \
582         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_OB_DDLL_SHORT_DQ_RANK1_BYTE7_2), \
583         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK0_0),       \
584         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK0_1),       \
585         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK0_2),       \
586         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK0_3),       \
587         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK1_0),       \
588         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK1_1),       \
589         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK1_2),       \
590         DEFINE_REG(TEGRA_EMC_BASE, EMC_PMACRO_QUSE_DDLL_RANK1_3)
591
/*
 * MC arbitration configuration/timing registers written as a burst during a
 * clock change. The list is terminated without a trailing comma or line
 * continuation (consistent with the other *_REG_LIST macros) so the blank
 * line that follows is no longer absorbed into the macro.
 */
#define BURST_MC_REG_LIST						\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),			\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_REFPB_HP_CTRL),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_REFPB_BANK_CTRL),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_CCDMW),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RFCPB),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),			\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC1),			\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC2),			\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_CTRL),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_1),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_2),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_3),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_4),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_5),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_6),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DHYST_TIMEOUT_UTIL_7)

/*
 * MC PTSA rate and latency allowance registers updated around EMC rate
 * changes; this list backs the la_scale_off_regs address table below.
 */
#define BURST_UP_DOWN_REG_LIST                                          \
	DEFINE_REG(TEGRA_MC_BASE, MC_MLL_MPCORER_PTSA_RATE),            \
	DEFINE_REG(TEGRA_MC_BASE, MC_FTOP_PTSA_RATE),                   \
	DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT),             \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_XUSB_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_XUSB_1),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_TSEC_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCA_0),       \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCAA_0),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMC_0),        \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCAB_0),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_PPCS_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_PPCS_1),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MPCORE_0),       \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HC_0),           \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HC_1),           \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_AVPC_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_GPU_0),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_GPU2_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NVENC_0),        \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_NVDEC_0),        \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VIC_0),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VI2_0),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2_1)
652
/*
 * Per-channel (EMC0/EMC1) DQS input Vref training result registers, one entry
 * per channel/rank pair. Terminated without a trailing comma or continuation
 * (consistent with the other *_REG_LIST macros) so the following blank line
 * is not absorbed into the macro.
 */
#define VREF_PERCH_REG_LIST						\
	DEFINE_REG(TEGRA_EMC0_BASE, EMC_TRAINING_OPT_DQS_IB_VREF_RANK0), \
	DEFINE_REG(TEGRA_EMC1_BASE, EMC_TRAINING_OPT_DQS_IB_VREF_RANK0), \
	DEFINE_REG(TEGRA_EMC0_BASE, EMC_TRAINING_OPT_DQS_IB_VREF_RANK1), \
	DEFINE_REG(TEGRA_EMC1_BASE, EMC_TRAINING_OPT_DQS_IB_VREF_RANK1)

/*
 * With this definition each *_REG_LIST entry expands to the IO virtual
 * address of the register, or 0 when the base address is 0 (register not
 * present).
 */
#define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)

/*
 * Currently these are the IO virtual mapped addresses. Once iomap.h is removed
 * and the DT is used for loading the register addresses these will turn into
 * register offset by default and will be updated with the real address during
 * init.
 */
static void __iomem *burst_reg_off[] = {
	BURST_REG_LIST
};
/* Registers programmed separately on each EMC channel. */
static void __iomem *burst_perch_reg_off[] = {
	BURST_PERCH_LIST
};
/* Per-channel DQS Vref training registers. */
static void __iomem *vref_reg_off[] = {
	VREF_PERCH_REG_LIST
};
/* Trimmer (DDLL delay) registers. */
static void __iomem *trim_reg_off[] = {
	TRIM_REG_LIST
};
static void __iomem *trim_perch_reg_off[] = {
	TRIM_PERCH_REG_LIST
};

/*
 * The MC registers that the clock change will modify.
 */
static void __iomem *la_scale_off_regs[] = {
	BURST_UP_DOWN_REG_LIST
};
static void __iomem *burst_mc_reg_off[] = {
	BURST_MC_REG_LIST
};

#undef DEFINE_REG
694
/*
 * Re-define DEFINE_REG so the same register lists now generate <reg>_INDEX
 * enumerators. Each index matches the position of the corresponding entry in
 * the address arrays above, so e.g. burst_reg_off[EMC_REFRESH_INDEX] is the
 * address of EMC_REFRESH.
 */
#define DEFINE_REG(base, reg)	reg##_INDEX
enum {
	BURST_REG_LIST
};

enum {
	BURST_MC_REG_LIST
};

enum {
	TRIM_REG_LIST
};
707
708
/*
 * Clock source selection for one EMC table entry: the parent clock to use,
 * the value to program into the clock source register, and the rate the
 * parent must run at for this entry.
 */
struct emc_sel {
	struct clk	*input;
	u32		value;
	unsigned long	input_rate;
};
/* One selection per EMC table entry; _b is an alternate source path -
 * NOTE(review): populated by init code outside this chunk, confirm. */
static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
static struct emc_sel tegra_emc_clk_sel_b[TEGRA_EMC_TABLE_MAX_SIZE];
/* Scratch copy of a timing - presumably captured at the start of a rate
 * change (see start/next timing usage below); TODO confirm. */
static struct tegra21_emc_table start_timing;
/* Currently applied timing; NULL (static zero-init) until first set. */
static const struct tegra21_emc_table *emc_timing;
static unsigned long dram_over_temp_state = DRAM_OVER_TEMP_NONE;

/* Timestamp of the last clock change and minimum spacing between changes -
 * NOTE(review): unit of clkchange_delay not visible in this chunk. */
static ktime_t clkchange_time;
static int clkchange_delay = 100;

/* Normal and derated (over-temperature) frequency tables. */
static const struct tegra21_emc_table *tegra_emc_table;
static const struct tegra21_emc_table *tegra_emc_table_derated;
static int tegra_emc_table_size;

static u32 dram_dev_num;	/* number of DRAM devices */
static u32 dram_type = -1;	/* DRAM_TYPE_*; -1 until probed */

static struct clk *emc;

/* Time-at-rate accounting; all fields protected by .spinlock. */
static struct {
	cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
	int last_sel;
	u64 last_update;
	u64 clkchange_count;
	spinlock_t spinlock;
} emc_stats;

static DEFINE_SPINLOCK(emc_access_lock);
741
742 static inline void emc_writel(u32 val, unsigned long addr)
743 {
744         emc_cc_dbg(REGS, "reg write 0x%08x => 0x%p\n", val, emc_base + addr);
745         writel(val, emc_base + addr);
746 }
747
748 static inline void emc1_writel(u32 val, unsigned long addr)
749 {
750         writel(val, emc1_base + addr);
751 }
752
753 static inline u32 emc_readl(unsigned long addr)
754 {
755         u32 val;
756
757         val = readl(emc_base + addr);
758         emc_cc_dbg(REGS, "reg read 0x%p => 0x%08x\n", emc_base + addr, val);
759
760         return val;
761 }
762
763 static inline u32 emc1_readl(unsigned long addr)
764 {
765         u32 val;
766
767         val = readl(emc1_base + addr);
768         emc_cc_dbg(REGS, "reg read (emc1) 0x%p => 0x%08x\n",
769                    emc_base + addr, val);
770
771         return val;
772 }
773
774 static inline void mc_writel(u32 val, unsigned long addr)
775 {
776         writel(val, mc_base + addr);
777 }
778
779 static inline u32 mc_readl(unsigned long addr)
780 {
781         return readl(mc_base + addr);
782 }
783
784 static int ccfifo_index;
785 static inline void ccfifo_writel(u32 val, unsigned long addr, u32 delay)
786 {
787         /* Index into CCFIFO - for keeping track of how many writes we
788          * generate. */
789
790         emc_cc_dbg(CCFIFO, "[%d] (%u) 0x%08x => 0x%03lx\n",
791                    ccfifo_index, delay, val, addr);
792         ccfifo_index++;
793
794         writel(val, emc_base + EMC_CCFIFO_DATA);
795         writel((addr & 0xffff) | ((delay & 0x7fff) << 16) | (1 << 31),
796                emc_base + EMC_CCFIFO_ADDR);
797 }
798
799 static inline u32 disable_emc_sel_dpd_ctrl(u32 inreg)
800 {
801         u32 mod_reg = inreg;
802         mod_reg &= ~(EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);
803         mod_reg &= ~(EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN);
804         if (dram_type == DRAM_TYPE_DDR3)
805                 mod_reg &= ~(EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN);
806         mod_reg &= ~(EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN);
807         mod_reg &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN);
808         return mod_reg;
809 }
810
811 static int last_round_idx;
812 static inline int get_start_idx(unsigned long rate)
813 {
814         if (tegra_emc_table[last_round_idx].rate == rate)
815                 return last_round_idx;
816         return 0;
817 }
818 static void emc_last_stats_update(int last_sel)
819 {
820         unsigned long flags;
821         u64 cur_jiffies = get_jiffies_64();
822
823         spin_lock_irqsave(&emc_stats.spinlock, flags);
824
825         if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
826                 emc_stats.time_at_clock[emc_stats.last_sel] =
827                         emc_stats.time_at_clock[emc_stats.last_sel] +
828                         (cur_jiffies - emc_stats.last_update);
829
830         emc_stats.last_update = cur_jiffies;
831
832         if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
833                 emc_stats.clkchange_count++;
834                 emc_stats.last_sel = last_sel;
835         }
836         spin_unlock_irqrestore(&emc_stats.spinlock, flags);
837 }
838
839 /*
840  * Necessary for the dram_timing_regs array. These are not actually registers.
841  * They are just used for computing values to put into the real timing
842  * registers.
843  */
844 static const struct tegra21_emc_table *get_timing_from_freq(unsigned long freq)
845 {
846         int i;
847
848         for (i = 0; i < tegra_emc_table_size; i++)
849                 if (tegra_emc_table[i].rate == freq)
850                         return &tegra_emc_table[i];
851
852         return NULL;
853 }
854
/*
 * Poll @status_reg on EMC channel @chan (0 = EMC, non-zero = EMC1) until the
 * truth value of (reg & @bit_mask) equals @updated_state. Polls once per
 * microsecond, up to EMC_STATUS_UPDATE_TIMEOUT times.
 *
 * Returns 0 once the state is observed, -ETIMEDOUT otherwise.
 */
static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state,
			   int chan)
{
	int i, err = -ETIMEDOUT;
	int old_dbg_mask;
	u32 reg;

	emc_cc_dbg(REGS, "Polling 0x%08x (chan=%d) for 0x%08x => 0x%08x\n",
		   status_reg, chan, bit_mask, updated_state);

	/* Turn off REGS to hide a potentially huge number of prints. */
	old_dbg_mask = emc_dbg_mask;
	emc_dbg_mask &= ~REGS;

	for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
		reg = chan ? emc1_readl(status_reg) : emc_readl(status_reg);
		if (!!(reg & bit_mask) == updated_state) {
			err = 0;
			goto done;
		}
		udelay(1);
	}

done:
	/* Restore the caller's debug mask before the summary print. */
	emc_dbg_mask = old_dbg_mask;
	emc_cc_dbg(REGS, "Polling cycles: %d\n", i);
	return err;
}
883
884 static inline void emc_timing_update(int dual_chan)
885 {
886         int err = 0;
887
888         emc_writel(0x1, EMC_TIMING_CONTROL);
889         err |= wait_for_update(EMC_EMC_STATUS,
890                                EMC_EMC_STATUS_TIMING_UPDATE_STALLED, false, 0);
891         if (dual_chan)
892                 err |= wait_for_update(EMC_EMC_STATUS,
893                                EMC_EMC_STATUS_TIMING_UPDATE_STALLED, false, 1);
894         if (err) {
895                 pr_err("%s: timing update error: %d", __func__, err);
896                 BUG();
897         }
898 }
899
900 static inline void set_over_temp_timing(
901         const struct tegra21_emc_table *next_timing, unsigned long state)
902 {
903 #define REFRESH_X2      1
904 #define REFRESH_X4      2
905 #define REFRESH_SPEEDUP(val, speedup)                                   \
906         do {                                    \
907                 val = ((val) & 0xFFFF0000) |                            \
908                         (((val) & 0xFFFF) >> (speedup));                \
909         } while (0)
910
911         u32 ref = next_timing->burst_regs[EMC_REFRESH_INDEX];
912         u32 pre_ref = next_timing->burst_regs[EMC_PRE_REFRESH_REQ_CNT_INDEX];
913         u32 dsr_cntrl = next_timing->burst_regs[EMC_DYN_SELF_REF_CONTROL_INDEX];
914
915         switch (state) {
916         case DRAM_OVER_TEMP_NONE:
917                 break;
918         case DRAM_OVER_TEMP_REFRESH_X2:
919                 REFRESH_SPEEDUP(ref, REFRESH_X2);
920                 REFRESH_SPEEDUP(pre_ref, REFRESH_X2);
921                 REFRESH_SPEEDUP(dsr_cntrl, REFRESH_X2);
922                 break;
923         case DRAM_OVER_TEMP_REFRESH_X4:
924         case DRAM_OVER_TEMP_THROTTLE:
925                 REFRESH_SPEEDUP(ref, REFRESH_X4);
926                 REFRESH_SPEEDUP(pre_ref, REFRESH_X4);
927                 REFRESH_SPEEDUP(dsr_cntrl, REFRESH_X4);
928                 break;
929         default:
930         WARN(1, "%s: Failed to set dram over temp state %lu\n",
931                 __func__, state);
932         return;
933         }
934
935         __raw_writel(ref, burst_reg_off[EMC_REFRESH_INDEX]);
936         __raw_writel(pre_ref, burst_reg_off[EMC_PRE_REFRESH_REQ_CNT_INDEX]);
937         __raw_writel(dsr_cntrl, burst_reg_off[EMC_DYN_SELF_REF_CONTROL_INDEX]);
938         wmb();
939 }
940
941 static inline void overwrite_mrs_wait_cnt(
942         const struct tegra21_emc_table *next_timing,
943         bool zcal_long)
944 {
945         u32 reg;
946         u32 cnt = 512;
947
948         /* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
949            for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
950            expected operation length. Reduce the latter by the overlapping
951            zq-calibration, if any */
952         if (zcal_long)
953                 cnt -= dram_dev_num * 256;
954
955         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
956                 EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
957                 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
958         if (cnt < reg)
959                 cnt = reg;
960
961         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
962                 (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
963         reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
964                 EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
965
966         emc_writel(reg, EMC_MRS_WAIT_CNT);
967 }
968
969 static inline void do_clock_change(u32 clk_setting)
970 {
971         int err;
972
973         mc_readl(MC_EMEM_ADR_CFG);      /* completes prev writes */
974         emc_readl(EMC_INTSTATUS);
975
976         writel(clk_setting, clk_base + emc->reg);
977         readl(clk_base + emc->reg);     /* completes prev write */
978
979         err = wait_for_update(EMC_INTSTATUS,
980                               EMC_INTSTATUS_CLKCHANGE_COMPLETE, true, 0);
981         if (err) {
982                 pr_err("%s: clock change completion error: %d", __func__, err);
983                 BUG();
984         }
985 }
986
987 static inline void emc_set_shadow_bypass(int set)
988 {
989         u32 emc_dbg = emc_readl(EMC_DBG);
990
991         emc_cc_dbg(ACTIVE_EN, "Setting write mux: %s\n",
992                    set ? "ACTIVE" : "SHADOW");
993
994         if (set)
995                 emc_writel(emc_dbg | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
996         else
997                 emc_writel(emc_dbg & ~EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG);
998 }
999
1000 static inline u32 get_dll_state(const struct tegra21_emc_table *next_timing)
1001 {
1002         bool next_dll_enabled;
1003
1004         next_dll_enabled = !(next_timing->emc_emrs & 0x1);
1005         if (next_dll_enabled)
1006                 return DLL_ON;
1007         else
1008                 return DLL_OFF;
1009 }
1010
1011 /*
1012  * This function computes the division of two fixed point numbers which have
1013  * three decimal places of precision. The result is then ceil()ed and converted
1014  * to a regular integer.
1015  */
1016 static inline u32 div_o3(u32 a, u32 b)
1017 {
1018         u32 result = a / b;
1019
1020         if ((b * result) < a)
1021                 return result + 1;
1022         else
1023                 return result;
1024 }
1025
1026 /*
1027  * Source clock period is in picoseconds. Returns the ramp down wait time in
1028  * picoseconds.
1029  */
/*
 * @clk: source clock period in picoseconds.
 * @flip_backward: take the pad control values from @next_timing instead of
 *	@last_timing (sequence being applied in the reverse direction).
 *
 * Queues the pad/IO-brick ramp down sequence into the CCFIFO and returns the
 * accumulated wait time in picoseconds.
 */
noinline u32 do_dvfs_power_ramp_down(u32 clk, int flip_backward,
			     const struct tegra21_emc_table *last_timing,
			     const struct tegra21_emc_table *next_timing)
{
	u32 ramp_down_wait = 0;
	u32 pmacro_cmd_pad;
	u32 pmacro_dq_pad;
	u32 pmacro_rfu1;
	u32 pmacro_cfg5;
	u32 pmacro_common_tx;
	u32 seq_wait;

	emc_cc_dbg(PRAMP_DN, "flip_backward = %d\n", flip_backward);

	/* Select which timing set supplies the pad control values. */
	if (flip_backward) {
		pmacro_cmd_pad   = next_timing->
			burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
		pmacro_dq_pad    = next_timing->
			burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
		pmacro_rfu1      = next_timing->
			burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
		pmacro_cfg5      = next_timing->
			burst_regs[EMC_FBIO_CFG5_INDEX];
		pmacro_common_tx = next_timing->
			burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
	} else {
		pmacro_cmd_pad   = last_timing->
			burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
		pmacro_dq_pad    = last_timing->
			burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
		pmacro_rfu1      = last_timing->
			burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
		pmacro_cfg5      = last_timing->
			burst_regs[EMC_FBIO_CFG5_INDEX];
		pmacro_common_tx = last_timing->
			burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
	}

	pmacro_cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;

	/* Force CMD/DQ drivers on, then disable command TX after 12 cycles. */
	ccfifo_writel(pmacro_cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 0);
	ccfifo_writel(pmacro_cfg5 | EMC_FBIO_CFG5_CMD_TX_DIS, EMC_FBIO_CFG5,
		      12);
	ramp_down_wait = 12 * clk;

	/* Cycles per ~100ns step (clk is the period in picoseconds). */
	seq_wait = (100000 / clk) + 1;

	if (clk < (1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD)) {
		emc_cc_dbg(PRAMP_DN, "clk < FGCG_HIGH_SPEED_THRESHOLD;\n");
		emc_cc_dbg(PRAMP_DN, "  %u vs %u\n", clk,
			   1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD);

		if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
			emc_cc_dbg(PRAMP_DN, "clk < IOBRICK_DCC_THRESHOLD;\n");
			emc_cc_dbg(PRAMP_DN, "  %u vs %u\n", clk,
			   1000000 / IOBRICK_DCC_THRESHOLD);

			/* Swap DQ/CMD DCC enables for DQS DCC enables. */
			pmacro_cmd_pad &=
				~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
			pmacro_cmd_pad |=
				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
			ccfifo_writel(pmacro_cmd_pad,
				      EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
			ramp_down_wait += 100000;

			pmacro_dq_pad &=
			      ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
			pmacro_dq_pad |=
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
			ccfifo_writel(pmacro_dq_pad,
				      EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
			ccfifo_writel(pmacro_rfu1 & ~0x01120112,
				      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
		} else {
			emc_cc_dbg(PRAMP_DN, "clk > IOBRICK_DCC_THRESHOLD\n");
			ccfifo_writel(pmacro_rfu1 & ~0x01120112,
				      EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
			ramp_down_wait += 100000;
		}

		/* Second stage of brick disable. */
		ccfifo_writel(pmacro_rfu1 & ~0x01bf01bf,
			      EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
		ramp_down_wait += 100000;

		if (clk < (1000000 / IOBRICK_DCC_THRESHOLD)) {
			/* Drop all DCC enables on CMD and DATA pads. */
			pmacro_cmd_pad &=
				~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC |
				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC);
			ccfifo_writel(pmacro_cmd_pad,
				      EMC_PMACRO_CMD_PAD_TX_CTRL, seq_wait);
			ramp_down_wait += 100000;

			pmacro_dq_pad &=
			      ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC);
			ccfifo_writel(pmacro_dq_pad,
				      EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
			ccfifo_writel(pmacro_rfu1 & ~0x07ff07ff,
				      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
		} else {
			ccfifo_writel(pmacro_rfu1 & ~0x07ff07ff,
				      EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait);
			ramp_down_wait += 100000;
		}
	} else {
		emc_cc_dbg(PRAMP_DN, "clk > FGCG_HIGH_SPEED_THRESHOLD\n");
		/* Single-step brick disable for the fast-clock case. */
		ccfifo_writel(pmacro_rfu1 & ~0xffff07ff,
			      EMC_PMACRO_BRICK_CTRL_RFU1, seq_wait + 19);
		ramp_down_wait += 100000 + (20 * clk);
	}

	if (clk < (1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD)) {
		emc_cc_dbg(PRAMP_DN, "clk < FGCG_MID_SPEED_THRESHOLD;\n");
		emc_cc_dbg(PRAMP_DN, "  %u vs %u\n", clk,
			   1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD);

		/* Stage down the common pad TX enables. */
		ramp_down_wait += 100000;
		ccfifo_writel(pmacro_common_tx & ~0x5,
			      EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
		ramp_down_wait += 100000;
		ccfifo_writel(pmacro_common_tx & ~0xf,
			      EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
		ramp_down_wait += 100000;
		/* No-op entry: adds one more seq_wait of delay. */
		ccfifo_writel(0, 0, seq_wait);
		ramp_down_wait += 100000;
	} else {
		emc_cc_dbg(PRAMP_DN, "clk > FGCG_MID_SPEED_THRESHOLD\n");
		ccfifo_writel(pmacro_common_tx & ~0xf,
			      EMC_PMACRO_COMMON_PAD_TX_CTRL, seq_wait);
	}

	return ramp_down_wait;
}
1171
/*
 * Similar to do_dvfs_power_ramp_down() except this does the power ramp up.
 *
 * All register writes are queued through the CCFIFO (ccfifo_writel()) so
 * they execute as part of the HW clock-change sequence, not immediately.
 * Pad/brick settings normally come from @next_timing; when @flip_backward
 * is set they come from @last_timing instead.
 *
 * @clk is compared against 1000000 / <threshold> cutoffs below, so it is a
 * scaled frequency value (see callers). Returns the accumulated settle time
 * (x1000 scaled, same units as do_dvfs_power_ramp_down()'s return) that the
 * caller must account for.
 */
noinline u32 do_dvfs_power_ramp_up(u32 clk, int flip_backward,
				   const struct tegra21_emc_table *last_timing,
				   const struct tegra21_emc_table *next_timing)
{
	u32 pmacro_cmd_pad;
	u32 pmacro_dq_pad;
	u32 pmacro_rfu1;
	u32 pmacro_cfg5;
	u32 pmacro_common_tx;
	u32 ramp_up_wait = 0;

	/* Choose which timing table provides the pad/brick control values. */
	if (flip_backward) {
		pmacro_cmd_pad   = last_timing->
			burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
		pmacro_dq_pad    = last_timing->
			burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
		pmacro_rfu1      = last_timing->
			burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
		pmacro_cfg5      = last_timing->burst_regs[EMC_FBIO_CFG5_INDEX];
		pmacro_common_tx = last_timing->
			burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
	} else {
		pmacro_cmd_pad   = next_timing->
			burst_regs[EMC_PMACRO_CMD_PAD_TX_CTRL_INDEX];
		pmacro_dq_pad    = next_timing->
			burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
		pmacro_rfu1      = next_timing->
			burst_regs[EMC_PMACRO_BRICK_CTRL_RFU1_INDEX];
		pmacro_cfg5      = next_timing->
			burst_regs[EMC_FBIO_CFG5_INDEX];
		pmacro_common_tx = next_timing->
			burst_regs[EMC_PMACRO_COMMON_PAD_TX_CTRL_INDEX];
	}
	/* Keep the CMD DQ drivers forced on for the duration of the ramp. */
	pmacro_cmd_pad |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;

	if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
		/*
		 * Slow target clock: bring the common pad TX up in two steps
		 * with a settle period between them. The 0xa/0xf masks select
		 * subsets of the low TX enable bits — value from the
		 * reference sequence; exact bit meaning TODO confirm.
		 */
		ccfifo_writel(pmacro_common_tx & 0xa,
			      EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
		ccfifo_writel(pmacro_common_tx & 0xf,
			      EMC_PMACRO_COMMON_PAD_TX_CTRL,
			      (100000 / clk) + 1);
		ramp_up_wait += 100000;
	} else {
		ccfifo_writel(pmacro_common_tx | 0x8,
			      EMC_PMACRO_COMMON_PAD_TX_CTRL, 0);
	}

	if (clk < 1000000 / DVFS_FGCG_HIGH_SPEED_THRESHOLD) {
		if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
			/*
			 * Below the DCC threshold: enable the DQS DCCs first
			 * while keeping the DQ/CMD DCCs disabled.
			 */
			pmacro_cmd_pad |=
				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC;
			pmacro_cmd_pad &=
				~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
				  EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
			ccfifo_writel(pmacro_cmd_pad,
				      EMC_PMACRO_CMD_PAD_TX_CTRL,
				      (100000 / clk) + 1);
			ramp_up_wait += 100000;

			pmacro_dq_pad |=
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC;
			pmacro_dq_pad &=
			       ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
				 EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
			ccfifo_writel(pmacro_dq_pad,
				      EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
			/*
			 * 0xfe40fe40: partial brick-enable pattern from the
			 * reference ramp sequence — TODO confirm bit meaning.
			 */
			ccfifo_writel(pmacro_rfu1 & 0xfe40fe40,
				      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
		} else {
			ccfifo_writel(pmacro_rfu1 & 0xfe40fe40,
				      EMC_PMACRO_BRICK_CTRL_RFU1,
				      (100000 / clk) + 1);
			ramp_up_wait += 100000;
		}

		/* Enable more brick bits (0xfeedfeed) after another settle. */
		ccfifo_writel(pmacro_rfu1 & 0xfeedfeed,
			      EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
		ramp_up_wait += 100000;

		if (clk < 1000000 / IOBRICK_DCC_THRESHOLD) {
			/* Now turn on the remaining DQ/CMD DCCs as well. */
			pmacro_cmd_pad |=
				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
				EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC;
			ccfifo_writel(pmacro_cmd_pad,
				      EMC_PMACRO_CMD_PAD_TX_CTRL,
				      (100000 / clk) + 1);
			ramp_up_wait += 100000;

			pmacro_dq_pad |=
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
				EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC;
			ccfifo_writel(pmacro_dq_pad,
				      EMC_PMACRO_DATA_PAD_TX_CTRL, 0);
			/* Full brick enable: write RFU1 unmasked. */
			ccfifo_writel(pmacro_rfu1,
				      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
		} else {
			ccfifo_writel(pmacro_rfu1,
				      EMC_PMACRO_BRICK_CTRL_RFU1,
				      (100000 / clk) + 1);
			ramp_up_wait += 100000;
		}

		/* Re-enable command transmission last. */
		ccfifo_writel(pmacro_cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
			      EMC_FBIO_CFG5, (100000 / clk) + 10);
		ramp_up_wait += 100000 + (10 * clk);
	} else if (clk < 1000000 / DVFS_FGCG_MID_SPEED_THRESHOLD) {
		/* Mid-speed path: single brick write, then CMD TX enable. */
		ccfifo_writel(pmacro_rfu1 | 0x06000600,
			      EMC_PMACRO_BRICK_CTRL_RFU1, (100000 / clk) + 1);
		ccfifo_writel(pmacro_cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
			      EMC_FBIO_CFG5, (100000 / clk) + 10);
		ramp_up_wait += 100000 + 10 * clk;
	} else {
		/* High-speed path: no settle delays, fixed 12-cycle wait. */
		ccfifo_writel(pmacro_rfu1 | 0x00000600,
			      EMC_PMACRO_BRICK_CTRL_RFU1, 0);
		ccfifo_writel(pmacro_cfg5 & ~EMC_FBIO_CFG5_CMD_TX_DIS,
			      EMC_FBIO_CFG5, 12);
		ramp_up_wait += 12 * clk;
	}

	/* Ramp complete: stop forcing the CMD DQ drivers on. */
	pmacro_cmd_pad &= ~EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
	ccfifo_writel(pmacro_cmd_pad, EMC_PMACRO_CMD_PAD_TX_CTRL, 5);

	return ramp_up_wait;
}
1305
1306 /*
1307  * Change the DLL's input clock. Used during the DLL prelock sequence.
1308  */
1309 noinline void change_dll_src(const struct tegra21_emc_table *next_timing,
1310                              u32 clksrc)
1311 {
1312         u32 out_enb_x;
1313         u32 dll_setting = next_timing->dll_clk_src;
1314         u32 emc_clk_src;
1315         u32 emc_clk_div;
1316
1317         out_enb_x = 0;
1318         emc_clk_src = (clksrc & EMC_CLK_EMC_2X_CLK_SRC_MASK) >>
1319                 EMC_CLK_EMC_2X_CLK_SRC_SHIFT;
1320         emc_clk_div = (clksrc & EMC_CLK_EMC_2X_CLK_DIVISOR_MASK) >>
1321                 EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT;
1322
1323         dll_setting &= ~(DLL_CLK_EMC_DLL_CLK_SRC_MASK |
1324                          DLL_CLK_EMC_DLL_CLK_DIVISOR_MASK);
1325         dll_setting |= emc_clk_src << DLL_CLK_EMC_DLL_CLK_SRC_SHIFT;
1326         dll_setting |= emc_clk_div << DLL_CLK_EMC_DLL_CLK_DIVISOR_SHIFT;
1327
1328         /* Low jitter and undivided are the same thing. */
1329         dll_setting &= ~DLL_CLK_EMC_DLL_DDLL_CLK_SEL_MASK;
1330         if (emc_clk_src == EMC_CLK_SOURCE_PLLMB_LJ)
1331                 dll_setting |= (PLLM_VCOB <<
1332                                 DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
1333         else if (emc_clk_src == EMC_CLK_SOURCE_PLLM_LJ)
1334                 dll_setting |= (PLLM_VCOA <<
1335                                 DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
1336         else
1337                 dll_setting |= (EMC_DLL_SWITCH_OUT <<
1338                                 DLL_CLK_EMC_DLL_DDLL_CLK_SEL_SHIFT);
1339
1340         /* Now program the clock source. */
1341         emc_cc_dbg(REGS, "clk source: 0x%08x => 0x%p\n", dll_setting,
1342                    clk_base + CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL);
1343         writel(dll_setting, clk_base + CLK_RST_CONTROLLER_CLK_SOURCE_EMC_DLL);
1344
1345         if (next_timing->clk_out_enb_x_0_clk_enb_emc_dll) {
1346                 writel(CLK_OUT_ENB_X_CLK_ENB_EMC_DLL,
1347                        clk_base + CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET);
1348                 emc_cc_dbg(REGS, "out_enb_x_set: 0x%08x => 0x%p\n",
1349                            CLK_OUT_ENB_X_CLK_ENB_EMC_DLL,
1350                            clk_base + CLK_RST_CONTROLLER_CLK_OUT_ENB_X_SET);
1351         } else {
1352                 writel(CLK_OUT_ENB_X_CLK_ENB_EMC_DLL,
1353                        clk_base + CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR);
1354                 emc_cc_dbg(REGS, "out_enb_x_clr: 0x%08x => 0x%p\n",
1355                            CLK_OUT_ENB_X_CLK_ENB_EMC_DLL,
1356                            clk_base + CLK_RST_CONTROLLER_CLK_OUT_ENB_X_CLR);
1357         }
1358 }
1359
1360 /*
1361  * Prelock the DLL.
1362  */
1363 noinline u32 dll_prelock(const struct tegra21_emc_table *next_timing,
1364                          int dvfs_with_training, u32 clksrc)
1365 {
1366         u32 emc_dig_dll_status;
1367         u32 dll_locked;
1368         u32 dll_out;
1369         u32 emc_cfg_dig_dll;
1370         u32 emc_dll_cfg_0;
1371         u32 emc_dll_cfg_1;
1372         u32 ddllcal_ctrl_start_trim_val;
1373         u32 dll_en;
1374         u32 dual_channel_lpddr4_case;
1375         u32 dll_priv_updated;
1376
1377         emc_cc_dbg(PRELOCK, "Prelock starting; version: %d\n",
1378                    EMC_PRELOCK_VERSION);
1379
1380         dual_channel_lpddr4_case =
1381                 !!(emc_readl(EMC_FBIO_CFG7) & EMC_FBIO_CFG7_CH1_ENABLE) &
1382                 !!(emc_readl(EMC_FBIO_CFG7) & EMC_FBIO_CFG7_CH0_ENABLE);
1383
1384         emc_dig_dll_status = 0;
1385         dll_locked = 0;
1386         dll_out = 0;
1387         emc_cfg_dig_dll = 0;
1388         emc_dll_cfg_0 = 0;
1389         emc_dll_cfg_1 = 0;
1390         ddllcal_ctrl_start_trim_val = 0;
1391         dll_en = 0;
1392
1393         emc_cc_dbg(PRELOCK, "Dual channel LPDDR4: %s\n",
1394                    dual_channel_lpddr4_case ? "yes" : "no");
1395         emc_cc_dbg(PRELOCK, "DLL clksrc: 0x%08x\n", clksrc);
1396
1397         /* Step 1:
1398          *   Configure the DLL for prelock.
1399          */
1400         emc_cc_dbg(PRELOCK_STEPS, "Step 1\n");
1401         emc_cfg_dig_dll = emc_readl(EMC_CFG_DIG_DLL) &
1402                 ~EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_MASK;
1403         emc_cfg_dig_dll |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_LOCK_LIMIT_SHIFT);
1404         emc_cfg_dig_dll &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
1405         emc_cfg_dig_dll &= ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK;
1406         emc_cfg_dig_dll |= (3 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
1407         emc_cfg_dig_dll |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
1408         emc_cfg_dig_dll &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
1409         emc_cfg_dig_dll &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
1410
1411         emc_writel(emc_cfg_dig_dll, EMC_CFG_DIG_DLL);
1412         emc_writel(1, EMC_TIMING_CONTROL);
1413
1414         /* Step 2:
1415          *   Update timings.
1416          */
1417         emc_cc_dbg(PRELOCK_STEPS, "Step 2\n");
1418         wait_for_update(EMC_EMC_STATUS,
1419                         EMC_EMC_STATUS_TIMING_UPDATE_STALLED, 0, 0);
1420         if (dual_channel_lpddr4_case)
1421                 wait_for_update(EMC_EMC_STATUS,
1422                                 EMC_EMC_STATUS_TIMING_UPDATE_STALLED, 0, 1);
1423
1424         /* Step 3:
1425          *   Poll channel(s) until DLL_EN is true.
1426          */
1427         emc_cc_dbg(PRELOCK_STEPS, "Step 3\n");
1428         do {
1429                 emc_cfg_dig_dll = emc_readl(EMC_CFG_DIG_DLL);
1430                 dll_en = emc_cfg_dig_dll & EMC_CFG_DIG_DLL_CFG_DLL_EN;
1431         } while (dll_en == 1);
1432
1433         if (dual_channel_lpddr4_case) {
1434                 do {
1435                         emc_cfg_dig_dll = emc1_readl(EMC_CFG_DIG_DLL);
1436                         dll_en = emc_cfg_dig_dll & EMC_CFG_DIG_DLL_CFG_DLL_EN;
1437                 } while (dll_en == 1);
1438         }
1439
1440         /* Step 4:
1441          *   Update DLL calibration filter.
1442          */
1443         emc_cc_dbg(PRELOCK_STEPS, "Step 4\n");
1444         emc_dll_cfg_0 = emc_readl(EMC_DLL_CFG_0);
1445         emc_dll_cfg_0 &= EMC_DLL_CFG_0_DDLLCAL_CTRL_IGNORE_START;
1446         emc_dll_cfg_0 |= EMC_DLL_CFG_0_DDLLCAL_CTRL_DUAL_PASS_LOCK;
1447
1448         emc_dll_cfg_0 &= ~(EMC_DLL_CFG_0_DDLLCAL_CTRL_STEP_SIZE_MASK |
1449                            EMC_DLL_CFG_0_DDLLCAL_CTRL_END_COUNT_MASK |
1450                            EMC_DLL_CFG_0_DDLLCAL_CTRL_FILTER_BITS_MASK |
1451                            EMC_DLL_CFG_0_DDLLCAL_CTRL_SAMPLE_COUNT_MASK |
1452                            EMC_DLL_CFG_0_DDLLCAL_CTRL_SAMPLE_DELAY_MASK |
1453                            EMC_DLL_CFG_0_DDLLCAL_UPDATE_CNT_LIMIT_MASK);
1454
1455         emc_dll_cfg_0 |= (0xf << EMC_DLL_CFG_0_DDLLCAL_CTRL_STEP_SIZE_SHIFT) |
1456                 (0xa << EMC_DLL_CFG_0_DDLLCAL_CTRL_END_COUNT_SHIFT) |
1457                 (0x3 << EMC_DLL_CFG_0_DDLLCAL_CTRL_FILTER_BITS_SHIFT) |
1458                 (0x4 << EMC_DLL_CFG_0_DDLLCAL_CTRL_SAMPLE_COUNT_SHIFT) |
1459                 (0xa << EMC_DLL_CFG_0_DDLLCAL_CTRL_SAMPLE_DELAY_SHIFT) |
1460                 (0xf << EMC_DLL_CFG_0_DDLLCAL_UPDATE_CNT_LIMIT_SHIFT);
1461
1462         emc_writel(emc_dll_cfg_0, EMC_DLL_CFG_0);
1463
1464         if (next_timing->rate >= 400000 && next_timing->rate < 600000)
1465                 ddllcal_ctrl_start_trim_val = 150;
1466         else if (next_timing->rate >= 600000 && next_timing->rate < 800000)
1467                 ddllcal_ctrl_start_trim_val = 100;
1468         else if (next_timing->rate >= 800000 && next_timing->rate < 1000000)
1469                 ddllcal_ctrl_start_trim_val = 70;
1470         else if (next_timing->rate >= 1000000 && next_timing->rate < 1200000)
1471                 ddllcal_ctrl_start_trim_val = 30;
1472         else
1473                 ddllcal_ctrl_start_trim_val = 20;
1474
1475         emc_dll_cfg_1 = emc_readl(EMC_DLL_CFG_1);
1476         emc_dll_cfg_1 &= EMC_DLL_CFG_1_DDLLCAL_CTRL_START_TRIM_MASK;
1477         emc_dll_cfg_1 |= ddllcal_ctrl_start_trim_val;
1478         emc_writel(emc_dll_cfg_1, EMC_DLL_CFG_1);
1479
1480         /* Step 8:
1481          *   (Skipping some steps to get back inline with reference.)
1482          *   Change the DLL clock source.
1483          */
1484         emc_cc_dbg(PRELOCK_STEPS, "Step 8\n");
1485         change_dll_src(next_timing, clksrc);
1486
1487         /* Step 9:
1488          *   Enable the DLL and start the prelock state machine.
1489          */
1490         emc_cc_dbg(PRELOCK_STEPS, "Step 9\n");
1491         emc_cfg_dig_dll = emc_readl(EMC_CFG_DIG_DLL);
1492         emc_cfg_dig_dll |= EMC_CFG_DIG_DLL_CFG_DLL_EN;
1493         emc_writel(emc_cfg_dig_dll, EMC_CFG_DIG_DLL);
1494
1495         emc_timing_update(dual_channel_lpddr4_case ?
1496                           DUAL_CHANNEL : SINGLE_CHANNEL);
1497
1498         do {
1499                 emc_cfg_dig_dll = emc_readl(EMC_CFG_DIG_DLL);
1500                 dll_en = emc_cfg_dig_dll & EMC_CFG_DIG_DLL_CFG_DLL_EN;
1501         } while (dll_en == 0);
1502
1503         if (dual_channel_lpddr4_case) {
1504                 do {
1505                         emc_cfg_dig_dll = emc1_readl(EMC_CFG_DIG_DLL);
1506                         dll_en = emc_cfg_dig_dll & EMC_CFG_DIG_DLL_CFG_DLL_EN;
1507                 } while (dll_en == 0);
1508         }
1509
1510         /* Step 10:
1511          *   Wait for the DLL to lock.
1512          */
1513         emc_cc_dbg(PRELOCK_STEPS, "Step 10\n");
1514         do {
1515                 emc_dig_dll_status = emc_readl(EMC_DIG_DLL_STATUS);
1516                 dll_locked = emc_dig_dll_status & EMC_DIG_DLL_STATUS_DLL_LOCK;
1517                 dll_priv_updated = emc_dig_dll_status &
1518                         EMC_DIG_DLL_STATUS_DLL_PRIV_UPDATED;
1519         } while (!dll_locked || !dll_priv_updated);
1520
1521         /* Step 11:
1522          *   Prelock training specific code - removed. Should it be ??
1523          */
1524
1525         /* Step 12:
1526          *   Done! Return the dll prelock value.
1527          */
1528         emc_cc_dbg(PRELOCK_STEPS, "Step 12\n");
1529         emc_dig_dll_status = emc_readl(EMC_DIG_DLL_STATUS);
1530         return emc_dig_dll_status & EMC_DIG_DLL_STATUS_DLL_OUT_MASK;
1531 }
1532
1533 noinline void dll_disable(int channel_mode)
1534 {
1535         u32 emc_cfg_dig_dll;
1536
1537         emc_cfg_dig_dll = emc_readl(EMC_CFG_DIG_DLL);
1538         emc_cfg_dig_dll &= ~EMC_CFG_DIG_DLL_CFG_DLL_EN;
1539         emc_writel(emc_cfg_dig_dll, EMC_CFG_DIG_DLL);
1540         emc_timing_update(channel_mode);
1541
1542         wait_for_update(EMC_CFG_DIG_DLL, EMC_CFG_DIG_DLL_CFG_DLL_EN, 0, 0);
1543         if (channel_mode == DUAL_CHANNEL)
1544                 wait_for_update(EMC_CFG_DIG_DLL,
1545                                 EMC_CFG_DIG_DLL_CFG_DLL_EN, 0, 1);
1546 }
1547
1548 /*
1549  * Sequence revision: 0
1550  */
1551 noinline void emc_set_clock(const struct tegra21_emc_table *next_timing,
1552                             const struct tegra21_emc_table *last_timing,
1553                             int training, u32 clksrc)
1554 {
1555         /*
1556          * This is the timing table for the source frequency. It does _not_
1557          * necessarily correspond to the actual timing values in the EMC at the
1558          * moment. If the boot BCT differs from the table then this can happen.
1559          * However, we need it for accessing the dram_timing_regs (which are not
1560          * really registers) array for the current frequency.
1561          */
1562         const struct tegra21_emc_table *fake_timing;
1563
1564         u32 i, tmp;
1565
1566         u32 cya_allow_ref_cc = 0, ref_b4_sref_en = 0, cya_issue_pc_ref = 0;
1567
1568         u32 zqcal_before_cc_cutoff = 2400; /* In picoseconds */
1569         u32 ref_delay_mult;
1570         u32 ref_delay;
1571         u32 zq_latch_dvfs_wait_time;
1572         u32 tZQCAL_lpddr4_fc_adj;
1573         /* Scaled by x1000 */
1574         u32 tFC_lpddr4 = 1000 * next_timing->dram_timing_regs[T_FC_LPDDR4];
1575         /* u32 tVRCG_lpddr4 = next_timing->dram_timing_regs[T_FC_LPDDR4]; */
1576         u32 tZQCAL_lpddr4 = 1000000;
1577
1578         u32 dram_type, dram_dev_num, shared_zq_resistor;
1579         u32 channel_mode;
1580         u32 is_lpddr3;
1581
1582         u32 emc_cfg, emc_sel_dpd_ctrl, emc_cfg_reg;
1583
1584         u32 emc_dbg;
1585         u32 emc_zcal_interval;
1586         u32 emc_zcal_wait_cnt_old;
1587         u32 emc_zcal_wait_cnt_new;
1588         u32 emc_dbg_active;
1589         u32 zq_op;
1590         u32 zcal_wait_time_clocks;
1591         u32 zcal_wait_time_ps;
1592
1593         u32 emc_auto_cal_config;
1594         u32 auto_cal_en;
1595
1596         u32 mr13_catr_enable;
1597
1598         u32 ramp_up_wait = 0, ramp_down_wait = 0;
1599
1600         /* In picoseconds. */
1601         u32 source_clock_period;
1602         u32 destination_clock_period;
1603
1604         u32 emc_dbg_o;
1605         u32 emc_cfg_pipe_clk_o;
1606         u32 emc_pin_o;
1607
1608         u32 mr13_flip_fspwr;
1609         u32 mr13_flip_fspop;
1610
1611         u32 opt_zcal_en_cc;
1612         u32 opt_do_sw_qrst = 0;
1613         u32 opt_dvfs_mode;
1614         u32 opt_dll_mode;
1615         u32 opt_cc_short_zcal = 1;
1616         u32 opt_short_zcal = 1;
1617         u32 save_restore_clkstop_pd = 1;
1618
1619         u32 prelock_dll_en = 0, dll_out;
1620
1621         int next_push, next_dq_e_ivref, next_dqs_e_ivref;
1622
1623         u64 emc_mrw6_ab = (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_MRW6;
1624         u64 emc_mrw7_ab = (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_MRW7;
1625         u64 emc_mrw8_ab = (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_MRW8;
1626         u64 emc_mrw9_ab = (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_MRW9;
1627         u64 emc_mrw10_ch0_ab = (u64)IO_ADDRESS(TEGRA_EMC0_BASE) + EMC_MRW10;
1628         u64 emc_mrw10_ch1_ab = (u64)IO_ADDRESS(TEGRA_EMC1_BASE) + EMC_MRW10;
1629         u64 emc_mrw11_ch0_ab = (u64)IO_ADDRESS(TEGRA_EMC0_BASE) + EMC_MRW11;
1630         u64 emc_mrw11_ch1_ab = (u64)IO_ADDRESS(TEGRA_EMC1_BASE) + EMC_MRW11;
1631         u64 emc_mrw12_ch0_ab = (u64)IO_ADDRESS(TEGRA_EMC0_BASE) + EMC_MRW12;
1632         u64 emc_mrw12_ch1_ab = (u64)IO_ADDRESS(TEGRA_EMC1_BASE) + EMC_MRW12;
1633         u64 emc_mrw13_ch0_ab = (u64)IO_ADDRESS(TEGRA_EMC0_BASE) + EMC_MRW13;
1634         u64 emc_mrw13_ch1_ab = (u64)IO_ADDRESS(TEGRA_EMC1_BASE) + EMC_MRW13;
1635         u64 emc_mrw14_ab = (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_MRW14;
1636         u64 emc_mrw15_ab = (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_MRW15;
1637
1638         u64 emc_training_ctrl_ab =
1639                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_TRAINING_CTRL;
1640         u64 emc_cfg_ab = (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_CFG;
1641         u64 emc_mrs_wait_cnt_ab =
1642                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_MRS_WAIT_CNT;
1643         u64 emc_zcal_wait_cnt_ab =
1644                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_ZCAL_INTERVAL;
1645         u64 emc_zcal_interval_ab =
1646                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_ZCAL_INTERVAL;
1647         u64 emc_pmacro_autocal_cfg_common_ab =
1648                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_PMACRO_AUTOCAL_CFG_COMMON;
1649         u64 emc_pmacro_data_pad_tx_ctrl_ab =
1650                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_PMACRO_DATA_PAD_TX_CTRL;
1651         u64 emc_pmacro_cmd_pad_tx_ctrl_ab =
1652                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_PMACRO_CMD_PAD_TX_CTRL;
1653         u64 emc_pmacro_brick_ctrl_rfu1_ab =
1654                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_PMACRO_BRICK_CTRL_RFU1;
1655         u64 emc_pmacro_common_pad_tx_ctrl_ab =
1656                 (u64)IO_ADDRESS(TEGRA_EMC_BASE) + EMC_PMACRO_COMMON_PAD_TX_CTRL;
1657         u32 opt_war_200024907;
1658         u32 zq_wait_long;
1659         u32 zq_wait_short;
1660
1661         u32 tRTM;
1662         u32 RP_war;
1663         u32 R2P_war;
1664         u32 TRPab_war;
1665
1666         static u32 fsp_for_next_freq;
1667
1668         emc_cc_dbg(INFO, "Running clock change.");
1669         ccfifo_index = 0;
1670
1671         fake_timing = get_timing_from_freq(last_timing->rate);
1672
1673         fsp_for_next_freq = !fsp_for_next_freq;
1674
1675         dram_type = emc_readl(EMC_FBIO_CFG5) &
1676                 EMC_FBIO_CFG5_DRAM_TYPE_MASK >> EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;
1677         shared_zq_resistor = last_timing->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX] &
1678                 1 << 31; /* needs def */
1679         channel_mode = !!(last_timing->burst_regs[EMC_FBIO_CFG7_INDEX] &
1680                           1 << 2); /* needs def */
1681         opt_zcal_en_cc = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] &&
1682                           !last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX]) ||
1683                           dram_type == DRAM_TYPE_LPDDR4;
1684         opt_dll_mode = (dram_type == DRAM_TYPE_DDR3) ?
1685                 get_dll_state(next_timing) : DLL_OFF;
1686         is_lpddr3 = (dram_type == DRAM_TYPE_LPDDR2) &&
1687                 next_timing->burst_regs[EMC_FBIO_CFG5_INDEX] &
1688                 1 << 25; /* needs def */
1689         opt_war_200024907 = (dram_type == DRAM_TYPE_LPDDR4);
1690         opt_dvfs_mode = MAN_SR;
1691         dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1;
1692
1693         emc_cfg_reg = emc_readl(EMC_CFG);
1694         emc_auto_cal_config = emc_readl(EMC_AUTO_CAL_CONFIG);
1695
1696         source_clock_period = 1000000000 / last_timing->rate;
1697         destination_clock_period = 1000000000 / next_timing->rate;
1698
1699         tZQCAL_lpddr4_fc_adj = (source_clock_period > zqcal_before_cc_cutoff) ?
1700                 tZQCAL_lpddr4 / destination_clock_period :
1701                 (tZQCAL_lpddr4 - tFC_lpddr4) / destination_clock_period;
1702
1703         emc_dbg_o = emc_readl(EMC_DBG);
1704         emc_pin_o = emc_readl(EMC_PIN);
1705         emc_cfg_pipe_clk_o = emc_readl(EMC_CFG_PIPE_CLK);
1706         emc_dbg = emc_dbg_o;
1707
1708         emc_cfg = next_timing->burst_regs[EMC_CFG_INDEX];
1709         emc_cfg &= ~(EMC_CFG_DYN_SELF_REF | EMC_CFG_DRAM_ACPD |
1710                      EMC_CFG_DRAM_CLKSTOP_SR | EMC_CFG_DRAM_CLKSTOP_PD);
1711         emc_sel_dpd_ctrl = next_timing->emc_sel_dpd_ctrl;
1712         emc_sel_dpd_ctrl &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD_EN |
1713                               EMC_SEL_DPD_CTRL_CA_SEL_DPD_EN |
1714                               EMC_SEL_DPD_CTRL_RESET_SEL_DPD_EN |
1715                               EMC_SEL_DPD_CTRL_ODT_SEL_DPD_EN |
1716                               EMC_SEL_DPD_CTRL_DATA_SEL_DPD_EN);
1717
1718         emc_cc_dbg(INFO, "Clock change version: %d\n",
1719                    DVFS_CLOCK_CHANGE_VERSION);
1720         emc_cc_dbg(INFO, "DRAM type = %d\n", dram_type);
1721         emc_cc_dbg(INFO, "DRAM dev #: %d\n", dram_dev_num);
1722         emc_cc_dbg(INFO, "Next EMC clksrc: 0x%08x\n", clksrc);
1723         emc_cc_dbg(INFO, "DLL clksrc:      0x%08x\n", next_timing->dll_clk_src);
1724         emc_cc_dbg(INFO, "last rate: %lu, next rate %lu\n", last_timing->rate,
1725                    next_timing->rate);
1726         emc_cc_dbg(INFO, "last period: %u, next period: %u\n",
1727                    source_clock_period, destination_clock_period);
1728         emc_cc_dbg(INFO, "  shared_zq_resistor: %d\n", !!shared_zq_resistor);
1729         emc_cc_dbg(INFO, "  channel_mode: %d\n", channel_mode);
1730         emc_cc_dbg(INFO, "  opt_dll_mode: %d\n", opt_dll_mode);
1731
1732         /* Step 1:
1733          *   Pre DVFS SW sequence.
1734          */
1735         emc_cc_dbg(STEPS, "Step 1\n");
1736         emc_cc_dbg(SUB_STEPS, "Step 1.1: Bug 200024907 - Patch RP R2P");
1737
1738         if (opt_war_200024907) {
1739                 tRTM = fake_timing->dram_timing_regs[RL] +
1740                         div_o3(3600, source_clock_period) +
1741                         max_t(u32, div_o3(7500, source_clock_period), 8) + 16;
1742
1743                 emc_cc_dbg(INFO, "tRTM = %u, EMC_RP = %u\n", tRTM,
1744                            next_timing->burst_regs[EMC_RP_INDEX]);
1745
1746                 if (last_timing->burst_regs[EMC_RP_INDEX] < tRTM) {
1747                         if (tRTM > 63) {
1748                                 RP_war = 63;
1749                                 TRPab_war = 63;
1750                                 if ((tRTM - 63) >
1751                                     last_timing->burst_regs[EMC_R2P_INDEX])
1752                                         R2P_war = tRTM-63;
1753                                 else
1754                                         R2P_war = last_timing->
1755                                                 burst_regs[EMC_R2P_INDEX];
1756                         } else {
1757                                 R2P_war = last_timing->
1758                                         burst_regs[EMC_R2P_INDEX];
1759                                 RP_war = tRTM;
1760                                 if (last_timing->burst_regs[EMC_TRPAB_INDEX] <
1761                                     RP_war) {
1762                                         emc_cc_dbg(INFO, "Using RP_war\n");
1763                                         TRPab_war = RP_war;
1764                                 } else {
1765                                         emc_cc_dbg(INFO, "Not using RP_war\n");
1766                                         TRPab_war = last_timing->
1767                                                 burst_regs[EMC_TRPAB_INDEX];
1768                                 }
1769
1770                         }
1771
1772                         emc_writel(RP_war, EMC_RP);
1773                         emc_writel(R2P_war, EMC_R2P);
1774                         emc_writel(TRPab_war, EMC_TRPAB);
1775                         emc_timing_update(DUAL_CHANNEL);
1776                 } else {
1777                         emc_cc_dbg(INFO, "Skipped WAR for bug 200024907\n");
1778                 }
1779         }
1780
1781         emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
1782         emc_set_shadow_bypass(ACTIVE);
1783         emc_writel(emc_cfg, EMC_CFG);
1784         emc_writel(emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
1785         emc_writel(emc_cfg_pipe_clk_o | EMC_CFG_PIPE_CLK_CLK_ALWAYS_ON,
1786                    EMC_CFG_PIPE_CLK);
1787         emc_writel(next_timing->emc_fdpd_ctrl_cmd_no_ramp &
1788                    ~EMC_FDPD_CTRL_CMD_NO_RAMP_CMD_DPD_NO_RAMP_ENABLE,
1789                    EMC_FDPD_CTRL_CMD_NO_RAMP);
1790
1791         /* Check if we need to turn on VREF generator. */
1792         if ((((last_timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
1793                EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 0) &&
1794              ((next_timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
1795                EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF) == 1)) ||
1796             (((last_timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
1797                EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 0) &&
1798              ((next_timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX] &
1799                EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) == 1))) {
1800                 u32 pad_tx_ctrl =
1801                     next_timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
1802                 u32 last_pad_tx_ctrl =
1803                     last_timing->burst_regs[EMC_PMACRO_DATA_PAD_TX_CTRL_INDEX];
1804
1805                 next_dqs_e_ivref = pad_tx_ctrl &
1806                         EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF;
1807                 next_dq_e_ivref = pad_tx_ctrl &
1808                         EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF;
1809                 next_push = (last_pad_tx_ctrl &
1810                              ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_E_IVREF &
1811                              ~EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQS_E_IVREF) |
1812                         next_dq_e_ivref | next_dqs_e_ivref;
1813                 emc_writel(next_push, EMC_PMACRO_DATA_PAD_TX_CTRL);
1814                 udelay(30);
1815         }
1816
1817         /* Does this need to be before or after the 30us delay? */
1818         emc_set_shadow_bypass(ASSEMBLY);
1819
1820         /* Step 2:
1821          *   Prelock the DLL.
1822          */
1823         emc_cc_dbg(STEPS, "Step 2\n");
1824         if (next_timing->burst_regs[EMC_CFG_DIG_DLL_INDEX] &
1825             EMC_CFG_DIG_DLL_CFG_DLL_EN) {
1826                 emc_cc_dbg(INFO, "Prelock enabled for target frequency.\n");
1827                 dll_out = dll_prelock(next_timing, 0, clksrc);
1828                 emc_cc_dbg(INFO, "DLL out: 0x%03x\n", dll_out);
1829                 prelock_dll_en = 1;
1830         } else {
1831                 emc_cc_dbg(INFO, "Disabling DLL for target frequency.\n");
1832                 dll_disable(channel_mode);
1833         }
1834
1835         /* Step 3:
1836          *   Prepare autocal for the clock change.
1837          */
1838         emc_cc_dbg(STEPS, "Step 3\n");
1839         emc_auto_cal_config = next_timing->emc_auto_cal_config;
1840         auto_cal_en = emc_auto_cal_config & EMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1841         emc_auto_cal_config &= ~EMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
1842         emc_auto_cal_config |=  EMC_AUTO_CAL_CONFIG_AUTO_CAL_MEASURE_STALL;
1843         emc_auto_cal_config |=  EMC_AUTO_CAL_CONFIG_AUTO_CAL_UPDATE_STALL;
1844         emc_auto_cal_config |=  auto_cal_en;
1845         emc_writel(emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
1846
1847         emc_set_shadow_bypass(ACTIVE);
1848         emc_writel(next_timing->emc_auto_cal_config2, EMC_AUTO_CAL_CONFIG2);
1849         emc_writel(next_timing->emc_auto_cal_config3, EMC_AUTO_CAL_CONFIG3);
1850         emc_writel(next_timing->emc_auto_cal_config4, EMC_AUTO_CAL_CONFIG4);
1851         emc_writel(next_timing->emc_auto_cal_config5, EMC_AUTO_CAL_CONFIG5);
1852         emc_writel(next_timing->emc_auto_cal_config6, EMC_AUTO_CAL_CONFIG6);
1853         emc_writel(next_timing->emc_auto_cal_config7, EMC_AUTO_CAL_CONFIG7);
1854         emc_writel(next_timing->emc_auto_cal_config8, EMC_AUTO_CAL_CONFIG8);
1855         emc_set_shadow_bypass(ASSEMBLY);
1856
1857         emc_auto_cal_config |= (EMC_AUTO_CAL_CONFIG_AUTO_CAL_COMPUTE_START |
1858                                 auto_cal_en);
1859         emc_writel(emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
1860
1861         /* Step 4:
1862          *   Update EMC_CFG_2 (slow >50ns-period LPDDR4 source: queue a self-refresh CCFIFO entry instead).
1863          */
1864         emc_cc_dbg(STEPS, "Step 4\n");
1865         if (source_clock_period > 50000 && dram_type == DRAM_TYPE_LPDDR4)
1866                 ccfifo_writel(1, EMC_SELF_REF, 0);
1867         else
1868                 emc_writel(next_timing->emc_cfg_2, EMC_CFG_2);
1869
1870         /* Step 5:
1871          *   Prepare reference variables for ZQCAL regs.
1872          */
1873         emc_cc_dbg(STEPS, "Step 5\n");
1874         emc_zcal_interval = 0;
1875         emc_zcal_wait_cnt_old =
1876                 last_timing->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
1877         emc_zcal_wait_cnt_new =
1878                 next_timing->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX];
1879         emc_zcal_wait_cnt_old &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;
1880         emc_zcal_wait_cnt_new &= ~EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK;
1881
1882         if (dram_type == DRAM_TYPE_LPDDR4)
1883                 zq_wait_long = max((u32)1,
1884                                  div_o3(1000000, destination_clock_period));
1885         else if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
1886                 zq_wait_long = max(next_timing->min_mrs_wait,
1887                                  div_o3(360000, destination_clock_period)) + 4;
1888         else if (dram_type == DRAM_TYPE_DDR3)
1889                 zq_wait_long = max((u32)256,
1890                                  div_o3(320000, destination_clock_period) + 2);
1891         else
1892                 zq_wait_long = 0;
1893
1894         if (dram_type == DRAM_TYPE_LPDDR2 || is_lpddr3)
1895                 zq_wait_short = max(max(next_timing->min_mrs_wait, (u32)6),
1896                                   div_o3(90000, destination_clock_period)) + 4;
1897         else if (dram_type == DRAM_TYPE_DDR3)
1898                 zq_wait_short = max((u32)64,
1899                                   div_o3(80000, destination_clock_period)) + 2;
1900         else
1901                 zq_wait_short = 0;
1902
1903         /* Step 6:
1904          *   Training code - removed.
1905          */
1906         emc_cc_dbg(STEPS, "Step 6\n");
1907
1908         /* Step 7:
1909          *   Program FSP reference registers and send MRWs to new FSPWR.
1910          */
1911         emc_cc_dbg(STEPS, "Step 7\n");
1912         if (!fsp_for_next_freq) {
1913                 mr13_flip_fspwr = (next_timing->emc_mrw3 & 0xffffff3f) | 0x80;
1914                 mr13_flip_fspop = (next_timing->emc_mrw3 & 0xffffff3f) | 0x00;
1915         } else {
1916                 mr13_flip_fspwr = (next_timing->emc_mrw3 & 0xffffff3f) | 0x40;
1917                 mr13_flip_fspop = (next_timing->emc_mrw3 & 0xffffff3f) | 0xc0;
1918         }
1919
1920         mr13_catr_enable = (mr13_flip_fspwr & 0xFFFFFFFE) | 0x01;
1921         if (dram_dev_num == TWO_RANK)
1922                 mr13_catr_enable =
1923                         (mr13_catr_enable & 0x3fffffff) | 0x80000000;
1924
1925         if (dram_type == DRAM_TYPE_LPDDR4) {
1926                 emc_writel(mr13_flip_fspwr, EMC_MRW3);
1927                 emc_writel(next_timing->emc_mrw, EMC_MRW);
1928                 emc_writel(next_timing->emc_mrw2, EMC_MRW2);
1929         }
1930
1931         /* Step 8:
1932          *   Program the shadow registers.
1933          */
1934         emc_cc_dbg(STEPS, "Step 8\n");
1935         emc_cc_dbg(SUB_STEPS, "Writing burst_regs\n");
1936         for (i = 0; i < next_timing->burst_regs_num; i++) {
1937                 u64 var;
1938                 u32 wval;
1939
1940                 if (!burst_reg_off[i])
1941                         continue;
1942
1943                 var = (u64)burst_reg_off[i];
1944                 wval = next_timing->burst_regs[i];
1945
1946                 if (dram_type != DRAM_TYPE_LPDDR4 &&
1947                     (var == emc_mrw6_ab      || var == emc_mrw7_ab ||
1948                      var == emc_mrw8_ab      || var == emc_mrw9_ab ||
1949                      var == emc_mrw10_ch0_ab || var == emc_mrw10_ch1_ab ||
1950                      var == emc_mrw11_ch0_ab || var == emc_mrw11_ch1_ab ||
1951                      var == emc_mrw12_ch0_ab || var == emc_mrw12_ch1_ab ||
1952                      var == emc_mrw13_ch0_ab || var == emc_mrw13_ch1_ab ||
1953                      var == emc_mrw14_ab     || var == emc_mrw15_ab ||
1954                      var == emc_training_ctrl_ab))
1955                         continue;
1956
1957                 /* Pain... And suffering. */
1958                 if (var == emc_cfg_ab) {
1959                         wval &= ~EMC_CFG_DRAM_ACPD;
1960                         wval &= ~EMC_CFG_DYN_SELF_REF;
1961                         if (dram_type == DRAM_TYPE_LPDDR4) {
1962                                 wval &= ~EMC_CFG_DRAM_CLKSTOP_SR;
1963                                 wval &= ~EMC_CFG_DRAM_CLKSTOP_PD;
1964                         }
1965                 } else if (var == emc_mrs_wait_cnt_ab &&
1966                            dram_type == DRAM_TYPE_LPDDR2 &&
1967                            opt_zcal_en_cc && !opt_cc_short_zcal &&
1968                            opt_short_zcal) {
1969                         wval = (wval & ~(EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK <<
1970                                          EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT)) |
1971                            ((zq_wait_long & EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) <<
1972                             EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
1973                 } else if (var == emc_zcal_wait_cnt_ab &&
1974                            dram_type == DRAM_TYPE_DDR3 && opt_zcal_en_cc &&
1975                            !opt_cc_short_zcal && opt_short_zcal) {
1976                         wval = (wval & ~(EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK <<
1977                                        EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_SHIFT)) |
1978                             ((zq_wait_long &
1979                               EMC_ZCAL_WAIT_CNT_ZCAL_WAIT_CNT_MASK) <<
1980                               EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT);
1981                 } else if (var == emc_zcal_interval_ab && opt_zcal_en_cc) {
1982                         wval = 0; /* EMC_ZCAL_INTERVAL reset value. */
1983                 } else if (var == emc_pmacro_autocal_cfg_common_ab) {
1984                         wval |= EMC_PMACRO_AUTOCAL_CFG_COMMON_E_CAL_BYPASS_DVFS;
1985                 } else if (var == emc_pmacro_data_pad_tx_ctrl_ab) {
1986                         wval &=
1987                              ~(EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSP_TX_E_DCC |
1988                                EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQSN_TX_E_DCC |
1989                                EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_DQ_TX_E_DCC |
1990                                EMC_PMACRO_DATA_PAD_TX_CTRL_DATA_CMD_TX_E_DCC);
1991                 } else if (var == emc_pmacro_cmd_pad_tx_ctrl_ab) {
1992                         wval |= EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_DRVFORCEON;
1993                         wval &= ~(EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSP_TX_E_DCC |
1994                                   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQSN_TX_E_DCC |
1995                                   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_DQ_TX_E_DCC |
1996                                   EMC_PMACRO_CMD_PAD_TX_CTRL_CMD_CMD_TX_E_DCC);
1997                 } else if (var == emc_pmacro_brick_ctrl_rfu1_ab) {
1998                         wval &= 0xf800f800;
1999                 } else if (var == emc_pmacro_common_pad_tx_ctrl_ab) {
2000                         wval &= 0xfffffff0;
2001                 }
2002
2003                 emc_cc_dbg(REG_LISTS, "(%u) 0x%08x => 0x%p\n",
2004                            i, wval, (void *)var);
2005                 __raw_writel(wval, (void __iomem *)var);
2006         }
2007
2008         /* Per channel burst registers. */
2009         emc_cc_dbg(SUB_STEPS, "Writing burst_regs_per_ch\n");
2010         for (i = 0; i < next_timing->burst_regs_per_ch_num; i++) {
2011                 if (!burst_perch_reg_off[i])
2012                         continue;
2013
2014                 if (dram_type != DRAM_TYPE_LPDDR4 &&
2015                     ((u64)burst_perch_reg_off[i] == emc_mrw6_ab ||
2016                      (u64)burst_perch_reg_off[i] == emc_mrw7_ab ||
2017                      (u64)burst_perch_reg_off[i] == emc_mrw8_ab ||
2018                      (u64)burst_perch_reg_off[i] == emc_mrw9_ab ||
2019                      (u64)burst_perch_reg_off[i] == emc_mrw10_ch0_ab ||
2020                      (u64)burst_perch_reg_off[i] == emc_mrw10_ch1_ab ||
2021                      (u64)burst_perch_reg_off[i] == emc_mrw11_ch0_ab ||
2022                      (u64)burst_perch_reg_off[i] == emc_mrw11_ch1_ab ||
2023                      (u64)burst_perch_reg_off[i] == emc_mrw12_ch0_ab ||
2024                      (u64)burst_perch_reg_off[i] == emc_mrw12_ch1_ab ||
2025                      (u64)burst_perch_reg_off[i] == emc_mrw13_ch0_ab ||
2026                      (u64)burst_perch_reg_off[i] == emc_mrw13_ch1_ab ||
2027                      (u64)burst_perch_reg_off[i] == emc_mrw14_ab ||
2028                      (u64)burst_perch_reg_off[i] == emc_mrw15_ab))
2029                         continue;
2030
2031                 /* Filter out second channel if not in DUAL_CHANNEL mode. */
2032                 if (channel_mode != DUAL_CHANNEL &&
2033                     (u64)burst_perch_reg_off[i] >=
2034                     (u64)IO_ADDRESS(TEGRA_EMC1_BASE))
2035                         continue;
2036
2037                 emc_cc_dbg(REG_LISTS, "(%u) 0x%08x => 0x%p\n",
2038                            i, next_timing->burst_regs_per_ch[i],
2039                            burst_perch_reg_off[i]);
2040                 __raw_writel(next_timing->burst_regs_per_ch[i],
2041                              burst_perch_reg_off[i]);
2042         }
2043
2044         /* Vref regs. */
2045         emc_cc_dbg(SUB_STEPS, "Writing vref_regs\n");
2046         for (i = 0; i < next_timing->vref_regs_num; i++) {
2047                 if (!vref_reg_off[i])
2048                         continue;
2049
2050                 if (channel_mode != DUAL_CHANNEL &&
2051                     (u64)vref_reg_off[i] >= (u64)IO_ADDRESS(TEGRA_EMC1_BASE))
2052                         continue;
2053
2054                 emc_cc_dbg(REG_LISTS, "(%u) 0x%08x => 0x%p\n",
2055                            i, next_timing->vref_regs[i], vref_reg_off[i]);
2056                 __raw_writel(next_timing->vref_regs[i], vref_reg_off[i]);
2057         }
2058
2059         /* Trimmers. */
2060         emc_cc_dbg(SUB_STEPS, "Writing trim_regs\n");
2061         for (i = 0; i < next_timing->trim_regs_num; i++) {
2062                 if (!trim_reg_off[i])
2063                         continue;
2064
2065                 emc_cc_dbg(REG_LISTS, "(%u) 0x%08x => 0x%p\n",
2066                            i, next_timing->trim_regs[i],
2067                            trim_reg_off[i]);
2068                 __raw_writel(next_timing->trim_regs[i], trim_reg_off[i]);
2069         }
2070
2071         /* Per channel trimmers.
              * NOTE(review): the DUAL_CHANNEL filter below indexes
              * vref_reg_off[i] rather than trim_perch_reg_off[i] -- looks
              * like a copy/paste slip from the vref loop above; confirm. */
2072         emc_cc_dbg(SUB_STEPS, "Writing trim_regs_per_ch\n");
2073         for (i = 0; i < next_timing->trim_regs_per_ch_num; i++) {
2074                 if (!trim_perch_reg_off[i])
2075                         continue;
2076
2077                 if (channel_mode != DUAL_CHANNEL &&
2078                     (u64)vref_reg_off[i] >=
2079                     (u64)IO_ADDRESS(TEGRA_EMC1_BASE))
2080                         continue;
2081
2082                 emc_cc_dbg(REG_LISTS, "(%u) 0x%08x => 0x%p\n",
2083                            i, next_timing->trim_regs_per_ch[i],
2084                            trim_perch_reg_off[i]);
2085                 __raw_writel(next_timing->trim_regs_per_ch[i],
2086                              trim_perch_reg_off[i]);
2087         }
2088
2089         emc_cc_dbg(SUB_STEPS, "Writing burst_mc_regs\n");
2090         for (i = 0; i < next_timing->burst_mc_regs_num; i++) {
2091                 emc_cc_dbg(REG_LISTS, "(%u) 0x%08x => 0x%p\n",
2092                            i, next_timing->burst_mc_regs[i],
2093                            burst_mc_reg_off[i]);
2094                 __raw_writel(next_timing->burst_mc_regs[i],
2095                              burst_mc_reg_off[i]);
2096         }
2097
2098         /* Registers to be programmed on the faster clock. */
2099         if (next_timing->rate < last_timing->rate) {
2100                 emc_cc_dbg(SUB_STEPS, "Writing la_scale_regs\n");
2101                 for (i = 0; i < next_timing->la_scale_regs_num; i++) {
2102                         emc_cc_dbg(REG_LISTS, "(%u) 0x%08x => 0x%p\n",
2103                                    i, next_timing->la_scale_regs[i],
2104                                    la_scale_off_regs[i]);
2105                         __raw_writel(next_timing->la_scale_regs[i],
2106                                      la_scale_off_regs[i]);
2107                 }
2108         }
2109
2110         /* Flush all the burst register writes. */
2111         wmb();
2112
2113         /* Step 9:
2114          *   LPDDR4 section A.
2115          */
2116         emc_cc_dbg(STEPS, "Step 9\n");
2117         if (dram_type == DRAM_TYPE_LPDDR4) {
2118                 emc_writel(emc_zcal_interval, EMC_ZCAL_INTERVAL);
2119                 emc_writel(emc_zcal_wait_cnt_new, EMC_ZCAL_WAIT_CNT);
2120
2121                 emc_dbg |= (EMC_DBG_WRITE_MUX_ACTIVE |
2122                             EMC_DBG_WRITE_ACTIVE_ONLY);
2123
2124                 emc_writel(emc_dbg, EMC_DBG);
2125                 emc_writel(emc_zcal_interval, EMC_ZCAL_INTERVAL);
2126                 emc_writel(emc_dbg_o, EMC_DBG);
2127         }
2128
2129         /* Step 10:
2130          *   LPDDR4 and DDR3 common section.
2131          */
2132         emc_cc_dbg(STEPS, "Step 10\n");
2133         if (opt_dvfs_mode == MAN_SR || dram_type == DRAM_TYPE_LPDDR4) {
2134                 if (dram_type == DRAM_TYPE_LPDDR4)
2135                         ccfifo_writel(0x101, EMC_SELF_REF, 0);
2136                 else
2137                         ccfifo_writel(0x1, EMC_SELF_REF, 0);
2138
2139                 if (dram_type == DRAM_TYPE_LPDDR4 &&
2140                     source_clock_period <= zqcal_before_cc_cutoff) {
2141                         ccfifo_writel(mr13_flip_fspwr ^ 0x40, EMC_MRW3, 0);
2142                         ccfifo_writel((next_timing->burst_regs[EMC_MRW6_INDEX] &
2143                                        0xFFFF3F3F) |
2144                                       (last_timing->burst_regs[EMC_MRW6_INDEX] &
2145                                        0x0000C0C0), EMC_MRW6, 0);
2146                         ccfifo_writel(
2147                                 (next_timing->burst_regs[EMC_MRW14_INDEX] &
2148                                  0xFFFF0707) |
2149                                 (last_timing->burst_regs[EMC_MRW14_INDEX] &
2150                                  0x00003838), EMC_MRW14, 0);
2151
2152                         if (dram_dev_num == TWO_RANK) {
2153                                 ccfifo_writel(
2154                                       (next_timing->burst_regs[EMC_MRW7_INDEX] &
2155                                        0xFFFF3F3F) |
2156                                       (last_timing->burst_regs[EMC_MRW7_INDEX] &
2157                                        0x0000C0C0), EMC_MRW7, 0);
2158                                 ccfifo_writel(
2159                                      (next_timing->burst_regs[EMC_MRW15_INDEX] &
2160                                       0xFFFF0707) |
2161                                      (last_timing->burst_regs[EMC_MRW15_INDEX] &
2162                                       0x00003838), EMC_MRW15, 0);
2163                         }
2164                         if (opt_zcal_en_cc) {
2165                                 if (dram_dev_num == ONE_RANK)
2166                                         ccfifo_writel(
2167                                                 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2168                                                 EMC_ZQ_CAL_ZQ_CAL_CMD,
2169                                                 EMC_ZQ_CAL, 0);
2170                                 else if (shared_zq_resistor)
2171                                         ccfifo_writel(
2172                                                 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2173                                                 EMC_ZQ_CAL_ZQ_CAL_CMD,
2174                                                 EMC_ZQ_CAL, 0);
2175                                 else
2176                                         ccfifo_writel(EMC_ZQ_CAL_ZQ_CAL_CMD,
2177                                                      EMC_ZQ_CAL, 0);
2178                         }
2179                 }
2180         }
2181
2182         emc_dbg = emc_dbg_o;
2183         if (dram_type == DRAM_TYPE_LPDDR4) {
2184                 ccfifo_writel(mr13_flip_fspop | 0x8, EMC_MRW3,
2185                               (1000 * fake_timing->dram_timing_regs[T_RP]) /
2186                               source_clock_period);
2187                 ccfifo_writel(0, 0, tFC_lpddr4 / source_clock_period);
2188         }
2189
2190         if (dram_type == DRAM_TYPE_LPDDR4 || opt_dvfs_mode != MAN_SR) {
2191                 u32 t = 30 + (cya_allow_ref_cc ?
2192                         (4000 * fake_timing->dram_timing_regs[T_RFC]) +
2193                         ((1000 * fake_timing->dram_timing_regs[T_RP]) /
2194                          source_clock_period) : 0);
2195
2196                 ccfifo_writel(emc_pin_o & ~(EMC_PIN_PIN_CKE_PER_DEV |
2197                                             EMC_PIN_PIN_CKEB | EMC_PIN_PIN_CKE),
2198                               EMC_PIN, t);
2199         }
2200
2201         ref_delay_mult = 1;
2202         ref_b4_sref_en = 0;
2203         cya_issue_pc_ref = 0;
2204
2205         ref_delay_mult += ref_b4_sref_en   ? 1 : 0;
2206         ref_delay_mult += cya_allow_ref_cc ? 1 : 0;
2207         ref_delay_mult += cya_issue_pc_ref ? 1 : 0;
2208         ref_delay = ref_delay_mult *
2209                 ((1000 * fake_timing->dram_timing_regs[T_RP]
2210                   / source_clock_period) +
2211                  (1000 * fake_timing->dram_timing_regs[T_RFC] /
2212                   source_clock_period)) + 20;
2213
2214         /* Step 11:
2215          *   Ramp down.
2216          */
2217         emc_cc_dbg(STEPS, "Step 11\n");
2218         ccfifo_writel(0x0, EMC_CFG_SYNC,
2219                       dram_type == DRAM_TYPE_LPDDR4 ? 0 : ref_delay);
2220
2221         emc_dbg_active = emc_dbg | (EMC_DBG_WRITE_MUX_ACTIVE | /* Redundant. */
2222                                     EMC_DBG_WRITE_ACTIVE_ONLY);
2223         ccfifo_writel(emc_dbg_active, EMC_DBG, 0);
2224
2225         /* Note: do_dvfs_power_ramp_down() is invoked on the next
              * statement, so the old "Todo: implement" note appears stale. */
2226         ramp_down_wait = do_dvfs_power_ramp_down(source_clock_period, 0,
2227                                                  last_timing, next_timing);
2228
2229         /* Step 12:
2230          *   And finally - trigger the clock change.
2231          */
2232         emc_cc_dbg(STEPS, "Step 12\n");
2233         ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE, 0);
2234         emc_dbg_active &= ~EMC_DBG_WRITE_ACTIVE_ONLY;
2235         ccfifo_writel(emc_dbg_active, EMC_DBG, 0);
2236
2237         /* Step 13:
2238          *   Ramp up.
2239          */
2240         /* Note: do_dvfs_power_ramp_up() is invoked just below, so the
              * old "Todo: implement" note appears stale. */
2241         emc_cc_dbg(STEPS, "Step 13\n");
2242         ramp_up_wait = do_dvfs_power_ramp_up(destination_clock_period, 0,
2243                                              last_timing, next_timing);
2244         ccfifo_writel(emc_dbg, EMC_DBG, 0);
2245
2246         /* Step 14:
2247          *   Bringup CKE pins.
2248          */
2249         emc_cc_dbg(STEPS, "Step 14\n");
2250         if (dram_type == DRAM_TYPE_LPDDR4) {
2251                 u32 r = emc_pin_o | EMC_PIN_PIN_CKE;
2252                 if (dram_dev_num == TWO_RANK)
2253                         ccfifo_writel(r | EMC_PIN_PIN_CKEB |
2254                                       EMC_PIN_PIN_CKE_PER_DEV, EMC_PIN,
2255                                       0);
2256                 else
2257                         ccfifo_writel(r & ~(EMC_PIN_PIN_CKEB |
2258                                             EMC_PIN_PIN_CKE_PER_DEV),
2259                                       EMC_PIN, 0);
2260         }
2261
2262         /* Step 15: (two step 15s ??)
2263          *   Calculate zqlatch wait time; has dependency on ramping times.
2264          */
2265         emc_cc_dbg(STEPS, "Step 15\n");
2266
2267         if (source_clock_period <= zqcal_before_cc_cutoff)
2268                 zq_latch_dvfs_wait_time =
2269                         (tZQCAL_lpddr4_fc_adj - (ramp_up_wait + ramp_down_wait))
2270                         / destination_clock_period;
2271         else
2272                 zq_latch_dvfs_wait_time = tZQCAL_lpddr4_fc_adj -
2273                         div_o3(1000 * next_timing->dram_timing_regs[T_PDEX],
2274                                destination_clock_period);
2275
2276         emc_cc_dbg(INFO, "tZQCAL_lpddr4_fc_adj = %u\n", tZQCAL_lpddr4_fc_adj);
2277         emc_cc_dbg(INFO, "destination_clock_period = %u\n",
2278                    destination_clock_period);
2279         emc_cc_dbg(INFO, "next_timing->dram_timing_regs[T_PDEX] = %u\n",
2280                    next_timing->dram_timing_regs[T_PDEX]);
2281         emc_cc_dbg(INFO, "zq_latch_dvfs_wait_time = %u\n",
2282                    zq_latch_dvfs_wait_time);
2283
2284         if (dram_type == DRAM_TYPE_LPDDR4 && opt_zcal_en_cc) {
2285                 if (dram_dev_num == ONE_RANK) {
2286                         if (source_clock_period > zqcal_before_cc_cutoff)
2287                                 ccfifo_writel(2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2288                                    EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
2289                                    div_o3(1000 *
2290                                           next_timing->dram_timing_regs[T_PDEX],
2291                                           destination_clock_period));
2292                         ccfifo_writel((mr13_flip_fspop & 0xFFFFFFF7) |
2293                                    0x0C000000, EMC_MRW3,
2294                                    div_o3(1000 *
2295                                           next_timing->dram_timing_regs[T_PDEX],
2296                                           destination_clock_period));
2297                         ccfifo_writel(EMC_SELF_REF_ACTIVE_SELF_REF,
2298                                       EMC_SELF_REF, 0);
2299                         ccfifo_writel(0, EMC_REF, 0);
2300                         ccfifo_writel(2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2301                                       EMC_ZQ_CAL_ZQ_LATCH_CMD,
2302                                       EMC_ZQ_CAL, zq_latch_dvfs_wait_time);
2303                 } else if (shared_zq_resistor) {
2304                         if (source_clock_period > zqcal_before_cc_cutoff)
2305                                 ccfifo_writel(2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2306                                    EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
2307                                    div_o3(1000 *
2308                                           next_timing->dram_timing_regs[T_PDEX],
2309                                           destination_clock_period));
2310
2311                         ccfifo_writel(2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2312                                   EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
2313                                   zq_latch_dvfs_wait_time +
2314                                   div_o3(1000 *
2315                                          next_timing->dram_timing_regs[T_PDEX],
2316                                          destination_clock_period));
2317                         ccfifo_writel(1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2318                                       EMC_ZQ_CAL_ZQ_LATCH_CMD,
2319                                       EMC_ZQ_CAL, 0);
2320
2321                         ccfifo_writel((mr13_flip_fspop & 0xfffffff7) |
2322                                       0x0c000000, EMC_MRW3, 0);
2323                         ccfifo_writel(EMC_SELF_REF_ACTIVE_SELF_REF,
2324                                       EMC_SELF_REF, 0);
2325                         ccfifo_writel(0, EMC_REF, 0);
2326
2327                         ccfifo_writel(1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2328                                       EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
2329                                       tZQCAL_lpddr4 / destination_clock_period);
2330                 } else {
2331                         if (source_clock_period > zqcal_before_cc_cutoff) {
2332                                 ccfifo_writel(EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL,
2333                                    div_o3(1000 *
2334                                           next_timing->dram_timing_regs[T_PDEX],
2335                                           destination_clock_period));
2336                         }
2337
2338                         ccfifo_writel((mr13_flip_fspop & 0xfffffff7) |
2339                                    0x0c000000, EMC_MRW3,
2340                                    div_o3(1000 *
2341                                           next_timing->dram_timing_regs[T_PDEX],
2342                                           destination_clock_period));
2343                         ccfifo_writel(EMC_SELF_REF_ACTIVE_SELF_REF,
2344                                       EMC_SELF_REF, 0);
2345                         ccfifo_writel(0, EMC_REF, 0);
2346
2347                         ccfifo_writel(EMC_ZQ_CAL_ZQ_LATCH_CMD, EMC_ZQ_CAL,
2348                                       zq_latch_dvfs_wait_time);
2349                 }
2350         }
2351
2352         /* WAR: delay for zqlatch */
2353         ccfifo_writel(0, 0, 10);
2354
2355         /* Step 16:
2356          *   LPDDR4 Conditional Training Kickoff. Removed.
2357          */
2358
2359         /* Step 17:
2360          *   MANSR exit self refresh.
2361          */
2362         emc_cc_dbg(STEPS, "Step 17\n");
2363         if (opt_dvfs_mode == MAN_SR && dram_type != DRAM_TYPE_LPDDR4)
2364                 ccfifo_writel(0, EMC_SELF_REF, 0);
2365
2366         /* Step 18:
2367          *   Send MRWs to LPDDR3/DDR3.
2368          */
2369         emc_cc_dbg(STEPS, "Step 18\n");
2370         if (dram_type == DRAM_TYPE_LPDDR2) {
2371                 ccfifo_writel(next_timing->emc_mrw2, EMC_MRW2, 0);
2372                 ccfifo_writel(next_timing->emc_mrw,  EMC_MRW,  0);
2373                 if (is_lpddr3)
2374                         ccfifo_writel(next_timing->emc_mrw4, EMC_MRW4, 0);
2375         } else if (dram_type == DRAM_TYPE_DDR3) {
2376                 if (opt_dll_mode == DLL_ON)
2377                         ccfifo_writel(next_timing->emc_emrs &
2378                                       ~EMC_EMRS_USE_EMRS_LONG_CNT, EMC_EMRS, 0);
2379                 ccfifo_writel(next_timing->emc_emrs2 &
2380                               ~EMC_EMRS2_USE_EMRS2_LONG_CNT, EMC_EMRS2, 0);
2381                 ccfifo_writel(next_timing->emc_mrs |
2382                               EMC_EMRS_USE_EMRS_LONG_CNT, EMC_MRS, 0);
2383         }
2384
2385         /* Step 19:
2386          *   ZQCAL for LPDDR3/DDR3
2387          */
2388         emc_cc_dbg(STEPS, "Step 19\n");
2389         if (opt_zcal_en_cc) {
2390                 if (dram_type == DRAM_TYPE_LPDDR2) {
2391                         u32 r;
2392
2393                         zq_op = opt_cc_short_zcal  ? 0x56 : 0xAB;
2394                         zcal_wait_time_ps = opt_cc_short_zcal  ? 90000 : 360000;
2395                         zcal_wait_time_clocks = div_o3(zcal_wait_time_ps,
2396                                                     destination_clock_period);
2397                         r = zcal_wait_time_clocks <<
2398                                 EMC_MRS_WAIT_CNT2_MRS_EXT2_WAIT_CNT_SHIFT |
2399                                 zcal_wait_time_clocks <<
2400                                 EMC_MRS_WAIT_CNT2_MRS_EXT1_WAIT_CNT_SHIFT;
2401                         ccfifo_writel(r, EMC_MRS_WAIT_CNT2, 0);
2402                         ccfifo_writel(2 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
2403                                       EMC_MRW_USE_MRW_EXT_CNT |
2404                                       10 << EMC_MRW_MRW_MA_SHIFT |
2405                                       zq_op << EMC_MRW_MRW_OP_SHIFT,
2406                                       EMC_MRW, 0);
2407                         if (dram_dev_num == TWO_RANK) {
2408                                 r = 1 << EMC_MRW_MRW_DEV_SELECTN_SHIFT |
2409                                         EMC_MRW_USE_MRW_EXT_CNT |
2410                                         10 << EMC_MRW_MRW_MA_SHIFT |
2411                                         zq_op << EMC_MRW_MRW_OP_SHIFT;
2412                                 ccfifo_writel(r, EMC_MRW, 0);
2413                         }
2414                 } else if (dram_type == DRAM_TYPE_DDR3) {
2415                         zq_op = opt_cc_short_zcal ? 0 : EMC_ZQ_CAL_LONG;
2416                         ccfifo_writel(zq_op | 2 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2417                                       EMC_ZQ_CAL_ZQ_CAL_CMD, EMC_ZQ_CAL, 0);
2418                         if (dram_dev_num == TWO_RANK)
2419                                 ccfifo_writel(zq_op |
2420                                               1 << EMC_ZQ_CAL_DEV_SEL_SHIFT |
2421                                               EMC_ZQ_CAL_ZQ_CAL_CMD,
2422                                               EMC_ZQ_CAL, 0);
2423                 }
2424         }
2425
2426         /* Step 20:
2427          *   Issue ref and optional QRST.
2428          */
2429         emc_cc_dbg(STEPS, "Step 20\n");
2430         if (dram_type != DRAM_TYPE_LPDDR4)
2431                 ccfifo_writel(0, EMC_REF, 0);
2432
2433         if (opt_do_sw_qrst) {
2434                 ccfifo_writel(1, EMC_ISSUE_QRST, 0);
2435                 ccfifo_writel(0, EMC_ISSUE_QRST, 2);
2436         }
2437
2438         /* Step 21:
2439          *   Restore ZCAL and ZCAL interval.
2440          */
2441         emc_cc_dbg(STEPS, "Step 21\n");
2442         if (save_restore_clkstop_pd || opt_zcal_en_cc) {
2443                 ccfifo_writel(emc_dbg_o | EMC_DBG_WRITE_MUX_ACTIVE, EMC_DBG, 0);
2444                 if (opt_zcal_en_cc && dram_type != DRAM_TYPE_LPDDR4)
2445                         ccfifo_writel(next_timing->
2446                                       burst_regs[EMC_ZCAL_INTERVAL_INDEX],
2447                                       EMC_ZCAL_INTERVAL, 0);
2448
2449                 if (save_restore_clkstop_pd)
2450                         ccfifo_writel(next_timing->burst_regs[EMC_CFG_INDEX] &
2451                                       ~EMC_CFG_DYN_SELF_REF, EMC_CFG, 0);
2452                 ccfifo_writel(emc_dbg_o, EMC_DBG, 0);
2453         }
2454
2455         /* Step 22:
2456          *   Restore EMC_CFG_PIPE_CLK.
2457          */
2458         emc_cc_dbg(STEPS, "Step 22\n");
2459         ccfifo_writel(emc_cfg_pipe_clk_o, EMC_CFG_PIPE_CLK, 0);
2460
2461         /* Step 23:
2462          */
2463         emc_cc_dbg(STEPS, "Step 23\n");
2464
2465         /* Fix: rename tmp to something meaningful. */
2466         tmp = emc_readl(EMC_CFG_DIG_DLL);
2467         tmp |= EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_TRAFFIC;
2468         tmp &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_RW_UNTIL_LOCK;
2469         tmp &= ~EMC_CFG_DIG_DLL_CFG_DLL_STALL_ALL_UNTIL_LOCK;
2470         tmp = (tmp & ~EMC_CFG_DIG_DLL_CFG_DLL_MODE_MASK) |
2471                 (2 << EMC_CFG_DIG_DLL_CFG_DLL_MODE_SHIFT);
2472         emc_writel(tmp, EMC_CFG_DIG_DLL);
2473
2474         /* Clock change. Woot. BUG()s out if something fails. */
2475         do_clock_change(clksrc);
2476
2477         /* Step 24:
2478          *   Save training results. Removed.
2479          */
2480
2481         /* Step 25:
2482          *   Program MC updown registers.
2483          */
2484         emc_cc_dbg(STEPS, "Step 25\n");
2485
2486         if (next_timing->rate > last_timing->rate) {
2487                 for (i = 0; i < next_timing->la_scale_regs_num; i++)
2488                         __raw_writel(next_timing->la_scale_regs[i],
2489                                      la_scale_off_regs[i]);
2490                 emc_timing_update(0);
2491         }
2492
2493         /* Step 26:
2494          *   Restore ZCAL registers.
2495          */
2496         emc_cc_dbg(STEPS, "Step 26\n");
2497         if (dram_type == DRAM_TYPE_LPDDR4) {
2498                 emc_set_shadow_bypass(ACTIVE);
2499                 emc_writel(next_timing->burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
2500                            EMC_ZCAL_WAIT_CNT);
2501                 emc_writel(next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX],
2502                            EMC_ZCAL_INTERVAL);
2503                 emc_set_shadow_bypass(ASSEMBLY);
2504         }
2505
2506         if (dram_type != DRAM_TYPE_LPDDR4 &&
2507             opt_zcal_en_cc && !opt_short_zcal && opt_cc_short_zcal) {
2508                 udelay(2);
2509
2510                 emc_set_shadow_bypass(ACTIVE);
2511                 if (dram_type == DRAM_TYPE_LPDDR2)
2512                         emc_writel(next_timing->
2513                                   burst_regs[EMC_MRS_WAIT_CNT_INDEX],
2514                                   EMC_MRS_WAIT_CNT);
2515                 else if (dram_type == DRAM_TYPE_DDR3)
2516                         emc_writel(next_timing->
2517                                    burst_regs[EMC_ZCAL_WAIT_CNT_INDEX],
2518                                    EMC_ZCAL_WAIT_CNT);
2519                 emc_set_shadow_bypass(ASSEMBLY);
2520         }
2521
2522         /* Step 27:
2523          *   Restore EMC_CFG, FDPD registers.
2524          */
2525         emc_cc_dbg(STEPS, "Step 27\n");
2526         emc_set_shadow_bypass(ACTIVE);
2527         emc_writel(next_timing->burst_regs[EMC_CFG_INDEX], EMC_CFG);
2528         emc_set_shadow_bypass(ASSEMBLY);
2529         emc_writel(next_timing->emc_fdpd_ctrl_cmd_no_ramp,
2530                    EMC_FDPD_CTRL_CMD_NO_RAMP);
2531         emc_writel(next_timing->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
2532
2533         /* Step 28:
2534          *   Training recover. Removed.
2535          */
2536         emc_cc_dbg(STEPS, "Step 28\n");
2537
2538         emc_set_shadow_bypass(ACTIVE);
2539         emc_writel(next_timing->burst_regs[EMC_PMACRO_AUTOCAL_CFG_COMMON_INDEX],
2540                    EMC_PMACRO_AUTOCAL_CFG_COMMON);
2541         emc_set_shadow_bypass(ASSEMBLY);
2542
2543         /* Step 29:
2544          *   Power fix WAR.
2545          */
2546         emc_cc_dbg(STEPS, "Step 29\n");
2547         emc_writel(EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE0 |
2548                    EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE1 |
2549                    EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE2 |
2550                    EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE3 |
2551                    EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE4 |
2552                    EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE5 |
2553                    EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE6 |
2554                    EMC_PMACRO_CFG_PM_GLOBAL_0_DISABLE_CFG_BYTE7,
2555                    EMC_PMACRO_CFG_PM_GLOBAL_0);
2556         emc_writel(EMC_PMACRO_TRAINING_CTRL_0_CH0_TRAINING_E_WRPTR,
2557                    EMC_PMACRO_TRAINING_CTRL_0);
2558         emc_writel(EMC_PMACRO_TRAINING_CTRL_1_CH1_TRAINING_E_WRPTR,
2559                    EMC_PMACRO_TRAINING_CTRL_1);
2560         emc_writel(0, EMC_PMACRO_CFG_PM_GLOBAL_0);
2561
2562         /* Step 30:
2563          *   Re-enable autocal.
2564          */
2565         emc_cc_dbg(STEPS, "Step 30\n");
2566         emc_auto_cal_config = next_timing->emc_auto_cal_config;
2567         emc_writel(emc_auto_cal_config, EMC_AUTO_CAL_CONFIG);
2568
2569         /* Step 31:
2570          *   Restore FSP to account for switch back. Only needed in training.
2571          */
2572         emc_cc_dbg(STEPS, "Step 31\n");
2573
2574         /* Done! Yay. */
2575 }
2576
2577 static inline void emc_get_timing(struct tegra21_emc_table *timing)
2578 {
2579         int i;
2580
2581         /* Burst updates depends on previous state; burst_up_down are
2582          * stateless. */
2583         for (i = 0; i < timing->burst_regs_num; i++) {
2584                 if (burst_reg_off[i])
2585                         timing->burst_regs[i] = __raw_readl(burst_reg_off[i]);
2586                 else
2587                         timing->burst_regs[i] = 0;
2588         }
2589
2590         for (i = 0; i < timing->burst_regs_per_ch_num; i++)
2591                 timing->burst_regs_per_ch[i] =
2592                         __raw_readl(burst_perch_reg_off[i]);
2593
2594         for (i = 0; i < timing->trim_regs_num; i++)
2595                 timing->trim_regs[i] = __raw_readl(trim_reg_off[i]);
2596
2597         for (i = 0; i < timing->trim_regs_per_ch_num; i++)
2598                 timing->trim_regs_per_ch[i] =
2599                         __raw_readl(trim_perch_reg_off[i]);
2600
2601         for (i = 0; i < timing->vref_regs_num; i++)
2602                 timing->vref_regs[i] = __raw_readl(vref_reg_off[i]);
2603
2604         for (i = 0; i < timing->burst_mc_regs_num; i++)
2605                 timing->burst_mc_regs[i] = __raw_readl(burst_mc_reg_off[i]);
2606
2607         for (i = 0; i < timing->la_scale_regs_num; i++)
2608                 timing->la_scale_regs[i] = __raw_readl(la_scale_off_regs[i]);
2609
2610         /* TODO: fill in necessary table registers. */
2611
2612         timing->rate = clk_get_rate_locked(emc) / 1000;
2613 }
2614
2615 /* FIXME: expose latency interface */
2616 u32 tegra21_get_dvfs_clk_change_latency_nsec(unsigned long emc_freq_khz)
2617 {
2618         int i;
2619
2620         if (!tegra_emc_table)
2621                 goto default_val;
2622
2623         if (emc_freq_khz > tegra_emc_table[tegra_emc_table_size - 1].rate) {
2624                 i = tegra_emc_table_size - 1;
2625                 if (tegra_emc_table[i].clock_change_latency != 0)
2626                         return tegra_emc_table[i].clock_change_latency;
2627                 else
2628                         goto default_val;
2629         }
2630
2631         for (i = get_start_idx(emc_freq_khz); i < tegra_emc_table_size; i++) {
2632                 if (tegra_emc_table[i].rate == emc_freq_khz)
2633                         break;
2634
2635                 if (tegra_emc_table[i].rate > emc_freq_khz) {
2636                         /* emc_freq_khz was not found in the emc table. Use the
2637                            DVFS latency value of the EMC frequency just below
2638                            emc_freq_khz. */
2639                         i--;
2640                         break;
2641                 }
2642         }
2643
2644         if (tegra_emc_table[i].clock_change_latency != 0)
2645                 return tegra_emc_table[i].clock_change_latency;
2646
2647 default_val:
2648         /* The DVFS clock change latency value couldn't be found. Use
2649            a default value. */
2650         WARN_ONCE(1, "%s: Couldn't find DVFS clock change latency "
2651                         "value - using default value\n",
2652                 __func__);
2653         return 2000;
2654 }
2655
2656 static const struct tegra21_emc_table *emc_get_table(
2657         unsigned long over_temp_state)
2658 {
2659         if ((over_temp_state == DRAM_OVER_TEMP_THROTTLE) &&
2660             (tegra_emc_table_derated != NULL))
2661                 return tegra_emc_table_derated;
2662         else
2663                 return tegra_emc_table;
2664 }
2665
2666 /* The EMC registers have shadow registers. When the EMC clock is updated
2667  * in the clock controller, the shadow registers are copied to the active
2668  * registers, allowing glitchless memory bus frequency changes.
2669  * This function updates the shadow registers for a new clock frequency,
2670  * and relies on the clock lock on the emc clock to avoid races between
2671  * multiple frequency changes. In addition access lock prevents concurrent
2672  * access to EMC registers from reading MRR registers */
int tegra_emc_set_rate_on_parent(unsigned long rate, struct clk *p)
{
	int i;
	u32 clk_setting;
	const struct tegra21_emc_table *last_timing;
	const struct tegra21_emc_table *current_table;
	unsigned long flags;
	s64 last_change_delay;
	struct emc_sel *sel;

	if (!tegra_emc_table)
		return -EINVAL;

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	/* Find the table entry that matches the requested rate exactly;
	 * entries with a NULL input were invalidated at init/purge time. */
	i = get_start_idx(rate);
	for (; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		if (tegra_emc_table[i].rate == rate)
			break;
	}

	if (i >= tegra_emc_table_size)
		return -EINVAL;

	if (!emc_timing) {
		/* can not assume that boot timing matches dfs table even
		   if boot frequency matches one of the table nodes */
		emc_get_timing(&start_timing);
		last_timing = &start_timing;
	} else
		last_timing = emc_timing;

	/* Select settings of matching pll_m(b): if the requested parent is
	 * not the table's primary input, use the alternate (pll_mb) setting */
	sel = &tegra_emc_clk_sel[i];
	clk_setting = (p == sel->input) ?
		sel->value : tegra_emc_clk_sel_b[i].value;

	/* Enforce a minimum delay between consecutive clock changes
	 * (skipped while timekeeping is suspended, e.g. during resume). */
	if (!timekeeping_suspended) {
		last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
		if ((last_change_delay >= 0) &&
		    (last_change_delay < clkchange_delay))
			udelay(clkchange_delay - (int)last_change_delay);
	}

	/* The access lock also serializes against MRR register readers. */
	spin_lock_irqsave(&emc_access_lock, flags);
	/* Pick EMC table based on the status of the over temp state flag */
	current_table = emc_get_table(dram_over_temp_state);
	emc_set_clock(&current_table[i], last_timing, 0, clk_setting);
	clkchange_time = timekeeping_suspended ? clkchange_time : ktime_get();
	emc_timing = &current_table[i];
	tegra_mc_divider_update(emc);
	spin_unlock_irqrestore(&emc_access_lock, flags);

	emc_last_stats_update(i);

	pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

	return 0;
}
2736
2737 long tegra_emc_round_rate_updown(unsigned long rate, bool up)
2738 {
2739         int i;
2740         unsigned long table_rate;
2741
2742         if (!tegra_emc_table)
2743                 return clk_get_rate_locked(emc); /* no table - no rate change */
2744
2745         if (!emc_enable)
2746                 return -EINVAL;
2747
2748         pr_debug("%s: %lu\n", __func__, rate);
2749
2750         /* Table entries specify rate in kHz */
2751         rate = rate / 1000;
2752
2753         i = get_start_idx(rate);
2754         for (; i < tegra_emc_table_size; i++) {
2755                 if (tegra_emc_clk_sel[i].input == NULL)
2756                         continue;       /* invalid entry */
2757
2758                 table_rate = tegra_emc_table[i].rate;
2759                 if (table_rate >= rate) {
2760                         if (!up && i && (table_rate > rate)) {
2761                                 i--;
2762                                 table_rate = tegra_emc_table[i].rate;
2763                         }
2764                         pr_debug("%s: using %lu\n", __func__, table_rate);
2765                         last_round_idx = i;
2766                         return table_rate * 1000;
2767                 }
2768         }
2769
2770         return -EINVAL;
2771 }
2772
/* Predict (and prepare) the parent clock to use for @rate. May set the
 * rate of a non-current pll_m/pll_mb as a side effect, so the subsequent
 * rate change can switch to an already-programmed PLL. Returns NULL when
 * no parent can serve the requested rate. */
struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
{
	int i;
	unsigned long pll_rate;
	struct clk *p, *p_new;

	if (!tegra_emc_table) {
		/* No table: only the current rate/parent is usable. */
		if (rate == clk_get_rate_locked(emc)) {
			*div_value = emc->div - 2;
			return emc->parent;
		}
		return NULL;
	}

	pr_debug("%s: %lu\n", __func__, rate);

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	i = get_start_idx(rate);
	for (; i < tegra_emc_table_size; i++) {
		if (tegra_emc_table[i].rate == rate) {
			p_new = tegra_emc_clk_sel[i].input;
			if (!p_new)
				continue;

			pll_rate = tegra_emc_clk_sel[i].input_rate;
			*div_value = (tegra_emc_clk_sel[i].value &
				      EMC_CLK_EMC_2X_CLK_DIVISOR_MASK) >>
				EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT;

			/*
			 * pll_m/pll_mb ping-pong:
			 * - select current parent when its rate matches table
			 * - select pll_m or pll_mb, when it is not current
			 *   parent; set pll rate if it is not matching table
			 */
			p = clk_get_parent(emc);
			if (pll_rate == clk_get_rate(p))
				return p;

			/* Primary input is idle: reprogram it if needed. */
			if (p_new != p) {
				int ret = 0;
				if (pll_rate != clk_get_rate(p_new))
					ret = clk_set_rate(p_new, pll_rate);
				if (!ret)
					return p_new;
			}

			/* Fall back to the alternate (pll_mb) input. */
			p_new = tegra_emc_clk_sel_b[i].input;
			if (p_new != p) {
				if (pll_rate != clk_get_rate(p_new)) {
					if (clk_set_rate(p_new, pll_rate))
						return NULL;
				}
				return p_new;
			}
		}
	}
	return NULL;
}
2834
2835 static inline const struct clk_mux_sel *get_emc_input(u32 val)
2836 {
2837         const struct clk_mux_sel *sel;
2838
2839         for (sel = emc->inputs; sel->input != NULL; sel++) {
2840                 if (sel->value == val)
2841                         break;
2842         }
2843         return sel;
2844 }
2845
/* Validate one EMC table entry against the clock tree and fill in the
 * corresponding tegra_emc_clk_sel / tegra_emc_clk_sel_b slots. Returns
 * -EINVAL (after a warning) when the entry cannot be used. */
static int find_matching_input(const struct tegra21_emc_table *table,
	struct clk *pll_m, struct clk *pll_mb, int sel_idx)
{
	u32 div_value = (table->src_sel_reg &
			 EMC_CLK_EMC_2X_CLK_DIVISOR_MASK) >>
		EMC_CLK_EMC_2X_CLK_DIVISOR_SHIFT;
	u32 src_value = (table->src_sel_reg & EMC_CLK_EMC_2X_CLK_SRC_MASK) >>
		EMC_CLK_EMC_2X_CLK_SRC_SHIFT;

	unsigned long input_rate = 0;
	unsigned long table_rate = table->rate * 1000; /* table rate in kHz */
	struct emc_sel *emc_clk_sel = &tegra_emc_clk_sel[sel_idx];
	struct emc_sel *emc_clk_sel_b = &tegra_emc_clk_sel_b[sel_idx];
	const struct clk_mux_sel *sel = get_emc_input(src_value);

	/* Only even divider values are supported for EMC. */
	if (div_value & 0x1) {
		pr_warn("tegra: invalid odd divider for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	if (!sel->input) {
		pr_warn("tegra: no matching input found for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}

	/* The SAME_FREQ flag must agree between the EMC clock source
	 * register and the MC arbitration register of this entry. */
	if (!(table->src_sel_reg & EMC_CLK_MC_EMC_SAME_FREQ) !=
	    !(MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
	      table->burst_mc_regs[MC_EMEM_ARB_MISC0_INDEX])) {
		pr_warn("tegra: ambiguous EMC to MC ratio for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}

	if (sel->input == pll_m) {
		/* pll_m(b) can scale to match target rate */
		input_rate = table_rate * (1 + div_value / 2);
	} else {
		/* all other sources are fixed, must exactly match the rate */
		input_rate = clk_get_rate(sel->input);
		if (input_rate != (table_rate * (1 + div_value / 2))) {
			pr_warn("tegra: EMC rate %lu does not match %s rate %lu\n",
				table_rate, sel->input->name, input_rate);
			return -EINVAL;
		}
	}

	/* Get ready emc clock selection settings for this table rate */
	emc_clk_sel->input = sel->input;
	emc_clk_sel->input_rate = input_rate;
	emc_clk_sel->value = table->src_sel_reg;

	emc_clk_sel_b->input = sel->input;
	emc_clk_sel_b->input_rate = input_rate;
	emc_clk_sel_b->value = table->src_sel_reg;

	/* In the alternate selection, replace PLLM with PLLMB when PLLMB
	 * is available, preserving the LJ variant of the source. */
	if (pll_mb && (sel->input == pll_m)) {
		u32 src_value_b = src_value == EMC_CLK_SOURCE_PLLM_LJ ?
			EMC_CLK_SOURCE_PLLMB_LJ : EMC_CLK_SOURCE_PLLMB;
		emc_clk_sel_b->input = pll_mb;
		emc_clk_sel_b->value &= ~EMC_CLK_EMC_2X_CLK_SRC_MASK;
		emc_clk_sel_b->value |= src_value_b <<
			EMC_CLK_EMC_2X_CLK_SRC_SHIFT;
	}

	return 0;
}
2914
2915
2916 static int emc_core_millivolts[MAX_DVFS_FREQS];
2917
2918 static void adjust_emc_dvfs_table(const struct tegra21_emc_table *table,
2919                                   int table_size)
2920 {
2921         int i, j, mv;
2922         unsigned long rate;
2923
2924         BUG_ON(table_size > MAX_DVFS_FREQS);
2925
2926         for (i = 0, j = 0; j < table_size; j++) {
2927                 if (tegra_emc_clk_sel[j].input == NULL)
2928                         continue;       /* invalid entry */
2929
2930                 rate = table[j].rate * 1000;
2931                 mv = table[j].emc_min_mv;
2932
2933                 if ((i == 0) || (mv > emc_core_millivolts[i-1])) {
2934                         /* advance: voltage has increased */
2935                         emc->dvfs->freqs[i] = rate;
2936                         emc_core_millivolts[i] = mv;
2937                         i++;
2938                 } else {
2939                         /* squash: voltage has not increased */
2940                         emc->dvfs->freqs[i-1] = rate;
2941                 }
2942         }
2943
2944         emc->dvfs->millivolts = emc_core_millivolts;
2945         emc->dvfs->num_freqs = i;
2946 }
2947
2948 /*
2949  * pll_m can be scaled provided pll_mb is available;
2950  * if not - remove rates that require pll_m scaling
2951  */
2952 static int purge_emc_table(unsigned long max_rate)
2953 {
2954         int i;
2955         int ret = 0;
2956
2957         pr_warn("tegra: cannot scale pll_m since pll_mb is not available:\n");
2958         pr_warn("       removed not supported entries from the table:\n");
2959
2960         /* made all entries with non matching rate invalid */
2961         for (i = 0; i < tegra_emc_table_size; i++) {
2962                 struct emc_sel *sel = &tegra_emc_clk_sel[i];
2963                 struct emc_sel *sel_b = &tegra_emc_clk_sel_b[i];
2964                 if (sel->input) {
2965                         if (clk_get_rate(sel->input) != sel->input_rate) {
2966                                 pr_warn("       EMC rate %lu\n",
2967                                         tegra_emc_table[i].rate * 1000);
2968                                 sel->input = NULL;
2969                                 sel->input_rate = 0;
2970                                 sel->value = 0;
2971                                 *sel_b = *sel;
2972                                 if (max_rate == tegra_emc_table[i].rate)
2973                                         ret = -EINVAL;
2974                         }
2975                 }
2976         }
2977         return ret;
2978 }
2979
/* Validate the DT-provided EMC table (and optional derated table) against
 * the DRAM type, clock tree and DVFS limits, then install it as the active
 * tegra_emc_table. Returns 0 on success, -ENODATA/-EINVAL on any
 * validation failure (the table is then not installed). */
static int init_emc_table(const struct tegra21_emc_table *table,
			  const struct tegra21_emc_table *table_der,
			  int table_size)
{
	int i, mv;
	bool max_entry = false;
	bool emc_max_dvfs_sel = 1; /* FIXME: restore get_emc_max_dvfs(); */
	unsigned long boot_rate, max_rate;
	struct clk *pll_m = tegra_get_clock_by_name("pll_m");
	struct clk *pll_mb = tegra_get_clock_by_name("pll_mb");

	if (!tegra_clk_is_parent_allowed(emc, pll_mb)) {
		WARN(1, "tegra: PLLMB can not be used for EMC DVFS\n");
		pll_mb = NULL;
	}

	emc_stats.clkchange_count = 0;
	spin_lock_init(&emc_stats.spinlock);
	emc_stats.last_update = get_jiffies_64();
	/* Sentinel: no table entry selected yet. */
	emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

	if ((dram_type != DRAM_TYPE_LPDDR4) &&
	    (dram_type != DRAM_TYPE_LPDDR2) &&
	    (dram_type != DRAM_TYPE_DDR3)) {
		pr_err("tegra: not supported DRAM type %u\n", dram_type);
		return -ENODATA;
	}

	if (!table || !table_size) {
		pr_err("tegra: EMC DFS table is empty\n");
		return -ENODATA;
	}

	boot_rate = clk_get_rate(emc) / 1000;
	max_rate = boot_rate;

	tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
	/* Only table revision 0x5 is understood by this driver. */
	switch (table[0].rev) {
	case 0x5:
		start_timing.burst_regs_num = table[0].burst_regs_num;
		break;
	default:
		pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
			table[0].rev);
		return -ENODATA;
	}

	if (table_der) {
		/* Check that the derated table and non-derated table match. */
		for (i = 0; i < tegra_emc_table_size; i++) {
			if (table[i].rate        != table_der[i].rate ||
			    table[i].rev         != table_der[i].rev ||
			    table[i].emc_min_mv  != table_der[i].emc_min_mv ||
			    table[i].src_sel_reg != table_der[i].src_sel_reg) {
				pr_err("tegra: emc: Derated table mismatch.\n");
				return -EINVAL;
			}
		}
		pr_info("tegra: emc: Derated table is valid.\n");
	}

	/* Match EMC source/divider settings with table entries */
	for (i = 0; i < tegra_emc_table_size; i++) {
		unsigned long table_rate = table[i].rate;

		/* Stop: "no-rate" entry, or entry violating ascending order */
		if (!table_rate || (i && ((table_rate <= table[i-1].rate) ||
			(table[i].emc_min_mv < table[i-1].emc_min_mv)))) {
			pr_warn("tegra: EMC rate entry %lu is not ascending\n",
				table_rate);
			break;
		}

		BUG_ON(table[i].rev != table[0].rev);

		/* Entries that fail clock matching stay invalid (NULL input)
		 * and are skipped by rate-change/rounding code. */
		if (find_matching_input(&table[i], pll_m, pll_mb, i))
			continue;

		if (table_rate == boot_rate)
			emc_stats.last_sel = i;

		if (emc_max_dvfs_sel) {
			/* EMC max rate = max table entry above boot rate */
			if (table_rate >= max_rate) {
				max_rate = table_rate;
				max_entry = true;
			}
		} else if (table_rate == max_rate) {
			/* EMC max rate = boot rate */
			max_entry = true;
			break;
		}
	}

	/* Validate EMC rate and voltage limits */
	if (!max_entry) {
		pr_err("tegra: invalid EMC DFS table: entry for max rate"
		       " %lu kHz is not found\n", max_rate);
		return -ENODATA;
	}

	if (emc_stats.last_sel == TEGRA_EMC_TABLE_MAX_SIZE) {
		pr_err("tegra: invalid EMC DFS table: entry for boot rate"
		       " %lu kHz is not found\n", boot_rate);
		return -ENODATA;
	}

	tegra_emc_table = table;
	tegra_emc_table_derated = table_der;

	/*
	 * Purge rates that cannot be reached because PLLMB can not be used
	 * If maximum rate was purged, do not install table.
	 */
	if (!pll_mb && purge_emc_table(max_rate)) {
		pr_err("tegra: invalid EMC DFS table: entry for max rate"
		       " %lu kHz can not be reached\n", max_rate);
		return -ENODATA;
	}
	tegra_init_max_rate(emc, max_rate * 1000);

	if (emc->dvfs) {
		adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
		/* Max rate must be reachable at/below nominal voltage. */
		mv = tegra_dvfs_predict_peak_millivolts(emc, max_rate * 1000);
		if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
			tegra_emc_table = NULL;
			pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
			       " kHz does not match nominal voltage %d\n",
			       max_rate, emc->dvfs->max_millivolts);
			return -ENODATA;
		}
	}

	pr_info("tegra: validated EMC DFS table\n");

	return 0;
}
3117
3118 #ifdef CONFIG_PASR
3119 static bool tegra21_is_lpddr3(void)
3120 {
3121         return (dram_type == DRAM_TYPE_LPDDR2);
3122 }
3123
3124 static void tegra21_pasr_apply_mask(u16 *mem_reg, void *cookie)
3125 {
3126         u32 val = 0;
3127         int device = (int)(uintptr_t)cookie;
3128
3129         val = TEGRA_EMC_MODE_REG_17 | *mem_reg;
3130         val |= device << TEGRA_EMC_MRW_DEV_SHIFT;
3131
3132         emc_writel(val, EMC_MRW);
3133
3134         pr_debug("%s: cookie = %d mem_reg = 0x%04x val = 0x%08x\n", __func__,
3135                         (int)(uintptr_t)cookie, *mem_reg, val);
3136 }
3137
3138 static void tegra21_pasr_remove_mask(phys_addr_t base, void *cookie)
3139 {
3140         u16 mem_reg = 0;
3141
3142         if (!pasr_register_mask_function(base, NULL, cookie))
3143                         tegra21_pasr_apply_mask(&mem_reg, cookie);
3144
3145 }
3146
/* Register tegra21_pasr_apply_mask() as the PASR mask writer for the
 * memory region starting at @base; @cookie selects the DRAM device. */
static int tegra21_pasr_set_mask(phys_addr_t base, void *cookie)
{
	return pasr_register_mask_function(base, &tegra21_pasr_apply_mask,
					cookie);
}
3152
3153 static int tegra21_pasr_enable(const char *arg, const struct kernel_param *kp)
3154 {
3155         unsigned int old_pasr_enable;
3156         void *cookie;
3157         int num_devices;
3158         u64 device_size;
3159         u64 size_mul;
3160         int ret = 0;
3161
3162         if (!tegra21_is_lpddr3())
3163                 return -ENOSYS;
3164
3165         old_pasr_enable = pasr_enable;
3166         param_set_int(arg, kp);
3167
3168         if (old_pasr_enable == pasr_enable)
3169                 return ret;
3170
3171         num_devices = 1 << (mc_readl(MC_EMEM_ADR_CFG) & BIT(0));
3172         size_mul = 1 << ((emc_readl(EMC_FBIO_CFG5) >> 4) & BIT(0));
3173
3174         /* Cookie represents the device number to write to MRW register.
3175          * 0x2 to for only dev0, 0x1 for dev1.
3176          */
3177         if (pasr_enable == 0) {
3178                 cookie = (void *)(int)TEGRA_EMC_MRW_DEV1;
3179
3180                 tegra21_pasr_remove_mask(TEGRA_DRAM_BASE, cookie);
3181
3182                 if (num_devices == 1)
3183                         goto exit;
3184
3185                 cookie = (void *)(int)TEGRA_EMC_MRW_DEV2;
3186                 /* Next device is located after first device, so read DEV0 size
3187                  * to decide base address for DEV1 */
3188                 device_size = 1 << ((mc_readl(MC_EMEM_ADR_CFG_DEV0) >>
3189                                         MC_EMEM_DEV_SIZE_SHIFT) &
3190                                         MC_EMEM_DEV_SIZE_MASK);
3191                 device_size = device_size * size_mul * SZ_4M;
3192
3193                 tegra21_pasr_remove_mask(TEGRA_DRAM_BASE + device_size, cookie);
3194         } else {
3195                 cookie = (void *)(int)TEGRA_EMC_MRW_DEV1;
3196
3197                 ret = tegra21_pasr_set_mask(TEGRA_DRAM_BASE, cookie);
3198
3199                 if (num_devices == 1 || ret)
3200                         goto exit;
3201
3202                 cookie = (void *)(int)TEGRA_EMC_MRW_DEV2;
3203
3204                 /* Next device is located after first device, so read DEV0 size
3205                  * to decide base address for DEV1 */
3206                 device_size = 1 << ((mc_readl(MC_EMEM_ADR_CFG_DEV0) >>
3207                                         MC_EMEM_DEV_SIZE_SHIFT) &
3208                                         MC_EMEM_DEV_SIZE_MASK);
3209                 device_size = device_size * size_mul * SZ_4M;
3210
3211                 ret = tegra21_pasr_set_mask(TEGRA_DRAM_BASE + device_size, cookie);
3212         }
3213
3214 exit:
3215         return ret;
3216 }
3217
/* Expose "pasr_enable" as a writable module parameter; writes are routed
 * through tegra21_pasr_enable() so DRAM PASR masks track the value. */
static struct kernel_param_ops tegra21_pasr_enable_ops = {
	.set = tegra21_pasr_enable,
	.get = param_get_int,
};
module_param_cb(pasr_enable, &tegra21_pasr_enable_ops, &pasr_enable, 0644);
3223 #endif
3224
/* FIXME: add to clock resume */
/*
 * Enable MC arbitration hysteresis (hold-off) for the listed clients.
 * Each write sets the per-client hysteresis-enable bits in one of the
 * four MC_EMEM_ARB_HYSTERESIS registers; the groupings below follow the
 * register layout, not any functional grouping of the clients.
 *
 * NOTE(review): not currently re-applied on clock resume (see FIXME) —
 * settings are presumably lost across LP0. Confirm against resume path.
 */
void tegra21_mc_holdoff_enable(void)
{
	mc_writel(HYST_DISPLAYHCB | HYST_DISPLAYHC |
		HYST_DISPLAY0CB | HYST_DISPLAY0C | HYST_DISPLAY0BB |
		HYST_DISPLAY0B | HYST_DISPLAY0AB | HYST_DISPLAY0A,
		MC_EMEM_ARB_HYSTERESIS_0_0);
	mc_writel(HYST_VDEDBGW | HYST_VDEBSEVW | HYST_NVENCSWR,
		MC_EMEM_ARB_HYSTERESIS_1_0);
	mc_writel(HYST_DISPLAYT | HYST_GPUSWR | HYST_ISPWBB |
		HYST_ISPWAB | HYST_ISPWB | HYST_ISPWA |
		HYST_VDETPMW | HYST_VDEMBEW,
		MC_EMEM_ARB_HYSTERESIS_2_0);
	mc_writel(HYST_DISPLAYD | HYST_VIW | HYST_VICSWR,
		MC_EMEM_ARB_HYSTERESIS_3_0);
}
3241
3242 static int tegra21_emc_probe(struct platform_device *pdev)
3243 {
3244         struct tegra21_emc_pdata *pdata;
3245         struct resource *res;
3246         int ret;
3247
3248         if (tegra_emc_table) {
3249                 ret = -EINVAL;
3250                 goto out;
3251         }
3252
3253         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3254         if (!res) {
3255                 dev_err(&pdev->dev, "missing register base\n");
3256                 ret = -ENOMEM;
3257                 goto out;
3258         }
3259
3260         pdata = tegra_emc_dt_parse_pdata(pdev);
3261
3262         if (!pdata) {
3263                 dev_err(&pdev->dev, "missing platform data\n");
3264                 ret = -ENODATA;
3265                 goto out;
3266         }
3267
3268         pr_info("Loading EMC tables...\n");
3269         ret = init_emc_table(pdata->tables, pdata->tables_derated,
3270                               pdata->num_tables);
3271
3272         if (!ret) {
3273                 tegra_emc_iso_usage_table_init(tegra21_emc_iso_usage,
3274                                 ARRAY_SIZE(tegra21_emc_iso_usage));
3275                 if (emc_enable) {
3276                         unsigned long rate = tegra_emc_round_rate_updown(
3277                                 emc->boot_rate, false);
3278                         if (!IS_ERR_VALUE(rate))
3279                                 tegra_clk_preset_emc_monitor(rate);
3280                 }
3281         }
3282
3283 out:
3284         return ret;
3285 }
3286
/* DT match table: binds this driver to the Tegra21 EMC node. */
static struct of_device_id tegra21_emc_of_match[] = {
	{ .compatible = "nvidia,tegra21-emc", },
	{ },
};
3291
/* No .remove: EMC is a core clock provider and is never unbound. */
static struct platform_driver tegra21_emc_driver = {
	.driver         = {
		.name   = "tegra-emc",
		.owner  = THIS_MODULE,
		.of_match_table = tegra21_emc_of_match
	},
	.probe          = tegra21_emc_probe,
};
3300
/* Register the EMC platform driver; called from board/arch init. */
int __init tegra21_emc_init(void)
{
	return platform_driver_register(&tegra21_emc_driver);
}
3305
/*
 * Drop the cached active-timing pointer (e.g. after the tables become
 * stale) and refresh the MC divider derived from the EMC clock.
 */
void tegra_emc_timing_invalidate(void)
{
	emc_timing = NULL;
	tegra_mc_divider_update(emc);
}
3311
/*
 * One-time init from the clock layer: cache the EMC clock handle, decode
 * the DRAM type from EMC_FBIO_CFG5, and read the device count (bit 0 of
 * MC_EMEM_ADR_CFG encodes 1 or 2 populated devices).
 */
void tegra_emc_dram_type_init(struct clk *c)
{
	emc = c;

	dram_type = (emc_readl(EMC_FBIO_CFG5) &
		     EMC_FBIO_CFG5_DRAM_TYPE_MASK) >>
		EMC_FBIO_CFG5_DRAM_TYPE_SHIFT;

	dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
}
3322
/* Return the DRAM type cached by tegra_emc_dram_type_init(). */
int tegra_emc_get_dram_type(void)
{
	return dram_type;
}
3327
/*
 * Read a DRAM mode register via EMC_MRR.
 *
 * @dev:  0 selects DEV0, non-zero selects DEV1.
 * @addr: mode register address (MA field).
 *
 * Active power-down (ACPD) is temporarily disabled around the read — the
 * original setting is restored afterwards even if the read times out.
 * Caller must hold emc_access_lock.
 *
 * Returns the 16-bit (EMC_MRR_DATA_MASK) MR value on success, -ENODEV if
 * the DRAM is not LPDDR2-class, or the wait_for_update() error code.
 * NOTE: return value overloads data and negative errno — callers use
 * IS_ERR_VALUE() to distinguish.
 */
static int emc_read_mrr(int dev, int addr)
{
	int ret;
	u32 val, emc_cfg;

	if (dram_type != DRAM_TYPE_LPDDR2)
		return -ENODEV;

	/* Make sure no previous MRR result is pending. */
	ret = wait_for_update(EMC_EMC_STATUS,
			      EMC_EMC_STATUS_MRR_DIVLD, false, 0);
	if (ret)
		return ret;

	/* ACPD must be off while issuing MRR; remember it to restore. */
	emc_cfg = emc_readl(EMC_CFG);
	if (emc_cfg & EMC_CFG_DRAM_ACPD) {
		emc_writel(emc_cfg & ~EMC_CFG_DRAM_ACPD, EMC_CFG);
		emc_timing_update(0);
	}

	val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
	val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
	emc_writel(val, EMC_MRR);

	/* Wait for the data-valid flag, then restore ACPD regardless. */
	ret = wait_for_update(EMC_EMC_STATUS,
			      EMC_EMC_STATUS_MRR_DIVLD, true, 0);
	if (emc_cfg & EMC_CFG_DRAM_ACPD) {
		emc_writel(emc_cfg, EMC_CFG);
		emc_timing_update(0);
	}
	if (ret)
		return ret;

	val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
	return val;
}
3363
3364 int tegra_emc_get_dram_temperature(void)
3365 {
3366         int mr4 = 0;
3367         unsigned long flags;
3368
3369         spin_lock_irqsave(&emc_access_lock, flags);
3370
3371         mr4 = emc_read_mrr(0, 4);
3372         if (IS_ERR_VALUE(mr4)) {
3373                 spin_unlock_irqrestore(&emc_access_lock, flags);
3374                 return mr4;
3375         }
3376
3377         spin_unlock_irqrestore(&emc_access_lock, flags);
3378
3379         mr4 = (mr4 & LPDDR2_MR4_TEMP_MASK) >> LPDDR2_MR4_TEMP_SHIFT;
3380         return mr4;
3381 }
3382
/*
 * Switch the DRAM over-temperature state (refresh rate / derating).
 *
 * @state: one of the DRAM_OVER_TEMP_* values, at most
 *         DRAM_OVER_TEMP_THROTTLE.
 *
 * Returns 0 on success (including a no-op when the state is unchanged),
 * -ENODEV when the DRAM is not LPDDR2-class or no timing is active,
 * -EINVAL for an out-of-range state.
 */
int tegra_emc_set_over_temp_state(unsigned long state)
{
	int offset;
	unsigned long flags;
	const struct tegra21_emc_table *current_table;
	const struct tegra21_emc_table *new_table;

	if (dram_type != DRAM_TYPE_LPDDR2 || !emc_timing)
		return -ENODEV;

	if (state > DRAM_OVER_TEMP_THROTTLE)
		return -EINVAL;

	/* Silently do nothing if there is no state change. */
	if (state == dram_over_temp_state)
		return 0;

	/*
	 * If derating needs to be turned on/off force a clock change. That
	 * will take care of the refresh as well. If derating is not going to
	 * be changed then all that is needed is an update to the refresh
	 * settings.
	 */
	spin_lock_irqsave(&emc_access_lock, flags);

	current_table = emc_get_table(dram_over_temp_state);
	new_table = emc_get_table(state);
	dram_over_temp_state = state;

	if (current_table != new_table) {
		/*
		 * Same rate, different table: emc_timing points into
		 * current_table, so the pointer difference is the index of
		 * the active rate, reused in new_table.
		 */
		offset = emc_timing - current_table;
		emc_set_clock(&new_table[offset], emc_timing, 0,
			new_table[offset].src_sel_reg |
			EMC_CLK_FORCE_CC_TRIGGER);
		emc_timing = &new_table[offset];
		tegra_mc_divider_update(emc);
	} else {
		/* Only refresh settings change; force a refresh command
		 * when entering any over-temp state. */
		set_over_temp_timing(emc_timing, state);
		emc_timing_update(0);
		if (state != DRAM_OVER_TEMP_NONE)
			emc_writel(EMC_REF_FORCE_CMD, EMC_REF);
	}

	spin_unlock_irqrestore(&emc_access_lock, flags);

	pr_debug("[emc] %s: temp_state: %lu  - selected %s table\n",
		__func__, dram_over_temp_state,
		new_table == tegra_emc_table ? "regular" : "derated");

	return 0;
}
3434
3435
3436 #ifdef CONFIG_TEGRA_USE_NCT
3437 int tegra21_nct_emc_table_init(struct tegra21_emc_pdata *nct_emc_pdata)
3438 {
3439         union nct_item_type *entry = NULL;
3440         struct tegra21_emc_table *mem_table_ptr;
3441         u8 *src, *dest;
3442         unsigned int i, non_zero_freqs;
3443         int ret = 0;
3444
3445         /* Allocating memory for holding a single NCT entry */
3446         entry = kmalloc(sizeof(union nct_item_type), GFP_KERNEL);
3447         if (!entry) {
3448                 pr_err("%s: failed to allocate buffer for single entry. ",
3449                                                                 __func__);
3450                 ret = -ENOMEM;
3451                 goto done;
3452         }
3453         src = (u8 *)entry;
3454
3455         /* Counting the actual number of frequencies present in the table */
3456         non_zero_freqs = 0;
3457         for (i = 0; i < TEGRA_EMC_MAX_FREQS; i++) {
3458                 if (!tegra_nct_read_item(NCT_ID_MEMTABLE + i, entry)) {
3459                         if (entry->tegra_emc_table.tegra21_emc_table.rate > 0) {
3460                                 non_zero_freqs++;
3461                                 pr_info("%s: Found NCT item for freq %lu.\n",
3462                                  __func__,
3463                                  entry->tegra_emc_table.tegra21_emc_table.rate);
3464                         } else
3465                                 break;
3466                 } else {
3467                         pr_err("%s: NCT: Could not read item for %dth freq.\n",
3468                                                                 __func__, i);
3469                         ret = -EIO;
3470                         goto free_entry;
3471                 }
3472         }
3473
3474         /* Allocating memory for the DVFS table */
3475         mem_table_ptr = kmalloc(sizeof(struct tegra21_emc_table) *
3476                                 non_zero_freqs, GFP_KERNEL);
3477         if (!mem_table_ptr) {
3478                 pr_err("%s: Memory allocation for emc table failed.",
3479                                                             __func__);
3480                 ret = -ENOMEM;
3481                 goto free_entry;
3482         }
3483
3484         /* Copy paste the emc table from NCT partition */
3485         for (i = 0; i < non_zero_freqs; i++) {
3486                 /*
3487                  * We reset the whole buffer, to emulate the property
3488                  * of a static variable being initialized to zero
3489                  */
3490                 memset(entry, 0, sizeof(*entry));
3491                 ret = tegra_nct_read_item(NCT_ID_MEMTABLE + i, entry);
3492                 if (!ret) {
3493                         dest = (u8 *)mem_table_ptr + (i * sizeof(struct
3494                                                         tegra21_emc_table));
3495                         memcpy(dest, src, sizeof(struct tegra21_emc_table));
3496                 } else {
3497                         pr_err("%s: Could not copy item for %dth freq.\n",
3498                                                                 __func__, i);
3499                         goto free_mem_table_ptr;
3500                 }
3501         }
3502
3503         /* Setting appropriate pointers */
3504         nct_emc_pdata->tables = mem_table_ptr;
3505         nct_emc_pdata->num_tables = non_zero_freqs;
3506
3507         goto free_entry;
3508
3509 free_mem_table_ptr:
3510         kfree(mem_table_ptr);
3511 free_entry:
3512         kfree(entry);
3513 done:
3514         return ret;
3515 }
3516 #endif
3517
3518 /*
3519  * Given the passed ISO BW find the index into the table of ISO efficiencies.
3520  */
3521 static inline int get_iso_bw_table_idx(unsigned long iso_bw)
3522 {
3523         int i = ARRAY_SIZE(iso_bw_table) - 1;
3524
3525         while (i > 0 && iso_bw_table[i] > iso_bw)
3526                 i--;
3527
3528         return i;
3529 }
3530
3531 /*
3532  * Return the ISO BW efficiency for the attached DRAM type at the passed ISO BW.
3533  * This is used for when only the display is active - OS IDLE.
3534  *
3535  * For now when the DRAM is being temperature throttled return the normal ISO
3536  * efficiency. This will have to change once the throttling efficiency data
3537  * becomes available.
3538  */
3539 static u8 get_iso_bw_os_idle(unsigned long iso_bw)
3540 {
3541         int freq_idx = get_iso_bw_table_idx(iso_bw);
3542
3543         /* On T21- LPDDR2 means LPDDR3. */
3544         if (dram_type == DRAM_TYPE_LPDDR2) {
3545                 if (dram_over_temp_state == DRAM_OVER_TEMP_THROTTLE)
3546                         return tegra21_lpddr3_iso_efficiency_os_idle[freq_idx];
3547                 else
3548                         return tegra21_lpddr3_iso_efficiency_os_idle[freq_idx];
3549         } else if (dram_type == DRAM_TYPE_DDR3) {
3550                 if (dram_over_temp_state == DRAM_OVER_TEMP_THROTTLE)
3551                         return tegra21_ddr3_iso_efficiency_os_idle[freq_idx];
3552                 else
3553                         return tegra21_ddr3_iso_efficiency_os_idle[freq_idx];
3554         } else { /* LPDDR4 */
3555                 if (dram_over_temp_state == DRAM_OVER_TEMP_THROTTLE)
3556                         return tegra21_lpddr4_iso_efficiency_os_idle[freq_idx];
3557                 else
3558                         return tegra21_lpddr4_iso_efficiency_os_idle[freq_idx];
3559         }
3560 }
3561
3562 /*
3563  * Same as get_iso_bw_os_idle() only this is used for when there are other
3564  * engines aside from display running.
3565  */
3566 static u8 get_iso_bw_general(unsigned long iso_bw)
3567 {
3568         int freq_idx = get_iso_bw_table_idx(iso_bw);
3569
3570         /* On T21- LPDDR2 means LPDDR3. */
3571         if (dram_type == DRAM_TYPE_LPDDR2) {
3572                 if (dram_over_temp_state == DRAM_OVER_TEMP_THROTTLE)
3573                         return tegra21_lpddr3_iso_efficiency_general[freq_idx];
3574                 else
3575                         return tegra21_lpddr3_iso_efficiency_general[freq_idx];
3576         } else if (dram_type == DRAM_TYPE_DDR3) {
3577                 if (dram_over_temp_state == DRAM_OVER_TEMP_THROTTLE)
3578                         return tegra21_ddr3_iso_efficiency_general[freq_idx];
3579                 else
3580                         return tegra21_ddr3_iso_efficiency_general[freq_idx];
3581         } else { /* LPDDR4 */
3582                 if (dram_over_temp_state == DRAM_OVER_TEMP_THROTTLE)
3583                         return tegra21_lpddr4_iso_efficiency_general[freq_idx];
3584                 else
3585                         return tegra21_lpddr4_iso_efficiency_general[freq_idx];
3586         }
3587 }
3588
3589 #ifdef CONFIG_DEBUG_FS
3590
3591 static struct dentry *emc_debugfs_root;
3592
/*
 * debugfs "stats" seq_file: time spent at each EMC rate plus the total
 * transition count and last-update timestamp. Entries with no valid clock
 * source are skipped.
 */
static int emc_stats_show(struct seq_file *s, void *data)
{
	int i;

	/* Flush time accounting up to now before printing. */
	emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);

	seq_printf(s, "%-10s %-10s\n", "rate kHz", "time");
	for (i = 0; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		seq_printf(s, "%-10lu %-10llu\n", tegra_emc_table[i].rate,
			cputime64_to_clock_t(emc_stats.time_at_clock[i]));
	}
	seq_printf(s, "%-15s %llu\n", "transitions:",
		   emc_stats.clkchange_count);
	seq_printf(s, "%-15s %llu\n", "time-stamp:",
		   cputime64_to_clock_t(emc_stats.last_update));

	return 0;
}
3614
/* seq_file open hook for the "stats" debugfs node. */
static int emc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_stats_show, inode->i_private);
}
3619
/* Standard single-open seq_file operations for the "stats" node. */
static const struct file_operations emc_stats_fops = {
	.open		= emc_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3626
/*
 * debugfs "table_info" seq_file: revision, table ID and rate for every
 * valid EMC table entry. (The "Table info:" header is re-printed per
 * entry, matching the original output format.)
 */
static int emc_table_info_show(struct seq_file *s, void *data)
{
	int i;
	for (i = 0; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;
		seq_printf(s, "Table info:\n   Rev: 0x%02x\n"
		"   Table ID: %s\n", tegra_emc_table[i].rev,
		tegra_emc_table[i].table_id);
		seq_printf(s, "    %lu\n", tegra_emc_table[i].rate);
	}

	return 0;
}
3641
/* seq_file open hook for the "table_info" debugfs node. */
static int emc_table_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_table_info_show, inode->i_private);
}
3646
/* Standard single-open seq_file operations for the "table_info" node. */
static const struct file_operations emc_table_info_fops = {
	.open		= emc_table_info_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
3653
/*
 * debugfs "dram_temperature" getter. NOTE: a negative errno from
 * tegra_emc_get_dram_temperature() is reported as the attribute value
 * rather than as a read error.
 */
static int dram_temperature_get(void *data, u64 *val)
{
	*val = tegra_emc_get_dram_temperature();
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
			NULL, "%lld\n");
3661
/* debugfs "over_temp_state": read the cached state, writes go through
 * tegra_emc_set_over_temp_state() (which validates the value). */
static int over_temp_state_get(void *data, u64 *val)
{
	*val = dram_over_temp_state;
	return 0;
}
static int over_temp_state_set(void *data, u64 val)
{
	return tegra_emc_set_over_temp_state(val);
}
DEFINE_SIMPLE_ATTRIBUTE(over_temp_state_fops, over_temp_state_get,
			over_temp_state_set, "%llu\n");
3673
/* debugfs "efficiency": BW efficiency percentage, clamped to 100 on
 * write; a write triggers a shared-bus rate re-evaluation. */
static int efficiency_get(void *data, u64 *val)
{
	*val = tegra_emc_bw_efficiency;
	return 0;
}
static int efficiency_set(void *data, u64 val)
{
	tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
	if (emc)
		tegra_clk_shared_bus_update(emc);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
			efficiency_set, "%llu\n");
3689
/*
 * Create the tegra_emc debugfs directory and its nodes. Runs at
 * late_initcall; a no-op (returning 0) when no EMC tables were loaded.
 * On any creation failure the whole directory is torn down and -ENOMEM
 * is returned.
 */
static int __init tegra_emc_debug_init(void)
{
	if (!tegra_emc_table)
		return 0;

	emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
	if (!emc_debugfs_root)
		return -ENOMEM;

	if (!debugfs_create_file(
		"stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
		goto err_out;

	if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
		emc_debugfs_root, (u32 *)&clkchange_delay))
		goto err_out;

	/*
	 * The MR4 temperature readout only works on LPDDR2-class DRAM
	 * (which on T21 includes LPDDR3), so the node is created only for
	 * DRAM_TYPE_LPDDR2.
	 */
	if (dram_type == DRAM_TYPE_LPDDR2 &&
		!debugfs_create_file("dram_temperature",
		S_IRUGO, emc_debugfs_root, NULL, &dram_temperature_fops))
		goto err_out;

	if (!debugfs_create_file("over_temp_state", S_IRUGO | S_IWUSR,
				emc_debugfs_root, NULL, &over_temp_state_fops))
		goto err_out;

	if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
				 emc_debugfs_root, NULL, &efficiency_fops))
		goto err_out;


	if (tegra_emc_iso_usage_debugfs_init(emc_debugfs_root))
		goto err_out;

	if (!debugfs_create_file("table_info", S_IRUGO,
				 emc_debugfs_root, NULL, &emc_table_info_fops))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(emc_debugfs_root);
	return -ENOMEM;
}

late_initcall(tegra_emc_debug_init);
3740 #endif