/*
 * arm: tegra: Create VI/ISP emc clocks
 * (from linux-3.10.git: arch/arm/mach-tegra/tegra12_emc.c)
 */
1 /*
2  * arch/arm/mach-tegra/tegra12_emc.c
3  *
4  * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
18  *
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include <linux/module.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/platform_data/tegra_emc.h>
29 #include <linux/debugfs.h>
30 #include <linux/seq_file.h>
31 #include <linux/hrtimer.h>
32
33 #include <asm/cputime.h>
34
35 #include "clock.h"
36 #include "board.h"
37 #include "dvfs.h"
38 #include "iomap.h"
39 #include "tegra12_emc.h"
40
/*
 * EMC frequency scaling defaults to on/off at build time via
 * CONFIG_TEGRA_EMC_SCALING_ENABLE; either default can be overridden
 * with the "emc_enable" module parameter (0644: root may toggle it
 * through sysfs at runtime).
 */
#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
static bool emc_enable;
#endif
module_param(emc_enable, bool, 0644);
47
/* Assumed EMC bandwidth efficiency, in percent (exported, non-static). */
u8 tegra_emc_bw_efficiency = 100;

/*
 * ISO (isochronous) bandwidth share, in percent, guaranteed to each
 * combination of ISO clients.  Each entry pairs a bitmask of EMC_USER_*
 * clients with the share reserved when exactly those clients are active.
 * Single-display cases get 80%; any display + camera (VI/VI2/ISP1/ISP2)
 * or dual-display combination drops to 50%.
 */
static struct emc_iso_usage tegra12_emc_iso_usage[] = {
	{ BIT(EMC_USER_DC1),			 80 },
	{ BIT(EMC_USER_DC2),			 80 },
	{ BIT(EMC_USER_DC1) | BIT(EMC_USER_DC2),	50 },
	{ BIT(EMC_USER_DC1) | BIT(EMC_USER_VI),  50 },
	{ BIT(EMC_USER_DC2) | BIT(EMC_USER_VI),  50 },
	{ BIT(EMC_USER_DC1) | BIT(EMC_USER_VI2),  50 },
	{ BIT(EMC_USER_DC2) | BIT(EMC_USER_VI2),  50 },
	{ BIT(EMC_USER_DC1) | BIT(EMC_USER_ISP1),  50 },
	{ BIT(EMC_USER_DC2) | BIT(EMC_USER_ISP1),  50 },
	{ BIT(EMC_USER_DC1) | BIT(EMC_USER_ISP2),  50 },
	{ BIT(EMC_USER_DC2) | BIT(EMC_USER_ISP2),  50 },
};
63
/* Minimum PLL_C rate (Hz) used directly -- NOTE(review): enforcement of
   this floor is outside this chunk; confirm against the rate selection
   code. */
#define PLL_C_DIRECT_FLOOR		333500000
/* Poll iterations (1us apart) before a status-update wait times out. */
#define EMC_STATUS_UPDATE_TIMEOUT	100
/* Maximum number of DFS table entries supported by this driver. */
#define TEGRA_EMC_TABLE_MAX_SIZE	16

/* DLL state transition implied by a pending frequency change. */
enum {
	DLL_CHANGE_NONE = 0,
	DLL_CHANGE_ON,
	DLL_CHANGE_OFF,
};
73
/*
 * Fields of the CAR clock-source register for EMC: 8-bit divider at
 * bit 0, 3-bit source select at bit 29, plus the low-jitter and
 * MC-same-frequency control bits.
 */
#define EMC_CLK_DIV_SHIFT		0
#define EMC_CLK_DIV_MASK		(0xFF << EMC_CLK_DIV_SHIFT)
#define EMC_CLK_SOURCE_SHIFT		29
#define EMC_CLK_SOURCE_MASK		(0x7 << EMC_CLK_SOURCE_SHIFT)
/* Unsigned constant: left-shifting signed 1 into bit 31 is undefined
   behavior in C; 0x1u << 31 yields the intended 0x80000000 mask. */
#define EMC_CLK_LOW_JITTER_ENABLE	(0x1u << 31)
#define EMC_CLK_MC_SAME_FREQ		(0x1 << 16)
80
/*
 * Shadow (burst) registers re-programmed on every EMC rate change.
 * The list is expanded twice via different DEFINE_REG() definitions:
 * once into burst_reg_addr[] (MMIO addresses) and once into an enum of
 * <reg>_INDEX values.  The order here therefore fixes the layout of
 * tegra12_emc_table::burst_regs[] -- do not reorder entries.
 * EMC timing registers come first, followed by MC arbiter registers.
 */
#define BURST_REG_LIST \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_WIDTH),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_EINPUT),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_EINPUT_DURATION),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_WIDTH),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_ADJ),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_1),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_3),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG6),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS0),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS8),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS9),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS10),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS11),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS12),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS13),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS14),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS15),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE0),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR0),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR1),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR2),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR3),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR4),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR5),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE8),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE9),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE10),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE11),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE12),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE13),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE14),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE15),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS0),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS8),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS9),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS10),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS11),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS12),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS13),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS14),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS15),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ0),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ4),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ5),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ6),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ7),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL4),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL5),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL3),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL3),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL3),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL4),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL5),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL6),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DSR_VTTGEN_DRV),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG2),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG3),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),			\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_PIPE),		\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),	\
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QPOP),			\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),
249
/*
 * MC PTSA / latency-allowance registers re-programmed on a rate change.
 * Written before the clock switch when the rate goes down and after it
 * when the rate goes up (see emc_set_clock()).  Order fixes the layout
 * of tegra12_emc_table::burst_up_down_regs[] -- do not reorder.
 */
#define BURST_UP_DOWN_REG_LIST \
	DEFINE_REG(TEGRA_MC_BASE, MC_MLL_MPCORER_PTSA_RATE),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_XUSB_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_XUSB_1),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_TSEC_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCA_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCAA_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMC_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCAB_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_PPCS_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_PPCS_1),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MPCORE_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MPCORELP_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HC_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HC_1),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_AVPC_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_GPU_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MSENC_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HDA_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VIC_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VI2_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2_1),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2B_0),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2B_1),	\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_1),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_2),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_3),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SATA_0),		\
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_AFI_0),
282
/*
 * First expansion: each list entry becomes the virtual MMIO address of
 * its register (IO_ADDRESS(base) + reg), or 0 when the base is 0 so the
 * slot is skipped at programming time.
 */
#define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
static void __iomem *burst_reg_addr[TEGRA12_EMC_MAX_NUM_REGS] = {
	BURST_REG_LIST
};

#ifndef EMULATE_CLOCK_SWITCH
static void __iomem *burst_up_down_reg_addr[TEGRA12_EMC_MAX_NUM_REGS] = {
	BURST_UP_DOWN_REG_LIST
};
#endif
#undef DEFINE_REG
294
/*
 * Second expansion: each list entry becomes a <reg>_INDEX enumerator,
 * giving symbolic indices into burst_reg_addr[] and the table's
 * burst_regs[] array (e.g. EMC_ZCAL_INTERVAL_INDEX).
 */
#define DEFINE_REG(base, reg)	reg##_INDEX
enum {
	BURST_REG_LIST
};
#undef DEFINE_REG
300
/* Clock-source selection for one DFS table entry: parent clock, CAR
   source-select value, and the parent rate that yields the entry rate. */
struct emc_sel {
	struct clk	*input;
	u32		value;
	unsigned long	input_rate;
};
/* Per-entry source selection, parallel to tegra_emc_table[]. */
static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
/* Timing captured from h/w at init -- presumably used as "last" timing
   for the first switch; capture code is outside this chunk. */
static struct tegra12_emc_table start_timing;
/* Currently applied table entry (NULL until first switch). */
static const struct tegra12_emc_table *emc_timing;

/* Timestamp of the last rate change and the minimum spacing between
   back-to-back changes -- NOTE(review): unit of clkchange_delay not
   established in this chunk; confirm against the enforcement site. */
static ktime_t clkchange_time;
static int clkchange_delay = 100;

static const u32 *dram_to_soc_bit_map;
/* Platform-provided DFS table and its entry count. */
static const struct tegra12_emc_table *tegra_emc_table;
static int tegra_emc_table_size;

static u32 dram_dev_num;
/* DRAM type; -1 (wrapped to all-ones) means "not yet detected". */
static u32 dram_type = -1;

/* The EMC clock object; emc->reg is its CAR source register offset. */
static struct clk *emc;

/* Time-at-rate accounting, guarded by its own spinlock. */
static struct {
	cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
	int last_sel;
	u64 last_update;
	u64 clkchange_count;
	spinlock_t spinlock;
} emc_stats;

/* Serializes all EMC register sequences (rate change, etc.). */
static DEFINE_SPINLOCK(emc_access_lock);


/* Virtual bases of the EMC, MC and clock/reset (CAR) apertures. */
static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
336
/* One-line MMIO accessors for the EMC and MC apertures. */
static inline void emc_writel(u32 val, unsigned long addr)
{
	writel(val, emc_base + addr);
}

static inline u32 emc_readl(unsigned long addr)
{
	return readl(emc_base + addr);
}
static inline void mc_writel(u32 val, unsigned long addr)
{
	writel(val, mc_base + addr);
}
static inline u32 mc_readl(unsigned long addr)
{
	return readl(mc_base + addr);
}
/* Queue a deferred (data, register) write through the EMC CCFIFO.
   Data is written first, then the target register offset -- NOTE(review):
   the address write presumably triggers the FIFO push; keep this order. */
static inline void ccfifo_writel(u32 val, unsigned long addr)
{
	writel(val, emc_base + EMC_CCFIFO_DATA);
	writel(addr, emc_base + EMC_CCFIFO_ADDR);
}
359
360 static int last_round_idx;
361 static inline int get_start_idx(unsigned long rate)
362 {
363         if (tegra_emc_table[last_round_idx].rate == rate)
364                 return last_round_idx;
365         return 0;
366 }
367 static void emc_last_stats_update(int last_sel)
368 {
369         unsigned long flags;
370         u64 cur_jiffies = get_jiffies_64();
371
372         spin_lock_irqsave(&emc_stats.spinlock, flags);
373
374         if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
375                 emc_stats.time_at_clock[emc_stats.last_sel] =
376                         emc_stats.time_at_clock[emc_stats.last_sel] +
377                         (cur_jiffies - emc_stats.last_update);
378
379         emc_stats.last_update = cur_jiffies;
380
381         if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
382                 emc_stats.clkchange_count++;
383                 emc_stats.last_sel = last_sel;
384         }
385         spin_unlock_irqrestore(&emc_stats.spinlock, flags);
386 }
387
388 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
389 {
390         int i;
391         for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
392                 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
393                         return 0;
394                 udelay(1);
395         }
396         return -ETIMEDOUT;
397 }
398
399 static inline void emc_timing_update(void)
400 {
401         int err;
402
403         emc_writel(0x1, EMC_TIMING_CONTROL);
404         err = wait_for_update(EMC_STATUS,
405                               EMC_STATUS_TIMING_UPDATE_STALLED, false);
406         if (err) {
407                 pr_err("%s: timing update error: %d", __func__, err);
408                 BUG();
409         }
410 }
411
412 static inline void auto_cal_disable(void)
413 {
414         int err;
415
416         emc_writel(0, EMC_AUTO_CAL_INTERVAL);
417         err = wait_for_update(EMC_AUTO_CAL_STATUS,
418                               EMC_AUTO_CAL_STATUS_ACTIVE, false);
419         if (err) {
420                 pr_err("%s: disable auto-cal error: %d", __func__, err);
421                 BUG();
422         }
423 }
424
425 static inline bool dqs_preset(const struct tegra12_emc_table *next_timing,
426                               const struct tegra12_emc_table *last_timing)
427 {
428         bool ret = false;
429         int data;
430 #define DQS_SET(reg, bit)                                               \
431         do {                                            \
432                 data = emc_readl(EMC_XM2DQSPADCTRL2); \
433                 if ((next_timing->burst_regs[EMC_##reg##_INDEX] &       \
434                      EMC_##reg##_##bit##_ENABLE) &&                     \
435                         (!(data &       \
436                        EMC_##reg##_##bit##_ENABLE)))   {                \
437                                 emc_writel(data \
438                                    | EMC_##reg##_##bit##_ENABLE, EMC_##reg); \
439                         pr_debug("dqs preset: presetting rx_ft_rec\n"); \
440                         ret = true;                                     \
441                 }                                                       \
442         } while (0)
443         DQS_SET(XM2DQSPADCTRL2, VREF);
444         DQS_SET(XM2DQSPADCTRL2, RX_FT_REC);
445
446         return ret;
447 }
448
449 static inline void overwrite_mrs_wait_cnt(
450         const struct tegra12_emc_table *next_timing,
451         bool zcal_long)
452 {
453         u32 reg;
454         u32 cnt = 512;
455
456         /* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
457            for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
458            expected operation length. Reduce the latter by the overlapping
459            zq-calibration, if any */
460         if (zcal_long)
461                 cnt -= dram_dev_num * 256;
462
463         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
464                 EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
465                 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
466         if (cnt < reg)
467                 cnt = reg;
468
469         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
470                 (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
471         reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
472                 EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
473
474         emc_writel(reg, EMC_MRS_WAIT_CNT);
475 }
476
477 static inline int get_dll_change(const struct tegra12_emc_table *next_timing,
478                                  const struct tegra12_emc_table *last_timing)
479 {
480         bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
481         bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
482
483         if (next_dll_enabled == last_dll_enabled)
484                 return DLL_CHANGE_NONE;
485         else if (next_dll_enabled)
486                 return DLL_CHANGE_ON;
487         else
488                 return DLL_CHANGE_OFF;
489 }
490
/*
 * Queue DRAM mode-register updates through the CCFIFO for the pending
 * clock change.  Only values that differ from the last timing are
 * written; the write order below is deliberate and must be preserved.
 *
 * DDR3 (MRS/EMRS path): mode_1, then mode_2, then mode_reset.  A
 * DLL-on transition forces the MRS write with DLL_RESET and the long
 * stabilization count, even when mode_reset itself is unchanged.
 *
 * All other DRAM types (MRW path): mode_2, then mode_1, then mode_4;
 * mode_reset does not apply.
 */
static inline void set_dram_mode(const struct tegra12_emc_table *next_timing,
				 const struct tegra12_emc_table *last_timing,
				 int dll_change)
{
	if (dram_type == DRAM_TYPE_DDR3) {
		/* first mode_1, then mode_2, then mode_reset*/
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			ccfifo_writel(next_timing->emc_mode_2, EMC_EMRS2);

		if ((next_timing->emc_mode_reset !=
		     last_timing->emc_mode_reset) ||
		    (dll_change == DLL_CHANGE_ON)) {
			u32 reg = next_timing->emc_mode_reset &
				(~EMC_MODE_SET_DLL_RESET);
			if (dll_change == DLL_CHANGE_ON) {
				reg |= EMC_MODE_SET_DLL_RESET;
				reg |= EMC_MODE_SET_LONG_CNT;
			}
			ccfifo_writel(reg, EMC_MRS);
		}
	} else {
		/* first mode_2, then mode_1; mode_reset is not applicable */
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			ccfifo_writel(next_timing->emc_mode_2, EMC_MRW2);
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			ccfifo_writel(next_timing->emc_mode_1, EMC_MRW);
		if (next_timing->emc_mode_4 != last_timing->emc_mode_4)
			ccfifo_writel(next_timing->emc_mode_4, EMC_MRW4);
	}
}
523
/*
 * Switch the EMC clock source/divider register (in the CAR block) to
 * @clk_setting and wait for the EMC to report the change complete.
 * The dummy MC/EMC reads flush previously posted writes before the
 * switch; the read-back of the CAR register flushes the switch itself.
 * A completion timeout is fatal (BUG).
 */
static inline void do_clock_change(u32 clk_setting)
{
	int err;

	mc_readl(MC_EMEM_ADR_CFG);	/* completes prev writes */
	emc_readl(EMC_INTSTATUS);

	/* (u32) pointer cast assumes 32-bit addresses; fine on this
	   ARM32 mach- code, but not portable. */
	writel(clk_setting,
		(void __iomem *)((u32)clk_base + emc->reg));
	readl((void __iomem *)((u32)clk_base + emc->reg));
				/* completes prev write */

	err = wait_for_update(EMC_INTSTATUS,
			      EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
	if (err) {
		pr_err("%s: clock change completion error: %d", __func__, err);
		BUG();
	}
}
543
544 static noinline void emc_set_clock(const struct tegra12_emc_table *next_timing,
545                                    const struct tegra12_emc_table *last_timing,
546                                    u32 clk_setting)
547 {
548 #ifndef EMULATE_CLOCK_SWITCH
549         int i, dll_change, pre_wait, ctt_term_changed;
550         bool dyn_sref_enabled, zcal_long;
551
552         u32 emc_cfg_reg = emc_readl(EMC_CFG);
553         u32 emc_cfg_2_reg = emc_readl(EMC_CFG_2);
554
555         dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
556         dll_change = get_dll_change(next_timing, last_timing);
557         zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
558                 (last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);
559
560         /* 1. clear clkchange_complete interrupts */
561         emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
562
563
564         /* 1.5 On t124,  prelock the DLL - assuming the DLL is enabled. */
565         /* TODO: implement. */
566
567         /* 2. disable dynamic self-refresh and preset dqs vref, then wait for
568            possible self-refresh entry/exit and/or dqs vref settled - waiting
569            before the clock change decreases worst case change stall time */
570         pre_wait = 0;
571         if (dyn_sref_enabled) {
572                 emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
573                 emc_writel(emc_cfg_reg, EMC_CFG);
574                 pre_wait = 5;           /* 5us+ for self-refresh entry/exit */
575         }
576
577         /* 2.5 check dq/dqs vref delay */
578         if (dqs_preset(next_timing, last_timing)) {
579                 if (pre_wait < 30)
580                         pre_wait = 30;  /* 3us+ for dqs vref settled */
581         }
582
583         /* 2.6 Program CTT_TERM Control if it changed since last time*/
584         /* PLACE HOLDER FOR NOW , CODE TO BE ADDED
585         Bug-1258083, software hack for updating EMC_CCT_TERM_CTRL
586         /term-slope,offset values instantly*/
587         ctt_term_changed = (last_timing->emc_ctt_term_ctrl
588                                 != next_timing->emc_ctt_term_ctrl);
589         if (last_timing->emc_ctt_term_ctrl !=
590                         next_timing->emc_ctt_term_ctrl) {
591                         auto_cal_disable();
592                         emc_writel(next_timing->emc_ctt_term_ctrl,
593                                 EMC_CTT_TERM_CTRL);
594         }
595
596         if (pre_wait || ctt_term_changed) {
597                 emc_timing_update();
598                 udelay(pre_wait);
599         }
600
601         /* 3. disable auto-cal if vref mode is switching - removed */
602
603         /* 4. program burst shadow registers */
604         for (i = 0; i < next_timing->burst_regs_num; i++) {
605                 if (!burst_reg_addr[i])
606                         continue;
607                 __raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
608         }
609
610         emc_cfg_reg &= ~EMC_CFG_UPDATE_MASK;
611         emc_cfg_reg |= next_timing->emc_cfg & EMC_CFG_UPDATE_MASK;
612         emc_writel(emc_cfg_reg, EMC_CFG);
613         wmb();
614         barrier();
615
616         /* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
617            overwrite DFS table setting  */
618         if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
619                 overwrite_mrs_wait_cnt(next_timing, zcal_long);
620
621         /* 5.2 disable auto-refresh to save time after clock change */
622         emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);
623
624         /* 5.3 post cfg_2 write and dis ob clock gate */
625         emc_cfg_2_reg = next_timing->emc_cfg_2;
626
627         if (emc_cfg_2_reg & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR)
628                 emc_cfg_2_reg &= ~EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR;
629         ccfifo_writel(emc_cfg_2_reg, EMC_CFG_2);
630
631         /* 5.4 program sel_dpd */
632         ccfifo_writel(next_timing->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
633
634         /* 6. turn Off dll and enter self-refresh on DDR3  */
635         if (dram_type == DRAM_TYPE_DDR3) {
636                 if (dll_change == DLL_CHANGE_OFF)
637                         ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
638                 ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
639                               EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
640         }
641
642         /* 7. flow control marker 2 */
643         ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
644
645         /* 8. exit self-refresh on DDR3 */
646         if (dram_type == DRAM_TYPE_DDR3)
647                 ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);
648
649         /* 9. set dram mode registers */
650         set_dram_mode(next_timing, last_timing, dll_change);
651
652         /* 10. issue zcal command if turning zcal On */
653         if (zcal_long) {
654                 ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
655                 if (dram_dev_num > 1)
656                         ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
657         }
658
659         /* 10.1 dummy write to RO register to remove stall after change */
660         ccfifo_writel(0, EMC_CCFIFO_STATUS);
661
662
663         /* 11.1 DIS_STP_OB_CLK_DURING_NON_WR ->0 */
664         if (next_timing->emc_cfg_2 & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR) {
665                 emc_cfg_2_reg = next_timing->emc_cfg_2;
666                 ccfifo_writel(emc_cfg_2_reg, EMC_CFG_2);
667         }
668
669         /* 11.5 program burst_up_down registers if emc rate is going down */
670         if (next_timing->rate < last_timing->rate) {
671                 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
672                         __raw_writel(next_timing->burst_up_down_regs[i],
673                                 burst_up_down_reg_addr[i]);
674                 wmb();
675         }
676
677         /* 12-14. read any MC register to ensure the programming is done
678            change EMC clock source register wait for clk change completion */
679         do_clock_change(clk_setting);
680
681         /* 14.1 re-enable auto-refresh */
682         emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);
683
684         /* 14.2 program burst_up_down registers if emc rate is going up */
685         if (next_timing->rate > last_timing->rate) {
686                 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
687                         __raw_writel(next_timing->burst_up_down_regs[i],
688                                 burst_up_down_reg_addr[i]);
689                 wmb();
690         }
691
692         /* 15. restore auto-cal */
693         if (last_timing->emc_ctt_term_ctrl != next_timing->emc_ctt_term_ctrl)
694                 emc_writel(next_timing->emc_acal_interval,
695                         EMC_AUTO_CAL_INTERVAL);
696
697         /* 16. restore dynamic self-refresh */
698         if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
699                 emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
700                 emc_writel(emc_cfg_reg, EMC_CFG);
701         }
702
703         /* 17. set zcal wait count */
704         emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);
705
706         /* 18. update restored timing */
707         udelay(2);
708         emc_timing_update();
709 #else
710         /* FIXME: implement */
711         pr_info("tegra12_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
712                 next_timing->rate, clk_setting);
713 #endif
714 }
715
716 static inline void emc_get_timing(struct tegra12_emc_table *timing)
717 {
718         int i;
719
720         /* Burst updates depends on previous state; burst_up_down are
721          * stateless. */
722         for (i = 0; i < timing->burst_regs_num; i++) {
723                 if (burst_reg_addr[i])
724                         timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
725                 else
726                         timing->burst_regs[i] = 0;
727         }
728         timing->emc_acal_interval = 0;
729         timing->emc_zcal_cnt_long = 0;
730         timing->emc_mode_reset = 0;
731         timing->emc_mode_1 = 0;
732         timing->emc_mode_2 = 0;
733         timing->emc_mode_4 = 0;
734         timing->emc_cfg = emc_readl(EMC_CFG);
735         timing->rate = clk_get_rate_locked(emc) / 1000;
736 }
737
738 /* The EMC registers have shadow registers. When the EMC clock is updated
739  * in the clock controller, the shadow registers are copied to the active
740  * registers, allowing glitchless memory bus frequency changes.
741  * This function updates the shadow registers for a new clock frequency,
742  * and relies on the clock lock on the emc clock to avoid races between
743  * multiple frequency changes. In addition access lock prevents concurrent
744  * access to EMC registers from reading MRR registers */
int tegra_emc_set_rate(unsigned long rate)
{
        int i;
        u32 clk_setting;
        const struct tegra12_emc_table *last_timing;
        unsigned long flags;
        s64 last_change_delay;

        if (!tegra_emc_table)
                return -EINVAL;

        /* Table entries specify rate in kHz */
        rate = rate / 1000;

        /* Find the entry matching the target rate exactly; entries with
           no resolved clock source are skipped */
        i = get_start_idx(rate);
        for (; i < tegra_emc_table_size; i++) {
                if (tegra_emc_clk_sel[i].input == NULL)
                        continue;       /* invalid entry */

                if (tegra_emc_table[i].rate == rate)
                        break;
        }

        if (i >= tegra_emc_table_size)
                return -EINVAL;

        if (!emc_timing) {
                /* can not assume that boot timing matches dfs table even
                   if boot frequency matches one of the table nodes */
                emc_get_timing(&start_timing);
                last_timing = &start_timing;
        } else
                last_timing = emc_timing;

        clk_setting = tegra_emc_clk_sel[i].value;

        /* Enforce a minimum spacing (clkchange_delay us) between
           consecutive clock changes */
        last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
        if ((last_change_delay >= 0) && (last_change_delay < clkchange_delay))
                udelay(clkchange_delay - (int)last_change_delay);

        /* Serialize the register sequence against MRR reads and other
           EMC register access (see emc_access_lock users) */
        spin_lock_irqsave(&emc_access_lock, flags);
        emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
        clkchange_time = ktime_get();
        emc_timing = &tegra_emc_table[i];
        spin_unlock_irqrestore(&emc_access_lock, flags);

        emc_last_stats_update(i);

        pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

        return 0;
}
797
798 long tegra_emc_round_rate_updown(unsigned long rate, bool up)
799 {
800         int i;
801         unsigned long table_rate;
802
803         if (!tegra_emc_table)
804                 return clk_get_rate_locked(emc); /* no table - no rate change */
805
806         if (!emc_enable)
807                 return -EINVAL;
808
809         pr_debug("%s: %lu\n", __func__, rate);
810
811         /* Table entries specify rate in kHz */
812         rate = rate / 1000;
813
814         i = get_start_idx(rate);
815         for (; i < tegra_emc_table_size; i++) {
816                 if (tegra_emc_clk_sel[i].input == NULL)
817                         continue;       /* invalid entry */
818
819                 table_rate = tegra_emc_table[i].rate;
820                 if (table_rate >= rate) {
821                         if (!up && i && (table_rate > rate)) {
822                                 i--;
823                                 table_rate = tegra_emc_table[i].rate;
824                         }
825                         pr_debug("%s: using %lu\n", __func__, table_rate);
826                         last_round_idx = i;
827                         return table_rate * 1000;
828                 }
829         }
830
831         return -EINVAL;
832 }
833
834 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
835 {
836         int i;
837
838         if (!tegra_emc_table) {
839                 if (rate == clk_get_rate_locked(emc)) {
840                         *div_value = emc->div - 2;
841                         return emc->parent;
842                 }
843                 return NULL;
844         }
845
846         pr_debug("%s: %lu\n", __func__, rate);
847
848         /* Table entries specify rate in kHz */
849         rate = rate / 1000;
850
851         i = get_start_idx(rate);
852         for (; i < tegra_emc_table_size; i++) {
853                 if (tegra_emc_table[i].rate == rate) {
854                         struct clk *p = tegra_emc_clk_sel[i].input;
855
856                         if (p && (tegra_emc_clk_sel[i].input_rate ==
857                                   clk_get_rate(p))) {
858                                 *div_value = (tegra_emc_clk_sel[i].value &
859                                         EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
860                                 return p;
861                         }
862                 }
863         }
864         return NULL;
865 }
866
bool tegra_emc_is_parent_ready(unsigned long rate, struct clk **parent,
                unsigned long *parent_rate, unsigned long *backup_rate)
{

        int i;
        struct clk *p = NULL;
        unsigned long p_rate = 0;

        /* No table or scaling disabled: rate will not change, so the
           (non-existing) parent is trivially ready */
        if (!tegra_emc_table || !emc_enable)
                return true;

        pr_debug("%s: %lu\n", __func__, rate);

        /* Table entries specify rate in kHz */
        rate = rate / 1000;

        /* Find the entry for the target rate and check whether its input
           clock already runs at the required rate */
        i = get_start_idx(rate);
        for (; i < tegra_emc_table_size; i++) {
                if (tegra_emc_table[i].rate == rate) {
                        p = tegra_emc_clk_sel[i].input;
                        if (!p)
                                continue;       /* invalid entry */

                        p_rate = tegra_emc_clk_sel[i].input_rate;
                        if (p_rate == clk_get_rate(p))
                                return true;
                        break;
                }
        }

        /* Table match not found - "non existing parent" is ready */
        if (!p)
                return true;

#ifdef CONFIG_TEGRA_PLLM_SCALED
        /*
         * Table match found, but parent is not ready - check if backup entry
         * was found during initialization, and return the respective backup
         * rate
         */
        if (emc->shared_bus_backup.input &&
            (emc->shared_bus_backup.input != p)) {
                *parent = p;
                *parent_rate = p_rate;
                *backup_rate = emc->shared_bus_backup.bus_rate;
                return false;
        }
#else
        /*
         * Table match found, but parent is not ready - continue search
         * for backup rate: min rate above requested that has different
         * parent source (since only pll_c is scaled and may not be ready,
         * any other parent can provide backup)
         */
        *parent = p;
        *parent_rate = p_rate;

        for (i++; i < tegra_emc_table_size; i++) {
                p = tegra_emc_clk_sel[i].input;
                if (!p)
                        continue;       /* invalid entry */

                if (p != (*parent)) {
                        *backup_rate = tegra_emc_table[i].rate * 1000;
                        return false;
                }
        }
#endif
        /* Parent is not ready, and no backup found */
        *backup_rate = -EINVAL;
        return false;
}
939
940 static inline const struct clk_mux_sel *get_emc_input(u32 val)
941 {
942         const struct clk_mux_sel *sel;
943
944         for (sel = emc->inputs; sel->input != NULL; sel++) {
945                 if (sel->value == val)
946                         break;
947         }
948         return sel;
949 }
950
/* Validate one DFS table entry's clock source/divider configuration
 * against the EMC clock mux and fill @emc_clk_sel with the resolved
 * selection. Returns 0 on success, -EINVAL if the entry is unusable. */
static int find_matching_input(const struct tegra12_emc_table *table,
        struct clk *pll_c, struct clk *pll_m, struct emc_sel *emc_clk_sel)
{
        u32 div_value = (table->src_sel_reg & EMC_CLK_DIV_MASK) >>
                EMC_CLK_DIV_SHIFT;
        u32 src_value = (table->src_sel_reg & EMC_CLK_SOURCE_MASK) >>
                EMC_CLK_SOURCE_SHIFT;
        unsigned long input_rate = 0;
        unsigned long table_rate = table->rate * 1000; /* table->rate is kHz */
        const struct clk_mux_sel *sel = get_emc_input(src_value);

#ifdef CONFIG_TEGRA_PLLM_SCALED
        struct clk *scalable_pll = pll_m;
#else
        struct clk *scalable_pll = pll_c;
#endif
        pr_info_once("tegra: %s is selected as scalable EMC clock source\n",
                     scalable_pll->name);

        /* Odd divider settings are not supported by the EMC clock */
        if (div_value & 0x1) {
                pr_warn("tegra: invalid odd divider for EMC rate %lu\n",
                        table_rate);
                return -EINVAL;
        }
        if (!sel->input) {
                pr_warn("tegra: no matching input found for EMC rate %lu\n",
                        table_rate);
                return -EINVAL;
        }
        /* Low-jitter path cannot be combined with a non-zero divider */
        if (div_value && (table->src_sel_reg & EMC_CLK_LOW_JITTER_ENABLE)) {
                pr_warn("tegra: invalid LJ path for EMC rate %lu\n",
                        table_rate);
                return -EINVAL;
        }
        /* EMC/MC same-frequency flags in the clock source register and
           in the MC arbiter burst register must agree */
        if (!(table->src_sel_reg & EMC_CLK_MC_SAME_FREQ) !=
            !(MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
              table->burst_regs[MC_EMEM_ARB_MISC0_INDEX])) {
                pr_warn("tegra: ambiguous EMC to MC ratio for EMC rate %lu\n",
                        table_rate);
                return -EINVAL;
        }

#ifndef CONFIG_TEGRA_DUAL_CBUS
        if (sel->input == pll_c) {
                pr_warn("tegra: %s is cbus source: no EMC rate %lu support\n",
                        sel->input->name, table_rate);
                return -EINVAL;
        }
#endif

        if (sel->input == scalable_pll) {
                input_rate = table_rate * (1 + div_value / 2);
        } else {
                /* all other sources are fixed, must exactly match the rate */
                input_rate = clk_get_rate(sel->input);
                if (input_rate != (table_rate * (1 + div_value / 2))) {
                        pr_warn("tegra: EMC rate %lu does not match %s rate %lu\n",
                                table_rate, sel->input->name, input_rate);
                        return -EINVAL;
                }
        }

#ifdef CONFIG_TEGRA_PLLM_SCALED
                if (sel->input == pll_c) {
                        /* maybe overwritten in a loop - end up at max rate
                           from pll_c */
                        emc->shared_bus_backup.input = pll_c;
                        emc->shared_bus_backup.bus_rate = table_rate;
                }
#endif
        /* Get ready emc clock selection settings for this table rate */
        emc_clk_sel->input = sel->input;
        emc_clk_sel->input_rate = input_rate;
        emc_clk_sel->value = table->src_sel_reg;

        return 0;
}
1028
1029
/* Voltage ladder built below and installed as emc->dvfs->millivolts */
static int emc_core_millivolts[MAX_DVFS_FREQS];

/* Rebuild the EMC dvfs frequency/voltage ladder from the DFS @table:
 * one dvfs step per distinct voltage, keyed by the highest valid table
 * rate requiring that voltage. Invalid (source-less) entries are
 * skipped. */
static void adjust_emc_dvfs_table(const struct tegra12_emc_table *table,
                                  int table_size)
{
        int i, j, mv;
        unsigned long rate;

        BUG_ON(table_size > MAX_DVFS_FREQS);

        for (i = 0, j = 0; j < table_size; j++) {
                if (tegra_emc_clk_sel[j].input == NULL)
                        continue;       /* invalid entry */

                rate = table[j].rate * 1000;
                mv = table[j].emc_min_mv;

                if ((i == 0) || (mv > emc_core_millivolts[i-1])) {
                        /* advance: voltage has increased */
                        emc->dvfs->freqs[i] = rate;
                        emc_core_millivolts[i] = mv;
                        i++;
                } else {
                        /* squash: voltage has not increased */
                        emc->dvfs->freqs[i-1] = rate;
                }
        }

        emc->dvfs->millivolts = emc_core_millivolts;
        emc->dvfs->num_freqs = i;
}
1061
1062 #ifdef CONFIG_TEGRA_PLLM_SCALED
1063 /* When pll_m is scaled, pll_c must provide backup rate;
1064    if not - remove rates that require pll_m scaling */
1065 static int purge_emc_table(unsigned long max_rate)
1066 {
1067         int i;
1068         int ret = 0;
1069
1070         if (emc->shared_bus_backup.input)
1071                 return ret;
1072
1073         pr_warn("tegra: selected pll_m scaling option but no backup source:\n");
1074         pr_warn("       removed not supported entries from the table:\n");
1075
1076         /* made all entries with non matching rate invalid */
1077         for (i = 0; i < tegra_emc_table_size; i++) {
1078                 struct emc_sel *sel = &tegra_emc_clk_sel[i];
1079                 if (sel->input) {
1080                         if (clk_get_rate(sel->input) != sel->input_rate) {
1081                                 pr_warn("       EMC rate %lu\n",
1082                                         tegra_emc_table[i].rate * 1000);
1083                                 sel->input = NULL;
1084                                 sel->input_rate = 0;
1085                                 sel->value = 0;
1086                                 if (max_rate == tegra_emc_table[i].rate)
1087                                         ret = -EINVAL;
1088                         }
1089                 }
1090         }
1091         return ret;
1092 }
1093 #else
1094 /* When pll_m is fixed @ max EMC rate, it always provides backup for pll_c */
1095 #define purge_emc_table(max_rate) (0)
1096 #endif
1097
/* Validate the EMC DFS @table of @table_size entries, resolve a clock
 * source for every usable entry, establish the maximum EMC rate and
 * dvfs voltage ladder, and configure the clock change mode.
 * Returns 0 on success or -ENODATA when the table cannot be used. */
static int init_emc_table(const struct tegra12_emc_table *table, int table_size)
{
        int i, mv;
        u32 reg;
        bool max_entry = false;
        bool emc_max_dvfs_sel = get_emc_max_dvfs();
        unsigned long boot_rate, max_rate;
        struct clk *pll_c = tegra_get_clock_by_name("pll_c");
        struct clk *pll_m = tegra_get_clock_by_name("pll_m");

        emc_stats.clkchange_count = 0;
        spin_lock_init(&emc_stats.spinlock);
        emc_stats.last_update = get_jiffies_64();
        emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

        /* Scaling on this SoC is only supported for DDR3 */
        if (dram_type != DRAM_TYPE_DDR3) {
                pr_err("tegra: not supported DRAM type %u\n", dram_type);
                return -ENODATA;
        }

        if (!table || !table_size) {
                pr_err("tegra: EMC DFS table is empty\n");
                return -ENODATA;
        }

        boot_rate = clk_get_rate(emc) / 1000;
        max_rate = boot_rate;

        tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
        /* Only known table revisions are accepted */
        switch (table[0].rev) {
        case 0x14:
        case 0x15:
                start_timing.burst_regs_num = table[0].burst_regs_num;
                break;
        default:
                pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
                        table[0].rev);
                return -ENODATA;
        }

        /* Match EMC source/divider settings with table entries */
        for (i = 0; i < tegra_emc_table_size; i++) {
                unsigned long table_rate = table[i].rate;

                /* Stop: "no-rate" entry, or entry violating ascending order */
                if (!table_rate || (i && ((table_rate <= table[i-1].rate) ||
                        (table[i].emc_min_mv < table[i-1].emc_min_mv)))) {
                        pr_warn("tegra: EMC rate entry %lu is not ascending\n",
                                table_rate);
                        break;
                }

                BUG_ON(table[i].rev != table[0].rev);

                if (find_matching_input(&table[i], pll_c, pll_m,
                                        &tegra_emc_clk_sel[i]))
                        continue;

                if (table_rate == boot_rate)
                        emc_stats.last_sel = i;

                if (emc_max_dvfs_sel) {
                        /* EMC max rate = max table entry above boot rate */
                        if (table_rate >= max_rate) {
                                max_rate = table_rate;
                                max_entry = true;
                        }
                } else if (table_rate == max_rate) {
                        /* EMC max rate = boot rate */
                        max_entry = true;
                        break;
                }
        }

        /* Validate EMC rate and voltage limits */
        if (!max_entry) {
                pr_err("tegra: invalid EMC DFS table: entry for max rate"
                       " %lu kHz is not found\n", max_rate);
                return -ENODATA;
        }

        tegra_emc_table = table;

        /*
         * Purge rates that cannot be reached because table does not specify
         * proper backup source. If maximum rate was purged, fall back on boot
         * rate as maximum limit. In any case propagate new maximum limit
         * down stream to shared users, and check it against nominal voltage.
         */
        if (purge_emc_table(max_rate))
                max_rate = boot_rate;
        tegra_init_max_rate(emc, max_rate * 1000);

        if (emc->dvfs) {
                adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
                mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
                if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
                        /* Max rate unreachable at nominal voltage: reject
                           the whole table */
                        tegra_emc_table = NULL;
                        pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
                               " kHz does not match nominal voltage %d\n",
                               max_rate, emc->dvfs->max_millivolts);
                        return -ENODATA;
                }
        }

        pr_info("tegra: validated EMC DFS table\n");

        /* Configure clock change mode according to dram type */
        reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
        reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
                EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
        emc_writel(reg, EMC_CFG_2);
        return 0;
}
1212
1213 static int tegra12_emc_probe(struct platform_device *pdev)
1214 {
1215         struct tegra12_emc_pdata *pdata;
1216         struct resource *res;
1217
1218         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1219         if (!res) {
1220                 dev_err(&pdev->dev, "missing register base\n");
1221                 return -ENOMEM;
1222         }
1223
1224         pdata = pdev->dev.platform_data;
1225         if (!pdata) {
1226                 dev_err(&pdev->dev, "missing platform data\n");
1227                 return -ENODATA;
1228         }
1229
1230         return init_emc_table(pdata->tables, pdata->num_tables);
1231 }
1232
/* Binds to the "tegra-emc" platform device registered by board code;
 * probe validates and installs the EMC DFS table from platform data. */
static struct platform_driver tegra12_emc_driver = {
        .driver         = {
                .name   = "tegra-emc",
                .owner  = THIS_MODULE,
        },
        .probe          = tegra12_emc_probe,
};
1240
1241 int __init tegra12_emc_init(void)
1242 {
1243         int ret = platform_driver_register(&tegra12_emc_driver);
1244
1245         if (!ret) {
1246                 tegra_emc_iso_usage_table_init(tegra12_emc_iso_usage,
1247                                 ARRAY_SIZE(tegra12_emc_iso_usage));
1248                 if (emc_enable) {
1249                         unsigned long rate = tegra_emc_round_rate_updown(
1250                                 emc->boot_rate, false);
1251                         if (!IS_ERR_VALUE(rate))
1252                                 tegra_clk_preset_emc_monitor(rate);
1253                 }
1254         }
1255         return ret;
1256 }
1257
/* Drop the cached current-timing pointer so the next rate change
 * re-reads actual timing from hardware via emc_get_timing().
 * NOTE(review): callers not visible in this file - presumably invoked
 * when register state may no longer match the cache (e.g. resume). */
void tegra_emc_timing_invalidate(void)
{
        emc_timing = NULL;
}
1262
/* Cache the emc clock handle, the DRAM type (from EMC_FBIO_CFG5) and
 * the number of DRAM devices (from MC_EMEM_ADR_CFG) for later use. */
void tegra_emc_dram_type_init(struct clk *c)
{
        emc = c;

        dram_type = (emc_readl(EMC_FBIO_CFG5) &
                     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;

        dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
}
1272
/* Return the DRAM type cached by tegra_emc_dram_type_init() */
int tegra_emc_get_dram_type(void)
{
        return dram_type;
}
1277
1278 static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
1279 {
1280         int bit;
1281         u32 dram_val = 0;
1282
1283         /* tegra clocks definitions use shifted mask always */
1284         if (!dram_to_soc_bit_map)
1285                 return soc_val & dram_mask;
1286
1287         for (bit = dram_shift; bit < 32; bit++) {
1288                 u32 dram_bit_mask = 0x1 << bit;
1289                 u32 soc_bit_mask = dram_to_soc_bit_map[bit];
1290
1291                 if (!(dram_bit_mask & dram_mask))
1292                         break;
1293
1294                 if (soc_bit_mask & soc_val)
1295                         dram_val |= dram_bit_mask;
1296         }
1297
1298         return dram_val;
1299 }
1300
/* Issue a mode-register-read (MRR) to LPDDR2 device @dev for mode
 * register @addr; returns the register data or a negative error.
 * Called under emc_access_lock (see tegra_emc_get_dram_temperature).
 * Active power-down is temporarily cleared around the read since the
 * MRR handshake requires the DRAM to be active. */
static int emc_read_mrr(int dev, int addr)
{
        int ret;
        u32 val, emc_cfg;

        if (dram_type != DRAM_TYPE_LPDDR2)
                return -ENODEV;

        /* Wait until any previous MRR data has been consumed */
        ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
        if (ret)
                return ret;

        emc_cfg = emc_readl(EMC_CFG);
        if (emc_cfg & EMC_CFG_DRAM_ACPD) {
                emc_writel(emc_cfg & ~EMC_CFG_DRAM_ACPD, EMC_CFG);
                emc_timing_update();
        }

        /* Program device select and mode register address, start MRR */
        val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
        val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
        emc_writel(val, EMC_MRR);

        /* Wait for data valid, then restore the power-down setting */
        ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
        if (emc_cfg & EMC_CFG_DRAM_ACPD) {
                emc_writel(emc_cfg, EMC_CFG);
                emc_timing_update();
        }
        if (ret)
                return ret;

        val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
        return val;
}
1334
1335 int tegra_emc_get_dram_temperature(void)
1336 {
1337         int mr4;
1338         unsigned long flags;
1339
1340         spin_lock_irqsave(&emc_access_lock, flags);
1341
1342         mr4 = emc_read_mrr(0, 4);
1343         if (IS_ERR_VALUE(mr4)) {
1344                 spin_unlock_irqrestore(&emc_access_lock, flags);
1345                 return mr4;
1346         }
1347         spin_unlock_irqrestore(&emc_access_lock, flags);
1348
1349         mr4 = soc_to_dram_bit_swap(
1350                 mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
1351         return mr4;
1352 }
1353
1354 #ifdef CONFIG_DEBUG_FS
1355
/* Root of the tegra_emc debugfs hierarchy created at late init */
static struct dentry *emc_debugfs_root;
1357
1358 static int emc_stats_show(struct seq_file *s, void *data)
1359 {
1360         int i;
1361
1362         emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
1363
1364         seq_printf(s, "%-10s %-10s\n", "rate kHz", "time");
1365         for (i = 0; i < tegra_emc_table_size; i++) {
1366                 if (tegra_emc_clk_sel[i].input == NULL)
1367                         continue;       /* invalid entry */
1368
1369                 seq_printf(s, "%-10lu %-10llu\n", tegra_emc_table[i].rate,
1370                         cputime64_to_clock_t(emc_stats.time_at_clock[i]));
1371         }
1372         seq_printf(s, "%-15s %llu\n", "transitions:",
1373                    emc_stats.clkchange_count);
1374         seq_printf(s, "%-15s %llu\n", "time-stamp:",
1375                    cputime64_to_clock_t(emc_stats.last_update));
1376
1377         return 0;
1378 }
1379
/* debugfs open: bind emc_stats_show to the seq_file interface */
static int emc_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, emc_stats_show, inode->i_private);
}
1384
/* File operations for the read-only "stats" debugfs node */
static const struct file_operations emc_stats_fops = {
        .open           = emc_stats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
1391
1392 static int emc_table_info_show(struct seq_file *s, void *data)
1393 {
1394         int i;
1395         for (i = 0; i < tegra_emc_table_size; i++) {
1396                 if (tegra_emc_clk_sel[i].input == NULL)
1397                         continue;
1398                 seq_printf(s, "Table info:\n   Rev: 0x%02x\n"
1399                 "   Table ID: %s\n", tegra_emc_table[i].rev,
1400                 tegra_emc_table[i].table_id);
1401                 seq_printf(s, "    %lu\n", tegra_emc_table[i].rate);
1402         }
1403
1404         return 0;
1405 }
1406
/* debugfs open: bind emc_table_info_show to the seq_file interface */
static int emc_table_info_open(struct inode *inode, struct file *file)
{
        return single_open(file, emc_table_info_show, inode->i_private);
}
1411
/* File operations for the read-only "table_info" debugfs node */
static const struct file_operations emc_table_info_fops = {
        .open           = emc_table_info_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
1418
/* debugfs getter: report the MR4 temperature field of LPDDR2 device 0
 * (negative errno values from the MRR read are reported as-is) */
static int dram_temperature_get(void *data, u64 *val)
{
        *val = tegra_emc_get_dram_temperature();
        return 0;
}
/* Read-only "dram_temperature" attribute */
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
                        NULL, "%lld\n");
1426
1427 static int efficiency_get(void *data, u64 *val)
1428 {
1429         *val = tegra_emc_bw_efficiency;
1430         return 0;
1431 }
1432 static int efficiency_set(void *data, u64 val)
1433 {
1434         tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
1435         if (emc)
1436                 tegra_clk_shared_bus_update(emc);
1437
1438         return 0;
1439 }
1440 DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
1441                         efficiency_set, "%llu\n");
1442
/* Create the tegra_emc debugfs hierarchy (stats, clkchange_delay,
 * dram_temperature, efficiency, iso usage nodes, table_info). The
 * whole tree is removed if any node fails to be created. */
static int __init tegra_emc_debug_init(void)
{
        /* Nothing to expose when table validation failed */
        if (!tegra_emc_table)
                return 0;

        emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
        if (!emc_debugfs_root)
                return -ENOMEM;

        if (!debugfs_create_file(
                "stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
                goto err_out;

        if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
                emc_debugfs_root, (u32 *)&clkchange_delay))
                goto err_out;

        if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
                                 NULL, &dram_temperature_fops))
                goto err_out;

        if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
                                 emc_debugfs_root, NULL, &efficiency_fops))
                goto err_out;


        if (tegra_emc_iso_usage_debugfs_init(emc_debugfs_root))
                goto err_out;

        if (!debugfs_create_file("table_info", S_IRUGO,
                                 emc_debugfs_root, NULL, &emc_table_info_fops))
                goto err_out;

        return 0;

err_out:
        debugfs_remove_recursive(emc_debugfs_root);
        return -ENOMEM;
}

late_initcall(tegra_emc_debug_init);
1484 #endif