ARM: tegra: loki: fix build error due to warning
[linux-3.10.git] / arch / arm / mach-tegra / tegra12_emc.c
1 /*
2  * arch/arm/mach-tegra/tegra12_emc.c
3  *
4  * Copyright (c) 2013, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
18  *
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include <linux/module.h>
26 #include <linux/delay.h>
27 #include <linux/platform_device.h>
28 #include <linux/platform_data/tegra_emc.h>
29 #include <linux/debugfs.h>
30 #include <linux/seq_file.h>
31 #include <linux/hrtimer.h>
32
33 #include <asm/cputime.h>
34
35 #include "clock.h"
36 #include "board.h"
37 #include "dvfs.h"
38 #include "iomap.h"
39 #include "tegra12_emc.h"
40
/*
 * EMC frequency scaling switch: defaults to on only when the
 * CONFIG_TEGRA_EMC_SCALING_ENABLE Kconfig option is set; writable at
 * runtime through the "emc_enable" module parameter (mode 0644).
 */
#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
static bool emc_enable;
#endif
module_param(emc_enable, bool, 0644);
47
/* Usable DRAM bandwidth as a percentage of the raw bus bandwidth. */
u8 tegra_emc_bw_efficiency = 100;

/*
 * Bandwidth share (percent) reserved for each combination of
 * isochronous EMC clients (display controllers, video input).
 */
static struct emc_iso_usage tegra12_emc_iso_usage[] = {
	{ BIT(EMC_USER_DC1),                     80 },
	{ BIT(EMC_USER_DC2),                     80 },
	{ BIT(EMC_USER_DC1) | BIT(EMC_USER_DC2),        50 },
	{ BIT(EMC_USER_DC1) | BIT(EMC_USER_VI),  50 },
	{ BIT(EMC_USER_DC2) | BIT(EMC_USER_VI),  50 },
};
57
#define PLL_C_DIRECT_FLOOR              333500000 /* presumably Hz -- TODO confirm */
#define EMC_STATUS_UPDATE_TIMEOUT       100     /* us, polled in wait_for_update() */
#define TEGRA_EMC_TABLE_MAX_SIZE        16      /* max DFS table entries */

/* Direction of the DLL on/off transition between two timings. */
enum {
	DLL_CHANGE_NONE = 0,
	DLL_CHANGE_ON,
	DLL_CHANGE_OFF,
};

/* Fields of the CAR EMC clock source register. */
#define EMC_CLK_DIV_SHIFT               0
#define EMC_CLK_DIV_MASK                (0xFF << EMC_CLK_DIV_SHIFT)
#define EMC_CLK_SOURCE_SHIFT            29
#define EMC_CLK_SOURCE_MASK             (0x7 << EMC_CLK_SOURCE_SHIFT)
#define EMC_CLK_LOW_JITTER_ENABLE       (0x1 << 31)
#define EMC_CLK_MC_SAME_FREQ            (0x1 << 16)
74
/*
 * EMC/MC shadow registers programmed for every DFS table entry, in
 * burst_regs[] order. Expanded twice via DEFINE_REG below: once into
 * burst_reg_addr[] (virtual addresses) and once into <reg>_INDEX
 * enumerators — so this list's order must stay in lockstep with the
 * burst_regs[] layout of the DFS tables. Do not reorder.
 */
#define BURST_REG_LIST \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR),                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),                     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK),               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE),                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_WIDTH),             \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY),                  \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_EINPUT),                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_EINPUT_DURATION),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA),           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_WIDTH),           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_ADJ),             \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_1),             \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_2),             \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_3),             \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE),                  \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK),               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),      \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),              \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),                \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR),                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),                   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),                  \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),             \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),                 \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG6),              \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),              \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),              \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),            \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),     \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS0),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS8),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS9),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS10),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS11),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS12),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS13),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS14),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS15),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE0),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR0),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR1),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR2),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR3),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR4),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR5),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE8),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE9),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE10),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE11),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE12),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE13),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE14),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE15),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS0),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS8),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS9),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS10),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS11),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS12),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS13),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS14),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS15),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ0),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ4),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ5),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ6),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ7),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL4),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL5),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL3),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL2),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),      \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL3),      \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL3),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL4),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL5),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL6),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DSR_VTTGEN_DRV),         \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN),            \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),             \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2),          \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG2),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG3),       \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_AUTO_CAL_CONFIG),        \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),                    \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),           \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_PIPE),               \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),   \
	DEFINE_REG(TEGRA_EMC_BASE, EMC_QPOP),                   \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),             \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),  \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),  \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),        \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),       \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),           \
	DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),
243
/*
 * MC latency-allowance / PTSA registers reprogrammed on every rate
 * change, in burst_up_down_regs[] order. Written before the switch
 * when the rate goes down and after the switch when it goes up (see
 * emc_set_clock()). Order must match the DFS table layout.
 */
#define BURST_UP_DOWN_REG_LIST \
	DEFINE_REG(TEGRA_MC_BASE, MC_MLL_MPCORER_PTSA_RATE),            \
	DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT),             \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_XUSB_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_XUSB_1),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_TSEC_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCA_0),       \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCAA_0),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMC_0),        \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCAB_0),      \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_PPCS_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_PPCS_1),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MPCORE_0),       \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MPCORELP_0),     \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HC_0),           \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HC_1),           \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_AVPC_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_GPU_0),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MSENC_0),        \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HDA_0),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VIC_0),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VI2_0),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2_1),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2B_0),        \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2B_1),        \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_0),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_1),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_2),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_3),          \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SATA_0),         \
	DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_AFI_0),
276
/*
 * First expansion: map each (base, reg) pair to its virtual register
 * address; a zero base yields a NULL entry that the shadow-programming
 * loop in emc_set_clock() skips.
 */
#define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
static void __iomem *burst_reg_addr[TEGRA12_EMC_MAX_NUM_REGS] = {
	BURST_REG_LIST
};

#ifndef EMULATE_CLOCK_SWITCH
static void __iomem *burst_up_down_reg_addr[TEGRA12_EMC_MAX_NUM_REGS] = {
	BURST_UP_DOWN_REG_LIST
};
#endif
#undef DEFINE_REG

/* Second expansion: <reg>_INDEX enumerators into burst_regs[]. */
#define DEFINE_REG(base, reg)   reg##_INDEX
enum {
	BURST_REG_LIST
};
#undef DEFINE_REG
294
/*
 * Per-DFS-table-entry clock selection: parent clock, the value to
 * program into the EMC clock source register, and the parent rate.
 */
struct emc_sel {
	struct clk	*input;
	u32		value;
	unsigned long	input_rate;
};
static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
static struct tegra12_emc_table start_timing;
static const struct tegra12_emc_table *emc_timing; /* currently applied timing */

static ktime_t clkchange_time;		/* timestamp of last clock change */
static int clkchange_delay = 100;	/* presumably us between changes -- TODO confirm */

static const u32 *dram_to_soc_bit_map;
static const struct tegra12_emc_table *tegra_emc_table; /* active DFS table */
static int tegra_emc_table_size;

static u32 dram_dev_num;	/* number of DRAM devices */
static u32 dram_type = -1;	/* DRAM_TYPE_*; -1 until probed */

static struct clk *emc;		/* the EMC clock object */

/* Time-at-rate statistics; all fields guarded by .spinlock. */
static struct {
	cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
	int last_sel;		/* index of the currently selected rate */
	u64 last_update;	/* jiffies64 of the last accounting update */
	u64 clkchange_count;
	spinlock_t spinlock;
} emc_stats;

static DEFINE_SPINLOCK(emc_access_lock);

/* Virtual bases of the EMC, MC and clock/reset register apertures. */
static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
330
/* MMIO accessors for the EMC and MC register apertures. */
static inline void emc_writel(u32 val, unsigned long addr)
{
	writel(val, emc_base + addr);
}

static inline u32 emc_readl(unsigned long addr)
{
	return readl(emc_base + addr);
}
static inline void mc_writel(u32 val, unsigned long addr)
{
	writel(val, mc_base + addr);
}
static inline u32 mc_readl(unsigned long addr)
{
	return readl(mc_base + addr);
}
/*
 * Queue a deferred register write through the EMC clock-change FIFO.
 * Write order is significant: the data word goes first, then writing
 * the target address pushes the FIFO entry.
 */
static inline void ccfifo_writel(u32 val, unsigned long addr)
{
	writel(val, emc_base + EMC_CCFIFO_DATA);
	writel(addr, emc_base + EMC_CCFIFO_ADDR);
}
353
354 static int last_round_idx;
355 static inline int get_start_idx(unsigned long rate)
356 {
357         if (tegra_emc_table[last_round_idx].rate == rate)
358                 return last_round_idx;
359         return 0;
360 }
361 static void emc_last_stats_update(int last_sel)
362 {
363         unsigned long flags;
364         u64 cur_jiffies = get_jiffies_64();
365
366         spin_lock_irqsave(&emc_stats.spinlock, flags);
367
368         if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
369                 emc_stats.time_at_clock[emc_stats.last_sel] =
370                         emc_stats.time_at_clock[emc_stats.last_sel] +
371                         (cur_jiffies - emc_stats.last_update);
372
373         emc_stats.last_update = cur_jiffies;
374
375         if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
376                 emc_stats.clkchange_count++;
377                 emc_stats.last_sel = last_sel;
378         }
379         spin_unlock_irqrestore(&emc_stats.spinlock, flags);
380 }
381
382 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
383 {
384         int i;
385         for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
386                 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
387                         return 0;
388                 udelay(1);
389         }
390         return -ETIMEDOUT;
391 }
392
393 static inline void emc_timing_update(void)
394 {
395         int err;
396
397         emc_writel(0x1, EMC_TIMING_CONTROL);
398         err = wait_for_update(EMC_STATUS,
399                               EMC_STATUS_TIMING_UPDATE_STALLED, false);
400         if (err) {
401                 pr_err("%s: timing update error: %d", __func__, err);
402                 BUG();
403         }
404 }
405
406 static inline void auto_cal_disable(void)
407 {
408         int err;
409
410         emc_writel(0, EMC_AUTO_CAL_INTERVAL);
411         err = wait_for_update(EMC_AUTO_CAL_STATUS,
412                               EMC_AUTO_CAL_STATUS_ACTIVE, false);
413         if (err) {
414                 pr_err("%s: disable auto-cal error: %d", __func__, err);
415                 BUG();
416         }
417 }
418
/*
 * Preset DQS pad controls ahead of a clock change: if the next timing
 * needs VREF or RX_FT_REC enabled in XM2DQSPADCTRL2 and the bit is
 * currently clear, set it now so the pads can settle before the
 * switch. Returns true if any bit was set (caller must then wait).
 *
 * NOTE(review): the macro body reads EMC_XM2DQSPADCTRL2 directly, so
 * DQS_SET is only valid for reg == XM2DQSPADCTRL2 as invoked below.
 */
static inline bool dqs_preset(const struct tegra12_emc_table *next_timing,
			      const struct tegra12_emc_table *last_timing)
{
	bool ret = false;
	int data;
#define DQS_SET(reg, bit)						\
	do {						\
		data = emc_readl(EMC_XM2DQSPADCTRL2); \
		if ((next_timing->burst_regs[EMC_##reg##_INDEX] &	\
		     EMC_##reg##_##bit##_ENABLE) &&			\
			(!(data &	\
		       EMC_##reg##_##bit##_ENABLE)))	{		\
				emc_writel(data \
				   | EMC_##reg##_##bit##_ENABLE, EMC_##reg); \
			pr_debug("dqs preset: presetting rx_ft_rec\n"); \
			ret = true;					\
		}							\
	} while (0)
	DQS_SET(XM2DQSPADCTRL2, VREF);
	DQS_SET(XM2DQSPADCTRL2, RX_FT_REC);

	return ret;
}
442
443 static inline void overwrite_mrs_wait_cnt(
444         const struct tegra12_emc_table *next_timing,
445         bool zcal_long)
446 {
447         u32 reg;
448         u32 cnt = 512;
449
450         /* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
451            for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
452            expected operation length. Reduce the latter by the overlapping
453            zq-calibration, if any */
454         if (zcal_long)
455                 cnt -= dram_dev_num * 256;
456
457         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
458                 EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
459                 EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
460         if (cnt < reg)
461                 cnt = reg;
462
463         reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
464                 (~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
465         reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
466                 EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;
467
468         emc_writel(reg, EMC_MRS_WAIT_CNT);
469 }
470
471 static inline int get_dll_change(const struct tegra12_emc_table *next_timing,
472                                  const struct tegra12_emc_table *last_timing)
473 {
474         bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
475         bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
476
477         if (next_dll_enabled == last_dll_enabled)
478                 return DLL_CHANGE_NONE;
479         else if (next_dll_enabled)
480                 return DLL_CHANGE_ON;
481         else
482                 return DLL_CHANGE_OFF;
483 }
484
/*
 * Queue DRAM mode-register updates through the CCFIFO so they take
 * effect as part of the clock change. Write order is DRAM-type
 * specific and must be preserved:
 *   DDR3:  EMRS (mode_1), EMRS2 (mode_2), then MRS (mode_reset, with
 *          DLL reset + long count when the DLL is being turned on);
 *   else (MRW-style registers): MRW2 (mode_2), MRW (mode_1), MRW4
 *          (mode_4); mode_reset is not applicable.
 */
static inline void set_dram_mode(const struct tegra12_emc_table *next_timing,
				 const struct tegra12_emc_table *last_timing,
				 int dll_change)
{
	if (dram_type == DRAM_TYPE_DDR3) {
		/* first mode_1, then mode_2, then mode_reset*/
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			ccfifo_writel(next_timing->emc_mode_2, EMC_EMRS2);

		if ((next_timing->emc_mode_reset !=
		     last_timing->emc_mode_reset) ||
		    (dll_change == DLL_CHANGE_ON)) {
			u32 reg = next_timing->emc_mode_reset &
				(~EMC_MODE_SET_DLL_RESET);
			if (dll_change == DLL_CHANGE_ON) {
				reg |= EMC_MODE_SET_DLL_RESET;
				reg |= EMC_MODE_SET_LONG_CNT;
			}
			ccfifo_writel(reg, EMC_MRS);
		}
	} else {
		/* first mode_2, then mode_1; mode_reset is not applicable */
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			ccfifo_writel(next_timing->emc_mode_2, EMC_MRW2);
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			ccfifo_writel(next_timing->emc_mode_1, EMC_MRW);
		if (next_timing->emc_mode_4 != last_timing->emc_mode_4)
			ccfifo_writel(next_timing->emc_mode_4, EMC_MRW4);
	}
}
517
518 static inline void do_clock_change(u32 clk_setting)
519 {
520         int err;
521
522         mc_readl(MC_EMEM_ADR_CFG);      /* completes prev writes */
523         emc_readl(EMC_INTSTATUS);
524
525         writel(clk_setting,
526                 (void __iomem *)((u32)clk_base + emc->reg));
527         readl((void __iomem *)((u32)clk_base + emc->reg));
528                                 /* completes prev write */
529
530         err = wait_for_update(EMC_INTSTATUS,
531                               EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
532         if (err) {
533                 pr_err("%s: clock change completion error: %d", __func__, err);
534                 BUG();
535         }
536 }
537
538 static noinline void emc_set_clock(const struct tegra12_emc_table *next_timing,
539                                    const struct tegra12_emc_table *last_timing,
540                                    u32 clk_setting)
541 {
542 #ifndef EMULATE_CLOCK_SWITCH
543         int i, dll_change, pre_wait, ctt_term_changed;
544         bool dyn_sref_enabled, zcal_long;
545
546         u32 emc_cfg_reg = emc_readl(EMC_CFG);
547         u32 emc_cfg_2_reg = emc_readl(EMC_CFG_2);
548
549         dyn_sref_enabled = emc_cfg_reg & EMC_CFG_DYN_SREF_ENABLE;
550         dll_change = get_dll_change(next_timing, last_timing);
551         zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
552                 (last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);
553
554         /* 1. clear clkchange_complete interrupts */
555         emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);
556
557
558         /* 1.5 On t124,  prelock the DLL - assuming the DLL is enabled. */
559         /* TODO: implement. */
560
561         /* 2. disable dynamic self-refresh and preset dqs vref, then wait for
562            possible self-refresh entry/exit and/or dqs vref settled - waiting
563            before the clock change decreases worst case change stall time */
564         pre_wait = 0;
565         if (dyn_sref_enabled) {
566                 emc_cfg_reg &= ~EMC_CFG_DYN_SREF_ENABLE;
567                 emc_writel(emc_cfg_reg, EMC_CFG);
568                 pre_wait = 5;           /* 5us+ for self-refresh entry/exit */
569         }
570
571         /* 2.5 check dq/dqs vref delay */
572         if (dqs_preset(next_timing, last_timing)) {
573                 if (pre_wait < 30)
574                         pre_wait = 30;  /* 3us+ for dqs vref settled */
575         }
576
577         /* 2.6 Program CTT_TERM Control if it changed since last time*/
578         /* PLACE HOLDER FOR NOW , CODE TO BE ADDED
579         Bug-1258083, software hack for updating EMC_CCT_TERM_CTRL
580         /term-slope,offset values instantly*/
581         ctt_term_changed = (last_timing->emc_ctt_term_ctrl
582                                 != next_timing->emc_ctt_term_ctrl);
583         if (last_timing->emc_ctt_term_ctrl !=
584                         next_timing->emc_ctt_term_ctrl) {
585                         auto_cal_disable();
586                         emc_writel(next_timing->emc_ctt_term_ctrl,
587                                 EMC_CTT_TERM_CTRL);
588         }
589
590         if (pre_wait || ctt_term_changed) {
591                 emc_timing_update();
592                 udelay(pre_wait);
593         }
594
595         /* 3. disable auto-cal if vref mode is switching - removed */
596
597         /* 4. program burst shadow registers */
598         for (i = 0; i < next_timing->burst_regs_num; i++) {
599                 if (!burst_reg_addr[i])
600                         continue;
601                 __raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
602         }
603
604         emc_cfg_reg &= ~EMC_CFG_UPDATE_MASK;
605         emc_cfg_reg |= next_timing->emc_cfg & EMC_CFG_UPDATE_MASK;
606         emc_writel(emc_cfg_reg, EMC_CFG);
607         wmb();
608         barrier();
609
610         /* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
611            overwrite DFS table setting  */
612         if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
613                 overwrite_mrs_wait_cnt(next_timing, zcal_long);
614
615         /* 5.2 disable auto-refresh to save time after clock change */
616         emc_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);
617
618         /* 5.3 post cfg_2 write and dis ob clock gate */
619         emc_cfg_2_reg = next_timing->emc_cfg_2;
620
621         if (emc_cfg_2_reg & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR)
622                 emc_cfg_2_reg &= ~EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR;
623         ccfifo_writel(emc_cfg_2_reg, EMC_CFG_2);
624
625         /* 5.4 program sel_dpd */
626         ccfifo_writel(next_timing->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
627
628         /* 6. turn Off dll and enter self-refresh on DDR3  */
629         if (dram_type == DRAM_TYPE_DDR3) {
630                 if (dll_change == DLL_CHANGE_OFF)
631                         ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
632                 ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
633                               EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
634         }
635
636         /* 7. flow control marker 2 */
637         ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);
638
639         /* 8. exit self-refresh on DDR3 */
640         if (dram_type == DRAM_TYPE_DDR3)
641                 ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);
642
643         /* 9. set dram mode registers */
644         set_dram_mode(next_timing, last_timing, dll_change);
645
646         /* 10. issue zcal command if turning zcal On */
647         if (zcal_long) {
648                 ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
649                 if (dram_dev_num > 1)
650                         ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
651         }
652
653         /* 10.1 dummy write to RO register to remove stall after change */
654         ccfifo_writel(0, EMC_CCFIFO_STATUS);
655
656
657         /* 11.1 DIS_STP_OB_CLK_DURING_NON_WR ->0 */
658         if (next_timing->emc_cfg_2 & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR) {
659                 emc_cfg_2_reg = next_timing->emc_cfg_2;
660                 ccfifo_writel(emc_cfg_2_reg, EMC_CFG_2);
661         }
662
663         /* 11.5 program burst_up_down registers if emc rate is going down */
664         if (next_timing->rate < last_timing->rate) {
665                 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
666                         __raw_writel(next_timing->burst_up_down_regs[i],
667                                 burst_up_down_reg_addr[i]);
668                 wmb();
669         }
670
671         /* 12-14. read any MC register to ensure the programming is done
672            change EMC clock source register wait for clk change completion */
673         do_clock_change(clk_setting);
674
675         /* 14.1 re-enable auto-refresh */
676         emc_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);
677
678         /* 14.2 program burst_up_down registers if emc rate is going up */
679         if (next_timing->rate > last_timing->rate) {
680                 for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
681                         __raw_writel(next_timing->burst_up_down_regs[i],
682                                 burst_up_down_reg_addr[i]);
683                 wmb();
684         }
685
686         /* 15. restore auto-cal */
687         if (last_timing->emc_ctt_term_ctrl != next_timing->emc_ctt_term_ctrl)
688                 emc_writel(next_timing->emc_acal_interval,
689                         EMC_AUTO_CAL_INTERVAL);
690
691         /* 16. restore dynamic self-refresh */
692         if (next_timing->emc_cfg & EMC_CFG_DYN_SREF_ENABLE) {
693                 emc_cfg_reg |= EMC_CFG_DYN_SREF_ENABLE;
694                 emc_writel(emc_cfg_reg, EMC_CFG);
695         }
696
697         /* 17. set zcal wait count */
698         emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);
699
700         /* 18. update restored timing */
701         udelay(2);
702         emc_timing_update();
703 #else
704         /* FIXME: implement */
705         pr_info("tegra12_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
706                 next_timing->rate, clk_setting);
707 #endif
708 }
709
710 static inline void emc_get_timing(struct tegra12_emc_table *timing)
711 {
712         int i;
713
714         /* Burst updates depends on previous state; burst_up_down are
715          * stateless. */
716         for (i = 0; i < timing->burst_regs_num; i++) {
717                 if (burst_reg_addr[i])
718                         timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
719                 else
720                         timing->burst_regs[i] = 0;
721         }
722         timing->emc_acal_interval = 0;
723         timing->emc_zcal_cnt_long = 0;
724         timing->emc_mode_reset = 0;
725         timing->emc_mode_1 = 0;
726         timing->emc_mode_2 = 0;
727         timing->emc_mode_4 = 0;
728         timing->emc_cfg = emc_readl(EMC_CFG);
729         timing->rate = clk_get_rate_locked(emc) / 1000;
730 }
731
/* The EMC registers have shadow registers. When the EMC clock is updated
 * in the clock controller, the shadow registers are copied to the active
 * registers, allowing glitchless memory bus frequency changes.
 * This function updates the shadow registers for a new clock frequency,
 * and relies on the clock lock on the emc clock to avoid races between
 * multiple frequency changes. In addition, the access lock prevents
 * concurrent access to EMC registers while MRR registers are being read. */
int tegra_emc_set_rate(unsigned long rate)
{
	int i;
	u32 clk_setting;
	const struct tegra12_emc_table *last_timing;
	unsigned long flags;
	s64 last_change_delay;

	if (!tegra_emc_table)
		return -EINVAL;

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	/* Find the table entry that exactly matches the requested rate */
	i = get_start_idx(rate);
	for (; i < tegra_emc_table_size; i++) {
		if (tegra_emc_clk_sel[i].input == NULL)
			continue;	/* invalid entry */

		if (tegra_emc_table[i].rate == rate)
			break;
	}

	if (i >= tegra_emc_table_size)
		return -EINVAL;

	if (!emc_timing) {
		/* can not assume that boot timing matches dfs table even
		   if boot frequency matches one of the table nodes */
		emc_get_timing(&start_timing);
		last_timing = &start_timing;
	} else
		last_timing = emc_timing;

	clk_setting = tegra_emc_clk_sel[i].value;

	/* Enforce minimum spacing (clkchange_delay us) between changes */
	last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
	if ((last_change_delay >= 0) && (last_change_delay < clkchange_delay))
		udelay(clkchange_delay - (int)last_change_delay);

	/* Serialize against MRR reads and other EMC register access */
	spin_lock_irqsave(&emc_access_lock, flags);
	emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
	clkchange_time = ktime_get();
	emc_timing = &tegra_emc_table[i];
	spin_unlock_irqrestore(&emc_access_lock, flags);

	emc_last_stats_update(i);

	pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);

	return 0;
}
791
792 long tegra_emc_round_rate_updown(unsigned long rate, bool up)
793 {
794         int i;
795         unsigned long table_rate;
796
797         if (!tegra_emc_table)
798                 return clk_get_rate_locked(emc); /* no table - no rate change */
799
800         if (!emc_enable)
801                 return -EINVAL;
802
803         pr_debug("%s: %lu\n", __func__, rate);
804
805         /* Table entries specify rate in kHz */
806         rate = rate / 1000;
807
808         i = get_start_idx(rate);
809         for (; i < tegra_emc_table_size; i++) {
810                 if (tegra_emc_clk_sel[i].input == NULL)
811                         continue;       /* invalid entry */
812
813                 table_rate = tegra_emc_table[i].rate;
814                 if (table_rate >= rate) {
815                         if (!up && i && (table_rate > rate)) {
816                                 i--;
817                                 table_rate = tegra_emc_table[i].rate;
818                         }
819                         pr_debug("%s: using %lu\n", __func__, table_rate);
820                         last_round_idx = i;
821                         return table_rate * 1000;
822                 }
823         }
824
825         return -EINVAL;
826 }
827
828 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
829 {
830         int i;
831
832         if (!tegra_emc_table) {
833                 if (rate == clk_get_rate_locked(emc)) {
834                         *div_value = emc->div - 2;
835                         return emc->parent;
836                 }
837                 return NULL;
838         }
839
840         pr_debug("%s: %lu\n", __func__, rate);
841
842         /* Table entries specify rate in kHz */
843         rate = rate / 1000;
844
845         i = get_start_idx(rate);
846         for (; i < tegra_emc_table_size; i++) {
847                 if (tegra_emc_table[i].rate == rate) {
848                         struct clk *p = tegra_emc_clk_sel[i].input;
849
850                         if (p && (tegra_emc_clk_sel[i].input_rate ==
851                                   clk_get_rate(p))) {
852                                 *div_value = (tegra_emc_clk_sel[i].value &
853                                         EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
854                                 return p;
855                         }
856                 }
857         }
858         return NULL;
859 }
860
/*
 * Check whether the table-selected source for @rate (Hz) is already running
 * at the rate recorded during table init. Returns true when no preparation
 * is needed; otherwise returns false and fills @parent, @parent_rate and
 * @backup_rate with an intermediate (backup) rate to switch to first.
 */
bool tegra_emc_is_parent_ready(unsigned long rate, struct clk **parent,
		unsigned long *parent_rate, unsigned long *backup_rate)
{

	int i;
	struct clk *p = NULL;
	unsigned long p_rate = 0;

	/* No table or scaling disabled: nothing to prepare */
	if (!tegra_emc_table || !emc_enable)
		return true;

	pr_debug("%s: %lu\n", __func__, rate);

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	i = get_start_idx(rate);
	for (; i < tegra_emc_table_size; i++) {
		if (tegra_emc_table[i].rate == rate) {
			p = tegra_emc_clk_sel[i].input;
			if (!p)
				continue;	/* invalid entry */

			p_rate = tegra_emc_clk_sel[i].input_rate;
			if (p_rate == clk_get_rate(p))
				return true;
			break;
		}
	}

	/* Table match not found - "non existing parent" is ready */
	if (!p)
		return true;

#ifdef CONFIG_TEGRA_PLLM_SCALED
	/*
	 * Table match found, but parent is not ready - check if backup entry
	 * was found during initialization, and return the respective backup
	 * rate
	 */
	if (emc->shared_bus_backup.input &&
	    (emc->shared_bus_backup.input != p)) {
		*parent = p;
		*parent_rate = p_rate;
		*backup_rate = emc->shared_bus_backup.bus_rate;
		return false;
	}
#else
	/*
	 * Table match found, but parent is not ready - continue search
	 * for backup rate: min rate above requested that has different
	 * parent source (since only pll_c is scaled and may not be ready,
	 * any other parent can provide backup)
	 */
	*parent = p;
	*parent_rate = p_rate;

	for (i++; i < tegra_emc_table_size; i++) {
		p = tegra_emc_clk_sel[i].input;
		if (!p)
			continue;	/* invalid entry */

		if (p != (*parent)) {
			*backup_rate = tegra_emc_table[i].rate * 1000;
			return false;
		}
	}
#endif
	/* Parent is not ready, and no backup found */
	*backup_rate = -EINVAL;
	return false;
}
933
934 static inline const struct clk_mux_sel *get_emc_input(u32 val)
935 {
936         const struct clk_mux_sel *sel;
937
938         for (sel = emc->inputs; sel->input != NULL; sel++) {
939                 if (sel->value == val)
940                         break;
941         }
942         return sel;
943 }
944
/*
 * Validate one DFS table entry against the clock tree and fill in the
 * matching emc clock selection (@emc_clk_sel: source clock, its expected
 * input rate, and the raw source/divider register value). Returns 0 on
 * success, -EINVAL when the entry is inconsistent and must be skipped.
 */
static int find_matching_input(const struct tegra12_emc_table *table,
	struct clk *pll_c, struct clk *pll_m, struct emc_sel *emc_clk_sel)
{
	u32 div_value = (table->src_sel_reg & EMC_CLK_DIV_MASK) >>
		EMC_CLK_DIV_SHIFT;
	u32 src_value = (table->src_sel_reg & EMC_CLK_SOURCE_MASK) >>
		EMC_CLK_SOURCE_SHIFT;
	unsigned long input_rate = 0;
	unsigned long table_rate = table->rate * 1000; /* table->rate is kHz */
	const struct clk_mux_sel *sel = get_emc_input(src_value);

#ifdef CONFIG_TEGRA_PLLM_SCALED
	struct clk *scalable_pll = pll_m;
#else
	struct clk *scalable_pll = pll_c;
#endif
	pr_info_once("tegra: %s is selected as scalable EMC clock source\n",
		     scalable_pll->name);

	/* Odd divider values are rejected for EMC */
	if (div_value & 0x1) {
		pr_warn("tegra: invalid odd divider for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	if (!sel->input) {
		pr_warn("tegra: no matching input found for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	/* Low-jitter path may not be combined with a non-zero divider */
	if (div_value && (table->src_sel_reg & EMC_CLK_LOW_JITTER_ENABLE)) {
		pr_warn("tegra: invalid LJ path for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	/* Same-frequency flags in clock source and MC arbiter must agree */
	if (!(table->src_sel_reg & EMC_CLK_MC_SAME_FREQ) !=
	    !(MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
	      table->burst_regs[MC_EMEM_ARB_MISC0_INDEX])) {
		pr_warn("tegra: ambiguous EMC to MC ratio for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}

#ifndef CONFIG_TEGRA_DUAL_CBUS
	if (sel->input == pll_c) {
		pr_warn("tegra: %s is cbus source: no EMC rate %lu support\n",
			sel->input->name, table_rate);
		return -EINVAL;
	}
#endif

	if (sel->input == scalable_pll) {
		/* scalable source: required input rate derived from table */
		input_rate = table_rate * (1 + div_value / 2);
	} else {
		/* all other sources are fixed, must exactly match the rate */
		input_rate = clk_get_rate(sel->input);
		if (input_rate != (table_rate * (1 + div_value / 2))) {
			pr_warn("tegra: EMC rate %lu does not match %s rate %lu\n",
				table_rate, sel->input->name, input_rate);
			return -EINVAL;
		}
	}

#ifdef CONFIG_TEGRA_PLLM_SCALED
		if (sel->input == pll_c) {
			/* maybe overwritten in a loop - end up at max rate
			   from pll_c */
			emc->shared_bus_backup.input = pll_c;
			emc->shared_bus_backup.bus_rate = table_rate;
		}
#endif
	/* Get ready emc clock selection settings for this table rate */
	emc_clk_sel->input = sel->input;
	emc_clk_sel->input_rate = input_rate;
	emc_clk_sel->value = table->src_sel_reg;

	return 0;
}
1022
1023 static void adjust_emc_dvfs_table(const struct tegra12_emc_table *table,
1024                                   int table_size)
1025 {
1026         int i, j;
1027         unsigned long rate;
1028
1029         for (i = 0; i < MAX_DVFS_FREQS; i++) {
1030                 int mv = emc->dvfs->millivolts[i];
1031                 if (!mv)
1032                         break;
1033
1034                 /* For each dvfs voltage find maximum supported rate;
1035                    use 1MHz placeholder if not found */
1036                 for (rate = 1000, j = 0; j < table_size; j++) {
1037                         if (tegra_emc_clk_sel[j].input == NULL)
1038                                 continue;       /* invalid entry */
1039
1040                         if ((mv >= table[j].emc_min_mv) &&
1041                             (rate < table[j].rate))
1042                                 rate = table[j].rate;
1043                 }
1044                 /* Table entries specify rate in kHz */
1045                 emc->dvfs->freqs[i] = rate * 1000;
1046         }
1047 }
1048
1049 #ifdef CONFIG_TEGRA_PLLM_SCALED
1050 /* When pll_m is scaled, pll_c must provide backup rate;
1051    if not - remove rates that require pll_m scaling */
1052 static int purge_emc_table(unsigned long max_rate)
1053 {
1054         int i;
1055         int ret = 0;
1056
1057         if (emc->shared_bus_backup.input)
1058                 return ret;
1059
1060         pr_warn("tegra: selected pll_m scaling option but no backup source:\n");
1061         pr_warn("       removed not supported entries from the table:\n");
1062
1063         /* made all entries with non matching rate invalid */
1064         for (i = 0; i < tegra_emc_table_size; i++) {
1065                 struct emc_sel *sel = &tegra_emc_clk_sel[i];
1066                 if (sel->input) {
1067                         if (clk_get_rate(sel->input) != sel->input_rate) {
1068                                 pr_warn("       EMC rate %lu\n",
1069                                         tegra_emc_table[i].rate * 1000);
1070                                 sel->input = NULL;
1071                                 sel->input_rate = 0;
1072                                 sel->value = 0;
1073                                 if (max_rate == tegra_emc_table[i].rate)
1074                                         ret = -EINVAL;
1075                         }
1076                 }
1077         }
1078         return ret;
1079 }
1080 #else
1081 /* When pll_m is fixed @ max EMC rate, it always provides backup for pll_c */
1082 #define purge_emc_table(max_rate) (0)
1083 #endif
1084
/*
 * Validate the platform-supplied EMC DFS table and set up internal state:
 * per-entry clock selections, maximum rate, dvfs frequency ladder, and the
 * EMC clock-change mode. Returns 0 on success, -ENODATA when the table
 * cannot be used (EMC scaling then stays disabled).
 */
static int init_emc_table(const struct tegra12_emc_table *table, int table_size)
{
	int i, mv;
	u32 reg;
	bool max_entry = false;
	bool emc_max_dvfs_sel = get_emc_max_dvfs();
	unsigned long boot_rate, max_rate;
	struct clk *pll_c = tegra_get_clock_by_name("pll_c");
	struct clk *pll_m = tegra_get_clock_by_name("pll_m");

	/* Reset statistics; last_sel sentinel marks "no entry selected" */
	emc_stats.clkchange_count = 0;
	spin_lock_init(&emc_stats.spinlock);
	emc_stats.last_update = get_jiffies_64();
	emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

	/* Only DDR3 is accepted by this scaling implementation */
	if (dram_type != DRAM_TYPE_DDR3) {
		pr_err("tegra: not supported DRAM type %u\n", dram_type);
		return -ENODATA;
	}

	if (!table || !table_size) {
		pr_err("tegra: EMC DFS table is empty\n");
		return -ENODATA;
	}

	boot_rate = clk_get_rate(emc) / 1000;
	max_rate = boot_rate;

	tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
	/* Only table revision 0x14 is understood here */
	switch (table[0].rev) {
	case 0x14:
		start_timing.burst_regs_num = table[0].burst_regs_num;
		break;
	default:
		pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
			table[0].rev);
		return -ENODATA;
	}

	/* Match EMC source/divider settings with table entries */
	for (i = 0; i < tegra_emc_table_size; i++) {
		unsigned long table_rate = table[i].rate;

		/* Skip "no-rate" entry, or entry violating ascending order */
		if (!table_rate ||
		    (i && (table_rate <= table[i-1].rate)))
			continue;

		BUG_ON(table[i].rev != table[0].rev);

		if (find_matching_input(&table[i], pll_c, pll_m,
					&tegra_emc_clk_sel[i]))
			continue;

		if (table_rate == boot_rate)
			emc_stats.last_sel = i;

		if (emc_max_dvfs_sel) {
			/* EMC max rate = max table entry above boot rate */
			if (table_rate >= max_rate) {
				max_rate = table_rate;
				max_entry = true;
			}
		} else if (table_rate == max_rate) {
			/* EMC max rate = boot rate */
			max_entry = true;
			break;
		}
	}

	/* Validate EMC rate and voltage limits */
	if (!max_entry) {
		pr_err("tegra: invalid EMC DFS table: entry for max rate"
		       " %lu kHz is not found\n", max_rate);
		return -ENODATA;
	}

	tegra_emc_table = table;

	/*
	 * Purge rates that cannot be reached because table does not specify
	 * proper backup source. If maximum rate was purged, fall back on boot
	 * rate as maximum limit. In any case propagate new maximum limit
	 * down stream to shared users, and check it against nominal voltage.
	 */
	if (purge_emc_table(max_rate))
		max_rate = boot_rate;
	tegra_init_max_rate(emc, max_rate * 1000);

	if (emc->dvfs) {
		adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
		mv = tegra_dvfs_predict_millivolts(emc, max_rate * 1000);
		if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
			/* Unset the table so scaling stays disabled */
			tegra_emc_table = NULL;
			pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
			       " kHz does not match nominal voltage %d\n",
			       max_rate, emc->dvfs->max_millivolts);
			return -ENODATA;
		}
	}

	pr_info("tegra: validated EMC DFS table\n");

	/* Configure clock change mode according to dram type */
	reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
	reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
		EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
	emc_writel(reg, EMC_CFG_2);
	return 0;
}
1195
1196 static int tegra12_emc_probe(struct platform_device *pdev)
1197 {
1198         struct tegra12_emc_pdata *pdata;
1199         struct resource *res;
1200
1201         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1202         if (!res) {
1203                 dev_err(&pdev->dev, "missing register base\n");
1204                 return -ENOMEM;
1205         }
1206
1207         pdata = pdev->dev.platform_data;
1208         if (!pdata) {
1209                 dev_err(&pdev->dev, "missing platform data\n");
1210                 return -ENODATA;
1211         }
1212
1213         return init_emc_table(pdata->tables, pdata->num_tables);
1214 }
1215
/* Binds to the "tegra-emc" platform device registered by board code */
static struct platform_driver tegra12_emc_driver = {
	.driver         = {
		.name   = "tegra-emc",
		.owner  = THIS_MODULE,
	},
	.probe          = tegra12_emc_probe,
};
1223
1224 int __init tegra12_emc_init(void)
1225 {
1226         int ret = platform_driver_register(&tegra12_emc_driver);
1227
1228         if (!ret) {
1229                 tegra_emc_iso_usage_table_init(tegra12_emc_iso_usage,
1230                                 ARRAY_SIZE(tegra12_emc_iso_usage));
1231                 if (emc_enable) {
1232                         unsigned long rate = tegra_emc_round_rate_updown(
1233                                 emc->boot_rate, false);
1234                         if (!IS_ERR_VALUE(rate))
1235                                 tegra_clk_preset_emc_monitor(rate);
1236                 }
1237         }
1238         return ret;
1239 }
1240
/*
 * Drop the cached "current timing" pointer; the next rate change will then
 * re-read hardware state via emc_get_timing() instead of trusting the cache.
 */
void tegra_emc_timing_invalidate(void)
{
	emc_timing = NULL;
}
1245
/*
 * Bind the emc clock handle and latch DRAM configuration from hardware:
 * DRAM type from EMC_FBIO_CFG5 and device count from MC_EMEM_ADR_CFG.
 */
void tegra_emc_dram_type_init(struct clk *c)
{
	emc = c;

	dram_type = (emc_readl(EMC_FBIO_CFG5) &
		     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;

	dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
}
1255
/* Report the DRAM type latched by tegra_emc_dram_type_init() */
int tegra_emc_get_dram_type(void)
{
	return dram_type;
}
1260
1261 static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
1262 {
1263         int bit;
1264         u32 dram_val = 0;
1265
1266         /* tegra clocks definitions use shifted mask always */
1267         if (!dram_to_soc_bit_map)
1268                 return soc_val & dram_mask;
1269
1270         for (bit = dram_shift; bit < 32; bit++) {
1271                 u32 dram_bit_mask = 0x1 << bit;
1272                 u32 soc_bit_mask = dram_to_soc_bit_map[bit];
1273
1274                 if (!(dram_bit_mask & dram_mask))
1275                         break;
1276
1277                 if (soc_bit_mask & soc_val)
1278                         dram_val |= dram_bit_mask;
1279         }
1280
1281         return dram_val;
1282 }
1283
/*
 * Read an LPDDR2 mode register via the EMC MRR mechanism from DRAM device
 * @dev (0/1), register @addr. Returns the register data on success or a
 * negative error code. Callers serialize with emc_access_lock (see
 * tegra_emc_get_dram_temperature()).
 */
static int emc_read_mrr(int dev, int addr)
{
	int ret;
	u32 val, emc_cfg;

	if (dram_type != DRAM_TYPE_LPDDR2)
		return -ENODEV;

	/* Wait until any previous MRR data has been consumed */
	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
	if (ret)
		return ret;

	/* Temporarily clear DRAM auto clock-power-down during the read */
	emc_cfg = emc_readl(EMC_CFG);
	if (emc_cfg & EMC_CFG_DRAM_ACPD) {
		emc_writel(emc_cfg & ~EMC_CFG_DRAM_ACPD, EMC_CFG);
		emc_timing_update();
	}

	/* Issue the mode-register-read command for the selected device */
	val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
	val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
	emc_writel(val, EMC_MRR);

	/* Wait for data valid, then restore the original ACPD setting
	   regardless of whether the wait succeeded */
	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
	if (emc_cfg & EMC_CFG_DRAM_ACPD) {
		emc_writel(emc_cfg, EMC_CFG);
		emc_timing_update();
	}
	if (ret)
		return ret;

	val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
	return val;
}
1317
1318 int tegra_emc_get_dram_temperature(void)
1319 {
1320         int mr4;
1321         unsigned long flags;
1322
1323         spin_lock_irqsave(&emc_access_lock, flags);
1324
1325         mr4 = emc_read_mrr(0, 4);
1326         if (IS_ERR_VALUE(mr4)) {
1327                 spin_unlock_irqrestore(&emc_access_lock, flags);
1328                 return mr4;
1329         }
1330         spin_unlock_irqrestore(&emc_access_lock, flags);
1331
1332         mr4 = soc_to_dram_bit_swap(
1333                 mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
1334         return mr4;
1335 }
1336
1337 #ifdef CONFIG_DEBUG_FS
1338
1339 static struct dentry *emc_debugfs_root;
1340
1341 static int emc_stats_show(struct seq_file *s, void *data)
1342 {
1343         int i;
1344
1345         emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
1346
1347         seq_printf(s, "%-10s %-10s\n", "rate kHz", "time");
1348         for (i = 0; i < tegra_emc_table_size; i++) {
1349                 if (tegra_emc_clk_sel[i].input == NULL)
1350                         continue;       /* invalid entry */
1351
1352                 seq_printf(s, "%-10lu %-10llu\n", tegra_emc_table[i].rate,
1353                         cputime64_to_clock_t(emc_stats.time_at_clock[i]));
1354         }
1355         seq_printf(s, "%-15s %llu\n", "transitions:",
1356                    emc_stats.clkchange_count);
1357         seq_printf(s, "%-15s %llu\n", "time-stamp:",
1358                    cputime64_to_clock_t(emc_stats.last_update));
1359
1360         return 0;
1361 }
1362
/* Open hook: route reads through emc_stats_show() via seq_file */
static int emc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_stats_show, inode->i_private);
}
1367
/* File operations for the read-only debugfs "stats" node */
static const struct file_operations emc_stats_fops = {
	.open           = emc_stats_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
1374
/* debugfs getter: current DRAM temperature code (negative on error) */
static int dram_temperature_get(void *data, u64 *val)
{
	*val = tegra_emc_get_dram_temperature();
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
			NULL, "%lld\n");
1382
1383 static int efficiency_get(void *data, u64 *val)
1384 {
1385         *val = tegra_emc_bw_efficiency;
1386         return 0;
1387 }
1388 static int efficiency_set(void *data, u64 val)
1389 {
1390         tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
1391         if (emc)
1392                 tegra_clk_shared_bus_update(emc);
1393
1394         return 0;
1395 }
1396 DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
1397                         efficiency_set, "%llu\n");
1398
1399 static int __init tegra_emc_debug_init(void)
1400 {
1401         if (!tegra_emc_table)
1402                 return 0;
1403
1404         emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
1405         if (!emc_debugfs_root)
1406                 return -ENOMEM;
1407
1408         if (!debugfs_create_file(
1409                 "stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
1410                 goto err_out;
1411
1412         if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
1413                 emc_debugfs_root, (u32 *)&clkchange_delay))
1414                 goto err_out;
1415
1416         if (!debugfs_create_file("dram_temperature", S_IRUGO, emc_debugfs_root,
1417                                  NULL, &dram_temperature_fops))
1418                 goto err_out;
1419
1420         if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
1421                                  emc_debugfs_root, NULL, &efficiency_fops))
1422                 goto err_out;
1423
1424         if (tegra_emc_iso_usage_debugfs_init(emc_debugfs_root))
1425                 goto err_out;
1426
1427         return 0;
1428
1429 err_out:
1430         debugfs_remove_recursive(emc_debugfs_root);
1431         return -ENOMEM;
1432 }
1433
1434 late_initcall(tegra_emc_debug_init);
1435 #endif