ARM: tegra12: set CPU rate to 2.2GHz for sku 0x87
[linux-3.10.git] / arch / arm / mach-tegra / tegra12_emc.c
1 /*
2  * arch/arm/mach-tegra/tegra12_emc.c
3  *
4  * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program; if not, write to the Free Software Foundation, Inc.,
17  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
18  *
19  */
20
21 #include <linux/kernel.h>
22 #include <linux/clk.h>
23 #include <linux/err.h>
24 #include <linux/io.h>
25 #include <linux/of.h>
26 #include <linux/module.h>
27 #include <linux/delay.h>
28 #include <linux/platform_device.h>
29 #include <linux/platform_data/tegra_emc.h>
30 #include <linux/debugfs.h>
31 #include <linux/seq_file.h>
32 #include <linux/hrtimer.h>
33 #include <linux/pasr.h>
34 #include <linux/slab.h>
35 #include <mach/nct.h>
36
37 #include <asm/cputime.h>
38
39 #include "clock.h"
40 #include "board.h"
41 #include "dvfs.h"
42 #include "iomap.h"
43 #include "tegra12_emc.h"
44 #include "tegra_emc_dt_parse.h"
45 #include "devices.h"
46
47
/* EMC frequency scaling master switch: compiled-in default follows the
 * kernel config, and can be flipped at runtime through the writable
 * "emc_enable" module parameter (0644). */
#ifdef CONFIG_TEGRA_EMC_SCALING_ENABLE
static bool emc_enable = true;
#else
static bool emc_enable;
#endif
module_param(emc_enable, bool, 0644);

/* Partial Array Self Refresh runtime switch; off (0) by default. */
static int pasr_enable;

/* Assumed DRAM bandwidth efficiency in percent, used when converting
 * requested bandwidth into an EMC rate (non-static: referenced from
 * other files). */
u8 tegra_emc_bw_efficiency = 100;
58
/* ISO bandwidth breakpoints (in MHz-equivalents) used to index the
 * usage tables below. */
static u32 bw_calc_freqs[] = {
	20, 40, 60, 80, 100, 120, 140, 160, 200, 300, 400, 600, 800
};

/* Allowed EMC ISO usage share, in percent, per bw_calc_freqs bucket.
 * NOTE: these tables have one more entry (14) than bw_calc_freqs (13);
 * presumably the last entry covers bandwidths above the largest
 * breakpoint — TODO confirm against the share-calc implementation. */
static u32 tegra12_emc_usage_shared_os_idle[] = {
	11, 27, 29, 34, 39, 42, 46, 47, 51, 51, 51, 51, 51, 51
};
static u32 tegra12_emc_usage_shared_general[] = {
	11, 18, 22, 25, 28, 31, 34, 38, 44, 44, 44, 44, 44, 51
};

static u8 iso_share_calc_t124_os_idle(unsigned long iso_bw);
static u8 iso_share_calc_t124_general(unsigned long iso_bw);


/* Per client-combination ISO usage: each entry pairs a bitmask of active
 * ISO clients with a flat guaranteed share (percent) and a callback that
 * refines the share from the requested ISO bandwidth. */
static struct emc_iso_usage tegra12_emc_iso_usage[] = {
	{
		BIT(EMC_USER_DC1),
		80, iso_share_calc_t124_os_idle
	},
	{
		BIT(EMC_USER_DC1) | BIT(EMC_USER_DC2),
		45, iso_share_calc_t124_general
	},
	{
		BIT(EMC_USER_DC1) | BIT(EMC_USER_VI),
		45, iso_share_calc_t124_general
	},
	{
		BIT(EMC_USER_DC1) | BIT(EMC_USER_DC2) | BIT(EMC_USER_VI),
		45, iso_share_calc_t124_general
	},
};
92
93 #define MHZ 1000000
94 #define TEGRA_EMC_ISO_USE_FREQ_MAX_NUM 13
95 #define PLL_C_DIRECT_FLOOR              333500000
96 #define EMC_STATUS_UPDATE_TIMEOUT       1000
97 #define TEGRA_EMC_TABLE_MAX_SIZE        16
98
99 #define TEGRA_EMC_MODE_REG_17   0x00110000
100 #define TEGRA_EMC_MRW_DEV_SHIFT 30
101 #define TEGRA_EMC_MRW_DEV1      2
102 #define TEGRA_EMC_MRW_DEV2      1
103
104 #define MC_EMEM_DEV_SIZE_MASK   0xF
105 #define MC_EMEM_DEV_SIZE_SHIFT  16
106
107 enum {
108         DLL_CHANGE_NONE = 0,
109         DLL_CHANGE_ON,
110         DLL_CHANGE_OFF,
111 };
112
113 #define EMC_CLK_DIV_SHIFT               0
114 #define EMC_CLK_DIV_MASK                (0xFF << EMC_CLK_DIV_SHIFT)
115 #define EMC_CLK_SOURCE_SHIFT            29
116 #define EMC_CLK_SOURCE_MASK             (0x7 << EMC_CLK_SOURCE_SHIFT)
117 #define EMC_CLK_LOW_JITTER_ENABLE       (0x1 << 31)
118 #define EMC_CLK_MC_SAME_FREQ            (0x1 << 16)
119
120 #define BURST_REG_LIST \
121         DEFINE_REG(TEGRA_EMC_BASE, EMC_RC),                     \
122         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC),                    \
123         DEFINE_REG(TEGRA_EMC_BASE, EMC_RFC_SLR),                \
124         DEFINE_REG(TEGRA_EMC_BASE, EMC_RAS),                    \
125         DEFINE_REG(TEGRA_EMC_BASE, EMC_RP),                     \
126         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2W),                    \
127         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2R),                    \
128         DEFINE_REG(TEGRA_EMC_BASE, EMC_R2P),                    \
129         DEFINE_REG(TEGRA_EMC_BASE, EMC_W2P),                    \
130         DEFINE_REG(TEGRA_EMC_BASE, EMC_RD_RCD),                 \
131         DEFINE_REG(TEGRA_EMC_BASE, EMC_WR_RCD),                 \
132         DEFINE_REG(TEGRA_EMC_BASE, EMC_RRD),                    \
133         DEFINE_REG(TEGRA_EMC_BASE, EMC_REXT),                   \
134         DEFINE_REG(TEGRA_EMC_BASE, EMC_WEXT),                   \
135         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV),                    \
136         DEFINE_REG(TEGRA_EMC_BASE, EMC_WDV_MASK),               \
137         DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE),                   \
138         DEFINE_REG(TEGRA_EMC_BASE, EMC_QUSE_WIDTH),             \
139         DEFINE_REG(TEGRA_EMC_BASE, EMC_IBDLY),                  \
140         DEFINE_REG(TEGRA_EMC_BASE, EMC_EINPUT),                 \
141         DEFINE_REG(TEGRA_EMC_BASE, EMC_EINPUT_DURATION),        \
142         DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_EXTRA),           \
143         DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_WIDTH),           \
144         DEFINE_REG(TEGRA_EMC_BASE, EMC_PUTERM_ADJ),             \
145         DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_1),             \
146         DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_2),             \
147         DEFINE_REG(TEGRA_EMC_BASE, EMC_CDB_CNTL_3),             \
148         DEFINE_REG(TEGRA_EMC_BASE, EMC_QRST),                   \
149         DEFINE_REG(TEGRA_EMC_BASE, EMC_QSAFE),                  \
150         DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV),                    \
151         DEFINE_REG(TEGRA_EMC_BASE, EMC_RDV_MASK),               \
152         DEFINE_REG(TEGRA_EMC_BASE, EMC_REFRESH),                \
153         DEFINE_REG(TEGRA_EMC_BASE, EMC_BURST_REFRESH_NUM),      \
154         DEFINE_REG(TEGRA_EMC_BASE, EMC_PRE_REFRESH_REQ_CNT),    \
155         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2WR),                \
156         DEFINE_REG(TEGRA_EMC_BASE, EMC_PDEX2RD),                \
157         DEFINE_REG(TEGRA_EMC_BASE, EMC_PCHG2PDEN),              \
158         DEFINE_REG(TEGRA_EMC_BASE, EMC_ACT2PDEN),               \
159         DEFINE_REG(TEGRA_EMC_BASE, EMC_AR2PDEN),                \
160         DEFINE_REG(TEGRA_EMC_BASE, EMC_RW2PDEN),                \
161         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSR),                   \
162         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXSRDLL),                \
163         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKE),                   \
164         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCKESR),                 \
165         DEFINE_REG(TEGRA_EMC_BASE, EMC_TPD),                    \
166         DEFINE_REG(TEGRA_EMC_BASE, EMC_TFAW),                   \
167         DEFINE_REG(TEGRA_EMC_BASE, EMC_TRPAB),                  \
168         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTABLE),             \
169         DEFINE_REG(TEGRA_EMC_BASE, EMC_TCLKSTOP),               \
170         DEFINE_REG(TEGRA_EMC_BASE, EMC_TREFBW),                 \
171         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG6),              \
172         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_WRITE),              \
173         DEFINE_REG(TEGRA_EMC_BASE, EMC_ODT_READ),               \
174         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_CFG5),              \
175         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL),            \
176         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_DIG_DLL_PERIOD),     \
177         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS0),         \
178         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS1),         \
179         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS2),         \
180         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS3),         \
181         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS4),         \
182         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS5),         \
183         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS6),         \
184         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS7),         \
185         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS8),         \
186         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS9),         \
187         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS10),        \
188         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS11),        \
189         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS12),        \
190         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS13),        \
191         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS14),        \
192         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQS15),        \
193         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE0),        \
194         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE1),        \
195         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE2),        \
196         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE3),        \
197         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE4),        \
198         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE5),        \
199         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE6),        \
200         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE7),        \
201         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR0),        \
202         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR1),        \
203         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR2),        \
204         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR3),        \
205         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR4),        \
206         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_ADDR5),        \
207         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE8),        \
208         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE9),        \
209         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE10),       \
210         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE11),       \
211         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE12),       \
212         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE13),       \
213         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE14),       \
214         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_QUSE15),       \
215         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS0),        \
216         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS1),        \
217         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS2),        \
218         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS3),        \
219         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS4),        \
220         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS5),        \
221         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS6),        \
222         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS7),        \
223         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS8),        \
224         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS9),        \
225         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS10),       \
226         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS11),       \
227         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS12),       \
228         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS13),       \
229         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS14),       \
230         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLI_TRIM_TXDQS15),       \
231         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ0),          \
232         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ1),          \
233         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ2),          \
234         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ3),          \
235         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ4),          \
236         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ5),          \
237         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ6),          \
238         DEFINE_REG(TEGRA_EMC_BASE, EMC_DLL_XFORM_DQ7),          \
239         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL),          \
240         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL4),         \
241         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CMDPADCTRL5),         \
242         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL2),         \
243         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL2),          \
244         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQPADCTRL3),          \
245         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL),          \
246         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2CLKPADCTRL2),         \
247         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2COMPPADCTRL),         \
248         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL),       \
249         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL2),      \
250         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2VTTGENPADCTRL3),      \
251         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL3),         \
252         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL4),         \
253         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL5),         \
254         DEFINE_REG(TEGRA_EMC_BASE, EMC_XM2DQSPADCTRL6),         \
255         DEFINE_REG(TEGRA_EMC_BASE, EMC_DSR_VTTGEN_DRV),         \
256         DEFINE_REG(TEGRA_EMC_BASE, EMC_TXDSRVTTGEN),            \
257         DEFINE_REG(TEGRA_EMC_BASE, EMC_FBIO_SPARE),             \
258         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_INTERVAL),          \
259         DEFINE_REG(TEGRA_EMC_BASE, EMC_ZCAL_WAIT_CNT),          \
260         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT),           \
261         DEFINE_REG(TEGRA_EMC_BASE, EMC_MRS_WAIT_CNT2),          \
262         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT),                    \
263         DEFINE_REG(TEGRA_EMC_BASE, EMC_CTT_DURATION),           \
264         DEFINE_REG(TEGRA_EMC_BASE, EMC_CFG_PIPE),               \
265         DEFINE_REG(TEGRA_EMC_BASE, EMC_DYN_SELF_REF_CONTROL),   \
266         DEFINE_REG(TEGRA_EMC_BASE, EMC_QPOP),                   \
267         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_CFG),             \
268         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_OUTSTANDING_REQ), \
269         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RCD),      \
270         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RP),       \
271         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RC),       \
272         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAS),      \
273         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_FAW),      \
274         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RRD),      \
275         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_RAP2PRE),  \
276         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_WAP2PRE),  \
277         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2R),      \
278         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2W),      \
279         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_R2W),      \
280         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_TIMING_W2R),      \
281         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_TURNS),        \
282         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_DA_COVERS),       \
283         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC0),           \
284         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_MISC1),           \
285         DEFINE_REG(TEGRA_MC_BASE, MC_EMEM_ARB_RING1_THROTTLE),
286
287 #define BURST_UP_DOWN_REG_LIST \
288         DEFINE_REG(TEGRA_MC_BASE, MC_MLL_MPCORER_PTSA_RATE),            \
289         DEFINE_REG(TEGRA_MC_BASE, MC_PTSA_GRANT_DECREMENT),             \
290         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_XUSB_0),         \
291         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_XUSB_1),         \
292         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_TSEC_0),         \
293         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCA_0),       \
294         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCAA_0),      \
295         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMC_0),        \
296         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SDMMCAB_0),      \
297         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_PPCS_0),         \
298         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_PPCS_1),         \
299         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MPCORE_0),       \
300         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MPCORELP_0),     \
301         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HC_0),           \
302         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HC_1),           \
303         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_AVPC_0),         \
304         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_GPU_0),          \
305         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_MSENC_0),        \
306         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_HDA_0),          \
307         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VIC_0),          \
308         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VI2_0),          \
309         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2_0),         \
310         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2_1),         \
311         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2B_0),        \
312         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_ISP2B_1),        \
313         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_0),          \
314         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_1),          \
315         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_2),          \
316         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_VDE_3),          \
317         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_SATA_0),         \
318         DEFINE_REG(TEGRA_MC_BASE, MC_LATENCY_ALLOWANCE_AFI_0),
319
320 #define DEFINE_REG(base, reg) ((base) ? (IO_ADDRESS((base)) + (reg)) : 0)
321 static void __iomem *burst_reg_addr[TEGRA12_EMC_MAX_NUM_REGS] = {
322         BURST_REG_LIST
323 };
324
325 #ifndef EMULATE_CLOCK_SWITCH
326 static void __iomem *burst_up_down_reg_addr[TEGRA12_EMC_MAX_NUM_REGS] = {
327         BURST_UP_DOWN_REG_LIST
328 };
329 #endif
330 #undef DEFINE_REG
331
332 #define DEFINE_REG(base, reg)   reg##_INDEX
333 enum {
334         BURST_REG_LIST
335 };
336 #undef DEFINE_REG
337
338 #define BGBIAS_VAL(val, reg, bit) \
339         (((val)>>EMC_##reg##_##bit##_SHIFT) & 0x1)
340
341 #define BGBIAS_SET_NUM(val, n, reg, bit) \
342         (((val) & ~(0x1<<EMC_##reg##_##bit##_SHIFT)) | \
343                 (((n)&0x1) << (EMC_##reg##_##bit##_SHIFT)))
344
345 struct emc_sel {
346         struct clk      *input;
347         u32             value;
348         unsigned long   input_rate;
349 };
350 static struct emc_sel tegra_emc_clk_sel[TEGRA_EMC_TABLE_MAX_SIZE];
351 static struct tegra12_emc_table start_timing;
352 static const struct tegra12_emc_table *emc_timing;
353
354 static ktime_t clkchange_time;
355 static int clkchange_delay = 100;
356
357 static const u32 *dram_to_soc_bit_map;
358 static const struct tegra12_emc_table *tegra_emc_table;
359 static int tegra_emc_table_size;
360
361 static u32 dram_dev_num;
362 static u32 dram_type = -1;
363
364 static struct clk *emc;
365
366 static struct {
367         cputime64_t time_at_clock[TEGRA_EMC_TABLE_MAX_SIZE];
368         int last_sel;
369         u64 last_update;
370         u64 clkchange_count;
371         spinlock_t spinlock;
372 } emc_stats;
373
374 static DEFINE_SPINLOCK(emc_access_lock);
375
376
377 static void __iomem *emc_base = IO_ADDRESS(TEGRA_EMC_BASE);
378 static void __iomem *mc_base = IO_ADDRESS(TEGRA_MC_BASE);
379 static void __iomem *clk_base = IO_ADDRESS(TEGRA_CLK_RESET_BASE);
380
/* Accessors for the EMC and MC register apertures mapped above. */
static inline void emc_writel(u32 val, unsigned long addr)
{
	writel(val, emc_base + addr);
}

static inline u32 emc_readl(unsigned long addr)
{
	return readl(emc_base + addr);
}
static inline void mc_writel(u32 val, unsigned long addr)
{
	writel(val, mc_base + addr);
}
static inline u32 mc_readl(unsigned long addr)
{
	return readl(mc_base + addr);
}
/* Queue a deferred register write into the EMC clock-change FIFO:
 * DATA is written first, then ADDR. NOTE(review): the ADDR write
 * presumably commits the FIFO entry, so the order of these two writes
 * must not be swapped — confirm against the TRM. */
static inline void ccfifo_writel(u32 val, unsigned long addr)
{
	writel(val, emc_base + EMC_CCFIFO_DATA);
	writel(addr, emc_base + EMC_CCFIFO_ADDR);
}
403
404 static inline u32 disable_power_features(u32 inreg)
405 {
406         u32 mod_reg = inreg;
407         mod_reg &= ~(EMC_CFG_DYN_SREF);
408         mod_reg &= ~(EMC_CFG_DRAM_ACPD);
409         mod_reg &= ~(EMC_CFG_DRAM_CLKSTOP_SR);
410         mod_reg &= ~(EMC_CFG_DRAM_CLKSTOP_PD);
411         mod_reg &= ~(EMC_CFG_DSR_VTTGEN_DRV_EN);
412         return mod_reg;
413 }
414
415 static inline u32 emc_sel_dpd_ctrl_enabled(u32 inreg)
416 {
417         if (dram_type == DRAM_TYPE_DDR3)
418                 return inreg & (EMC_SEL_DPD_CTRL_DDR3_MASK);
419         else
420                 return inreg & (EMC_SEL_DPD_CTRL_MASK);
421 }
422
423 static inline u32 disable_emc_sel_dpd_ctrl(u32 inreg)
424 {
425         u32 mod_reg = inreg;
426         mod_reg &= ~(EMC_SEL_DPD_CTRL_DATA_SEL_DPD);
427         mod_reg &= ~(EMC_SEL_DPD_CTRL_ODT_SEL_DPD);
428         if (dram_type == DRAM_TYPE_DDR3)
429                 mod_reg &= ~(EMC_SEL_DPD_CTRL_RESET_SEL_DPD);
430         mod_reg &= ~(EMC_SEL_DPD_CTRL_CA_SEL_DPD);
431         mod_reg &= ~(EMC_SEL_DPD_CTRL_CLK_SEL_DPD);
432         return mod_reg;
433 }
434
435 static int last_round_idx;
436 static inline int get_start_idx(unsigned long rate)
437 {
438         if (tegra_emc_table[last_round_idx].rate == rate)
439                 return last_round_idx;
440         return 0;
441 }
/*
 * Charge the jiffies elapsed since the previous update to the table
 * entry that was selected so far, then record @last_sel as the new
 * current selection. Indices >= TEGRA_EMC_TABLE_MAX_SIZE serve as an
 * "invalid" selector: they are neither charged time nor stored, and
 * do not bump the clock-change counter.
 */
static void emc_last_stats_update(int last_sel)
{
	unsigned long flags;
	u64 cur_jiffies = get_jiffies_64();

	spin_lock_irqsave(&emc_stats.spinlock, flags);

	/* accumulate time spent at the previously selected rate */
	if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
		emc_stats.time_at_clock[emc_stats.last_sel] =
			emc_stats.time_at_clock[emc_stats.last_sel] +
			(cur_jiffies - emc_stats.last_update);

	emc_stats.last_update = cur_jiffies;

	if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
		emc_stats.clkchange_count++;
		emc_stats.last_sel = last_sel;
	}
	spin_unlock_irqrestore(&emc_stats.spinlock, flags);
}
462
463 static int wait_for_update(u32 status_reg, u32 bit_mask, bool updated_state)
464 {
465         int i;
466         for (i = 0; i < EMC_STATUS_UPDATE_TIMEOUT; i++) {
467                 if (!!(emc_readl(status_reg) & bit_mask) == updated_state)
468                         return 0;
469                 udelay(1);
470         }
471         return -ETIMEDOUT;
472 }
473
/*
 * Latch the shadowed EMC timing registers into the active register set
 * and wait for the hardware to finish the update. A timeout leaves the
 * controller in an undefined state, so it is treated as fatal (BUG).
 */
static inline void emc_timing_update(void)
{
	int err;

	emc_writel(0x1, EMC_TIMING_CONTROL);
	err = wait_for_update(EMC_STATUS,
			      EMC_STATUS_TIMING_UPDATE_STALLED, false);
	if (err) {
		pr_err("%s: timing update error: %d", __func__, err);
		BUG();
	}
}
486
487 static inline void auto_cal_disable(void)
488 {
489         int err;
490
491         emc_writel(0, EMC_AUTO_CAL_INTERVAL);
492         err = wait_for_update(EMC_AUTO_CAL_STATUS,
493                               EMC_AUTO_CAL_STATUS_ACTIVE, false);
494         if (err) {
495                 pr_err("%s: disable auto-cal error: %d", __func__, err);
496                 BUG();
497         }
498 }
499
500 static inline void wait_auto_cal_disable(void)
501 {
502         int err;
503
504         err = wait_for_update(EMC_AUTO_CAL_STATUS,
505                               EMC_AUTO_CAL_STATUS_ACTIVE, false);
506         if (err) {
507                 pr_err("%s: wait disable auto-cal error: %d", __func__, err);
508                 BUG();
509         }
510 }
511 static inline unsigned int bgbias_preset(const struct tegra12_emc_table *next_timing,
512                               const struct tegra12_emc_table *last_timing)
513 {
514         static unsigned int ret;
515         static unsigned int data, reg_val;
516         ret = 0;
517         data = last_timing->emc_bgbias_ctl0;
518         reg_val = emc_readl(EMC_BGBIAS_CTL0);
519
520         if ((BGBIAS_VAL(next_timing->emc_bgbias_ctl0, BGBIAS_CTL0,
521                         BIAS0_DSC_E_PWRD_IBIAS_RX) == 0) &&
522                         (BGBIAS_VAL(reg_val, BGBIAS_CTL0,
523                         BIAS0_DSC_E_PWRD_IBIAS_RX) == 1)) {
524                 data = BGBIAS_SET_NUM(data, 0, BGBIAS_CTL0,
525                                 BIAS0_DSC_E_PWRD_IBIAS_RX);
526                 ret = 1;
527         }
528
529         if ((BGBIAS_VAL(reg_val, BGBIAS_CTL0,
530                         BIAS0_DSC_E_PWRD) == 1) ||
531                         (BGBIAS_VAL(reg_val, BGBIAS_CTL0,
532                         BIAS0_DSC_E_PWRD_IBIAS_VTTGEN) == 1))
533                         ret = 1;
534
535         if (ret == 1)
536                 emc_writel(data, EMC_BGBIAS_CTL0);
537         return ret;
538
539 }
540
541 static inline bool dqs_preset(const struct tegra12_emc_table *next_timing,
542                               const struct tegra12_emc_table *last_timing)
543 {
544         bool ret = false;
545         static unsigned int data;
546         data = emc_readl(EMC_XM2DQSPADCTRL2);
547
548 #define DQS_SET(reg, bit)                                               \
549         do {                                            \
550                 if ((next_timing->burst_regs[EMC_##reg##_INDEX] &       \
551                      EMC_##reg##_##bit##_ENABLE) &&                     \
552                         (!(data &       \
553                        EMC_##reg##_##bit##_ENABLE)))   {                \
554                                 data = (data \
555                                    | EMC_##reg##_##bit##_ENABLE); \
556                         ret = true;                                     \
557                 }                                                       \
558         } while (0)
559         DQS_SET(XM2DQSPADCTRL2, VREF);
560         DQS_SET(XM2DQSPADCTRL2, RX_FT_REC);
561         if (ret == 1)
562                         emc_writel(data, EMC_XM2DQSPADCTRL2);
563         return ret;
564 }
565
/*
 * For DDR3 when the DLL is re-started: rewrite the MRS_WAIT_CNT
 * long-wait field with max(table short-wait, expected DLL-lock time).
 * The expected time starts at 512 cycles and is reduced by any
 * overlapping zq-calibration (256 cycles per DRAM device).
 */
static inline void overwrite_mrs_wait_cnt(
	const struct tegra12_emc_table *next_timing,
	bool zcal_long)
{
	u32 reg;
	u32 cnt = 512;

	/* For ddr3 when DLL is re-started: overwrite EMC DFS table settings
	   for MRS_WAIT_LONG with maximum of MRS_WAIT_SHORT settings and
	   expected operation length. Reduce the latter by the overlapping
	   zq-calibration, if any */
	if (zcal_long)
		cnt -= dram_dev_num * 256;

	/* floor the count at the table's short-wait value */
	reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
		EMC_MRS_WAIT_CNT_SHORT_WAIT_MASK) >>
		EMC_MRS_WAIT_CNT_SHORT_WAIT_SHIFT;
	if (cnt < reg)
		cnt = reg;

	/* splice the computed count into the long-wait field only */
	reg = (next_timing->burst_regs[EMC_MRS_WAIT_CNT_INDEX] &
		(~EMC_MRS_WAIT_CNT_LONG_WAIT_MASK));
	reg |= (cnt << EMC_MRS_WAIT_CNT_LONG_WAIT_SHIFT) &
		EMC_MRS_WAIT_CNT_LONG_WAIT_MASK;

	emc_writel(reg, EMC_MRS_WAIT_CNT);
}
593
594 static inline int get_dll_change(const struct tegra12_emc_table *next_timing,
595                                  const struct tegra12_emc_table *last_timing)
596 {
597         bool next_dll_enabled = !(next_timing->emc_mode_1 & 0x1);
598         bool last_dll_enabled = !(last_timing->emc_mode_1 & 0x1);
599
600         if (next_dll_enabled == last_dll_enabled)
601                 return DLL_CHANGE_NONE;
602         else if (next_dll_enabled)
603                 return DLL_CHANGE_ON;
604         else
605                 return DLL_CHANGE_OFF;
606 }
607
/*
 * Queue the DRAM mode-register writes required by the new timing into
 * the clock-change FIFO. Only registers whose values differ from the
 * last timing are written. DDR3 uses MRS/EMRS commands in the order
 * mode_1, mode_2, mode_reset; LPDDR uses MRW commands in the order
 * mode_2, mode_1, mode_4 (mode_reset is not applicable there).
 */
static inline void set_dram_mode(const struct tegra12_emc_table *next_timing,
				 const struct tegra12_emc_table *last_timing,
				 int dll_change)
{
	if (dram_type == DRAM_TYPE_DDR3) {
		/* first mode_1, then mode_2, then mode_reset*/
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			ccfifo_writel(next_timing->emc_mode_2, EMC_EMRS2);

		/* mode_reset is also forced whenever the DLL is being
		   turned on, with the DLL-reset and long-count bits set */
		if ((next_timing->emc_mode_reset !=
		     last_timing->emc_mode_reset) ||
		    (dll_change == DLL_CHANGE_ON)) {
			u32 reg = next_timing->emc_mode_reset &
				(~EMC_MODE_SET_DLL_RESET);
			if (dll_change == DLL_CHANGE_ON) {
				reg |= EMC_MODE_SET_DLL_RESET;
				reg |= EMC_MODE_SET_LONG_CNT;
			}
			ccfifo_writel(reg, EMC_MRS);
		}
	} else {
		/* first mode_2, then mode_1; mode_reset is not applicable */
		if (next_timing->emc_mode_2 != last_timing->emc_mode_2)
			ccfifo_writel(next_timing->emc_mode_2, EMC_MRW2);
		if (next_timing->emc_mode_1 != last_timing->emc_mode_1)
			ccfifo_writel(next_timing->emc_mode_1, EMC_MRW);
		if (next_timing->emc_mode_4 != last_timing->emc_mode_4)
			ccfifo_writel(next_timing->emc_mode_4, EMC_MRW4);
	}
}
640
/*
 * Program the new EMC clock source/divider and wait for the controller
 * to report the change complete. The dummy reads act as barriers: the
 * initial MC/EMC reads flush previously posted writes before the clock
 * mux is touched, and the read-back of the clock register flushes the
 * mux write itself. A timeout is fatal (BUG) since the memory clock is
 * then in an unknown state.
 */
static inline void do_clock_change(u32 clk_setting)
{
	int err;

	mc_readl(MC_EMEM_ADR_CFG);	/* completes prev writes */
	emc_readl(EMC_INTSTATUS);

	writel(clk_setting,
		(void __iomem *)((u32)clk_base + emc->reg));
	readl((void __iomem *)((u32)clk_base + emc->reg));
				/* completes prev write */

	err = wait_for_update(EMC_INTSTATUS,
			      EMC_INTSTATUS_CLKCHANGE_COMPLETE, true);
	if (err) {
		pr_err("%s: clock change completion error: %d", __func__, err);
		BUG();
	}
}
660
/*
 * Switch EMC to a new timing/clock setting.
 *
 * Implements the numbered hardware clock-change sequence: quiesce
 * power-saving features and auto-calibration, stage the next timing in
 * the shadow burst registers and the clock-change FIFO (ccfifo), perform
 * the actual source switch, then restore calibration/power settings for
 * the new rate.  Called under emc_access_lock with interrupts disabled;
 * the statement order below follows the memory controller protocol and
 * must not be reordered.
 */
static noinline void emc_set_clock(const struct tegra12_emc_table *next_timing,
				   const struct tegra12_emc_table *last_timing,
				   u32 clk_setting)
{
#ifndef EMULATE_CLOCK_SWITCH
	int i, dll_change, pre_wait;
	int ctt_term_changed = 0;
	bool cfg_pow_features_enabled, zcal_long;
	u32 bgbias_ctl, auto_cal_status, auto_cal_config;
	u32 emc_cfg_reg = emc_readl(EMC_CFG);
	u32 sel_dpd_ctrl = emc_readl(EMC_SEL_DPD_CTRL);
	u32 emc_cfg_2_reg = emc_readl(EMC_CFG_2);
	auto_cal_status = emc_readl(EMC_AUTO_CAL_STATUS);
	cfg_pow_features_enabled = (emc_cfg_reg & EMC_CFG_PWR_MASK);
	dll_change = get_dll_change(next_timing, last_timing);
	/* long ZQ calibration is required when periodic cal turns on */
	zcal_long = (next_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] != 0) &&
		(last_timing->burst_regs[EMC_ZCAL_INTERVAL_INDEX] == 0);

	/* 1. clear clkchange_complete interrupts */
	emc_writel(EMC_INTSTATUS_CLKCHANGE_COMPLETE, EMC_INTSTATUS);

	/* 2. disable dynamic self-refresh and preset dqs vref, then wait for
	   possible self-refresh entry/exit and/or dqs vref settled - waiting
	   before the clock change decreases worst case change stall time */
	pre_wait = 0;
	if (cfg_pow_features_enabled) {
		emc_cfg_reg = disable_power_features(emc_cfg_reg);
		emc_writel(emc_cfg_reg, EMC_CFG);
		pre_wait = 5;		/* 5us+ for self-refresh entry/exit */
	}
	/* 2.1 disable sel_dpd_ctrl before starting clock change */
	if (emc_sel_dpd_ctrl_enabled(sel_dpd_ctrl)) {
		sel_dpd_ctrl = disable_emc_sel_dpd_ctrl(sel_dpd_ctrl);
		emc_writel(sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
	}

	/* 2.5 check dq/dqs vref delay */
	if (bgbias_preset(next_timing, last_timing)) {
		if (pre_wait < 5)
			pre_wait = 5;
	}
	if (dqs_preset(next_timing, last_timing)) {
		if (pre_wait < 30)
			pre_wait = 30;	/* 3us+ for dqs vref settled */
	}

	if (pre_wait) {
		emc_timing_update();
		udelay(pre_wait);
	}
	/* 2.5.1 Disable auto_cal for clock change*/
	emc_writel(0, EMC_AUTO_CAL_INTERVAL);
	auto_cal_config = emc_readl(EMC_AUTO_CAL_CONFIG);
	auto_cal_status = emc_readl(EMC_AUTO_CAL_STATUS);

	/* NOTE(review): ctt_term_changed is still 0 here (it is only
	   computed in step 2.6 below), so the !ctt_term_changed term always
	   holds on this pass - confirm this ordering is intended. */
	if (((next_timing->emc_auto_cal_config & 1) <<
		EMC_AUTO_CAL_CONFIG_AUTO_CAL_START_SHIFT) &&
		!((auto_cal_status >> EMC_AUTO_CAL_STATUS_SHIFT) & 1) &&
		!ctt_term_changed) {
		auto_cal_config = ((auto_cal_config &
			~(1 << EMC_AUTO_CAL_CONFIG_AUTO_CAL_START_SHIFT))
			| (1 << EMC_AUTO_CAL_CONFIG_AUTO_CAL_START_SHIFT));
		emc_writel(auto_cal_config, EMC_AUTO_CAL_CONFIG);
	}

	/* 2.6 Program CTT_TERM Control if it changed since last time*/
	/* Bug-1258083, software hack for updating */
	/* EMC_CCT_TERM_CTRL/term-slope */
	/* offset values instantly */
	ctt_term_changed = (last_timing->emc_ctt_term_ctrl !=
				next_timing->emc_ctt_term_ctrl);
	if (last_timing->emc_ctt_term_ctrl !=
				next_timing->emc_ctt_term_ctrl) {
		auto_cal_disable();
		emc_writel(next_timing->emc_ctt_term_ctrl, EMC_CTT_TERM_CTRL);
	}
	if (ctt_term_changed)
		emc_timing_update();

	/* 3. disable auto-cal if vref mode is switching - removed */

	/* 4. program burst shadow registers */
	for (i = 0; i < next_timing->burst_regs_num; i++) {
		if (!burst_reg_addr[i])
			continue;
		__raw_writel(next_timing->burst_regs[i], burst_reg_addr[i]);
	}
	emc_cfg_reg = next_timing->emc_cfg;
	emc_cfg_reg = disable_power_features(emc_cfg_reg);
	ccfifo_writel(emc_cfg_reg, EMC_CFG);

	/*step 4.1 , program auto_cal_config
	registers for proper offset propagation
	bug 1372978 */
	if (last_timing->emc_auto_cal_config2
		!= next_timing->emc_auto_cal_config2)
		ccfifo_writel(next_timing->emc_auto_cal_config2,
				EMC_AUTO_CAL_CONFIG2);
	if (last_timing->emc_auto_cal_config3 !=
				next_timing->emc_auto_cal_config3)
		ccfifo_writel(next_timing->emc_auto_cal_config3,
				EMC_AUTO_CAL_CONFIG3);
	if (last_timing->emc_auto_cal_config !=
				next_timing->emc_auto_cal_config) {
		/* stage the new config with the start bit cleared */
		auto_cal_config =
				next_timing->emc_auto_cal_config;
		auto_cal_config = auto_cal_config &
			~(1 << EMC_AUTO_CAL_CONFIG_AUTO_CAL_START_SHIFT);
		ccfifo_writel(auto_cal_config, EMC_AUTO_CAL_CONFIG);
	}
	wmb();
	barrier();

	/* 4.1 On ddr3 when DLL is re-started predict MRS long wait count and
	   overwrite DFS table setting  */
	if ((dram_type == DRAM_TYPE_DDR3) && (dll_change == DLL_CHANGE_ON))
		overwrite_mrs_wait_cnt(next_timing, zcal_long);

	/* 5.2 disable auto-refresh to save time after clock change */
	/* move to ccfifo in step 6.1 */

	/* 5.3 post cfg_2 write and dis ob clock gate */
	emc_cfg_2_reg = next_timing->emc_cfg_2;

	if (emc_cfg_2_reg & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR)
		emc_cfg_2_reg &= ~EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR;
	ccfifo_writel(emc_cfg_2_reg, EMC_CFG_2);
	/* 6. turn Off dll and enter self-refresh on DDR3  */
	if (dram_type == DRAM_TYPE_DDR3) {
		if (dll_change == DLL_CHANGE_OFF)
			ccfifo_writel(next_timing->emc_mode_1, EMC_EMRS);
	}
	/* 6.1, disable refresh controller using ccfifo  */
	ccfifo_writel(EMC_REFCTRL_DISABLE_ALL(dram_dev_num), EMC_REFCTRL);
	if (dram_type == DRAM_TYPE_DDR3) {
		ccfifo_writel(DRAM_BROADCAST(dram_dev_num) |
			      EMC_SELF_REF_CMD_ENABLED, EMC_SELF_REF);
	}
	/* 7. flow control marker 2 */
	ccfifo_writel(1, EMC_STALL_THEN_EXE_AFTER_CLKCHANGE);

	/* 8. exit self-refresh on DDR3 */
	if (dram_type == DRAM_TYPE_DDR3)
		ccfifo_writel(DRAM_BROADCAST(dram_dev_num), EMC_SELF_REF);
	ccfifo_writel(EMC_REFCTRL_ENABLE_ALL(dram_dev_num), EMC_REFCTRL);
	/* 9. set dram mode registers */
	set_dram_mode(next_timing, last_timing, dll_change);

	/* 10. issue zcal command if turning zcal On */
	if (zcal_long) {
		ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV0, EMC_ZQ_CAL);
		if (dram_dev_num > 1)
			ccfifo_writel(EMC_ZQ_CAL_LONG_CMD_DEV1, EMC_ZQ_CAL);
	}

	/* 10.1 dummy write to RO register to remove stall after change */
	ccfifo_writel(0, EMC_CCFIFO_STATUS);

	/* 11.1 DIS_STP_OB_CLK_DURING_NON_WR ->0 */
	if (next_timing->emc_cfg_2 & EMC_CFG_2_DIS_STP_OB_CLK_DURING_NON_WR) {
		emc_cfg_2_reg = next_timing->emc_cfg_2;
		ccfifo_writel(emc_cfg_2_reg, EMC_CFG_2);
	}

	/* 11.2 disable auto_cal for clock change */
	wait_auto_cal_disable();

	/* 11.5 program burst_up_down registers if emc rate is going down */
	if (next_timing->rate < last_timing->rate) {
		for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
			__raw_writel(next_timing->burst_up_down_regs[i],
				burst_up_down_reg_addr[i]);
		wmb();
	}

	/* 12-14. read any MC register to ensure the programming is done
	   change EMC clock source register wait for clk change completion */
	do_clock_change(clk_setting);

	/* 14.2 program burst_up_down registers if emc rate is going up */
	if (next_timing->rate > last_timing->rate) {
		for (i = 0; i < next_timing->burst_up_down_regs_num; i++)
			__raw_writel(next_timing->burst_up_down_regs[i],
				burst_up_down_reg_addr[i]);
		wmb();
	}

	/* 15. restore auto-cal */
	if (last_timing->emc_ctt_term_ctrl != next_timing->emc_ctt_term_ctrl)
		emc_writel(next_timing->emc_acal_interval,
			EMC_AUTO_CAL_INTERVAL);

	/* 16. restore dynamic self-refresh */
	if (next_timing->emc_cfg & EMC_CFG_PWR_MASK) {
		emc_cfg_reg = next_timing->emc_cfg;
		emc_writel(emc_cfg_reg, EMC_CFG);
	}

	/* 17. set zcal wait count */
	emc_writel(next_timing->emc_zcal_cnt_long, EMC_ZCAL_WAIT_CNT);

	/* 17.1 turning off bgbias if lpddr3 dram and freq is low
	   (DRAM_TYPE_LPDDR2 also denotes LPDDR3 parts, see
	   tegra12_is_lpddr3()).
	   NOTE(review): the status value read below lands in auto_cal_config
	   and is never used afterwards - confirm the read is needed only for
	   its side effect. */
	auto_cal_config = emc_readl(EMC_AUTO_CAL_STATUS);
	if ((dram_type == DRAM_TYPE_LPDDR2) &&
		(BGBIAS_VAL(next_timing->emc_bgbias_ctl0,
			BGBIAS_CTL0, BIAS0_DSC_E_PWRD_IBIAS_RX) == 1)) {

		/* 17.1.3 Full power down bgbias */
		bgbias_ctl = next_timing->emc_bgbias_ctl0;
		bgbias_ctl = BGBIAS_SET_NUM(bgbias_ctl, 1,
				BGBIAS_CTL0, BIAS0_DSC_E_PWRD_IBIAS_VTTGEN);
		bgbias_ctl = BGBIAS_SET_NUM(bgbias_ctl, 1,
				BGBIAS_CTL0, BIAS0_DSC_E_PWRD);
		emc_writel(bgbias_ctl, EMC_BGBIAS_CTL0);
	} else {
		if (dram_type == DRAM_TYPE_DDR3)
			if (emc_readl(EMC_BGBIAS_CTL0) !=
				next_timing->emc_bgbias_ctl0)
				emc_writel(next_timing->emc_bgbias_ctl0,
						EMC_BGBIAS_CTL0);
		emc_writel(next_timing->emc_acal_interval,
			EMC_AUTO_CAL_INTERVAL);
	}

	/* 18. update restored timing */
	udelay(2);
	/* 18.1. program sel_dpd at end so if any enabling needs to happen.*/
	/* It happens at last,as dpd should off during clock change. */
	/* bug 1342517 */
	emc_writel(next_timing->emc_sel_dpd_ctrl, EMC_SEL_DPD_CTRL);
	emc_timing_update();
#else
	/* FIXME: implement */
	pr_info("tegra12_emc: Configuring EMC rate %lu (setting: 0x%x)\n",
		next_timing->rate, clk_setting);
#endif

}
899
900 static inline void emc_get_timing(struct tegra12_emc_table *timing)
901 {
902         int i;
903
904         /* Burst updates depends on previous state; burst_up_down are
905          * stateless. */
906         for (i = 0; i < timing->burst_regs_num; i++) {
907                 if (burst_reg_addr[i])
908                         timing->burst_regs[i] = __raw_readl(burst_reg_addr[i]);
909                 else
910                         timing->burst_regs[i] = 0;
911         }
912         timing->emc_acal_interval = 0;
913         timing->emc_zcal_cnt_long = 0;
914         timing->emc_mode_reset = 0;
915         timing->emc_mode_1 = 0;
916         timing->emc_mode_2 = 0;
917         timing->emc_mode_4 = 0;
918         timing->emc_cfg = emc_readl(EMC_CFG);
919         timing->rate = clk_get_rate_locked(emc) / 1000;
920 }
921
922 /* The EMC registers have shadow registers. When the EMC clock is updated
923  * in the clock controller, the shadow registers are copied to the active
924  * registers, allowing glitchless memory bus frequency changes.
925  * This function updates the shadow registers for a new clock frequency,
926  * and relies on the clock lock on the emc clock to avoid races between
927  * multiple frequency changes. In addition access lock prevents concurrent
928  * access to EMC registers from reading MRR registers */
929 int tegra_emc_set_rate(unsigned long rate)
930 {
931         int i;
932         u32 clk_setting;
933         const struct tegra12_emc_table *last_timing;
934         unsigned long flags;
935         s64 last_change_delay;
936
937         if (!tegra_emc_table)
938                 return -EINVAL;
939
940         /* Table entries specify rate in kHz */
941         rate = rate / 1000;
942
943         i = get_start_idx(rate);
944         for (; i < tegra_emc_table_size; i++) {
945                 if (tegra_emc_clk_sel[i].input == NULL)
946                         continue;       /* invalid entry */
947
948                 if (tegra_emc_table[i].rate == rate)
949                         break;
950         }
951
952         if (i >= tegra_emc_table_size)
953                 return -EINVAL;
954
955         if (!emc_timing) {
956                 /* can not assume that boot timing matches dfs table even
957                    if boot frequency matches one of the table nodes */
958                 emc_get_timing(&start_timing);
959                 last_timing = &start_timing;
960         } else
961                 last_timing = emc_timing;
962
963         clk_setting = tegra_emc_clk_sel[i].value;
964
965         last_change_delay = ktime_us_delta(ktime_get(), clkchange_time);
966         if ((last_change_delay >= 0) && (last_change_delay < clkchange_delay))
967                 udelay(clkchange_delay - (int)last_change_delay);
968
969         spin_lock_irqsave(&emc_access_lock, flags);
970         emc_set_clock(&tegra_emc_table[i], last_timing, clk_setting);
971         clkchange_time = ktime_get();
972         emc_timing = &tegra_emc_table[i];
973         tegra_mc_divider_update(emc);
974         spin_unlock_irqrestore(&emc_access_lock, flags);
975
976         emc_last_stats_update(i);
977
978         pr_debug("%s: rate %lu setting 0x%x\n", __func__, rate, clk_setting);
979
980         return 0;
981 }
982
983 long tegra_emc_round_rate_updown(unsigned long rate, bool up)
984 {
985         int i;
986         unsigned long table_rate;
987
988         if (!tegra_emc_table)
989                 return clk_get_rate_locked(emc); /* no table - no rate change */
990
991         if (!emc_enable)
992                 return -EINVAL;
993
994         pr_debug("%s: %lu\n", __func__, rate);
995
996         /* Table entries specify rate in kHz */
997         rate = rate / 1000;
998
999         i = get_start_idx(rate);
1000         for (; i < tegra_emc_table_size; i++) {
1001                 if (tegra_emc_clk_sel[i].input == NULL)
1002                         continue;       /* invalid entry */
1003
1004                 table_rate = tegra_emc_table[i].rate;
1005                 if (table_rate >= rate) {
1006                         if (!up && i && (table_rate > rate)) {
1007                                 i--;
1008                                 table_rate = tegra_emc_table[i].rate;
1009                         }
1010                         pr_debug("%s: using %lu\n", __func__, table_rate);
1011                         last_round_idx = i;
1012                         return table_rate * 1000;
1013                 }
1014         }
1015
1016         return -EINVAL;
1017 }
1018
1019 struct clk *tegra_emc_predict_parent(unsigned long rate, u32 *div_value)
1020 {
1021         int i;
1022
1023         if (!tegra_emc_table) {
1024                 if (rate == clk_get_rate_locked(emc)) {
1025                         *div_value = emc->div - 2;
1026                         return emc->parent;
1027                 }
1028                 return NULL;
1029         }
1030
1031         pr_debug("%s: %lu\n", __func__, rate);
1032
1033         /* Table entries specify rate in kHz */
1034         rate = rate / 1000;
1035
1036         i = get_start_idx(rate);
1037         for (; i < tegra_emc_table_size; i++) {
1038                 if (tegra_emc_table[i].rate == rate) {
1039                         struct clk *p = tegra_emc_clk_sel[i].input;
1040
1041                         if (p && (tegra_emc_clk_sel[i].input_rate ==
1042                                   clk_get_rate(p))) {
1043                                 *div_value = (tegra_emc_clk_sel[i].value &
1044                                         EMC_CLK_DIV_MASK) >> EMC_CLK_DIV_SHIFT;
1045                                 return p;
1046                         }
1047                 }
1048         }
1049         return NULL;
1050 }
1051
/*
 * Check whether the table clock source for EMC rate @rate (Hz) already
 * runs at the required input rate.  Returns true when the parent is
 * ready, or when no table / no matching entry exists (nothing needs
 * preparing).  Returns false when the parent must be reprogrammed; in
 * that case *parent and *parent_rate identify it, and *backup_rate is
 * set to an intermediate EMC rate reachable from another source, or to
 * -EINVAL when no backup exists.
 */
bool tegra_emc_is_parent_ready(unsigned long rate, struct clk **parent,
		unsigned long *parent_rate, unsigned long *backup_rate)
{

	int i;
	struct clk *p = NULL;
	unsigned long p_rate = 0;

	if (!tegra_emc_table)
		return true;

	pr_debug("%s: %lu\n", __func__, rate);

	/* Table entries specify rate in kHz */
	rate = rate / 1000;

	i = get_start_idx(rate);
	for (; i < tegra_emc_table_size; i++) {
		if (tegra_emc_table[i].rate == rate) {
			p = tegra_emc_clk_sel[i].input;
			if (!p)
				continue;	/* invalid entry */

			p_rate = tegra_emc_clk_sel[i].input_rate;
			if (p_rate == clk_get_rate(p))
				return true;
			/* matched but parent not at rate: stop here so the
			   backup search below continues from index i */
			break;
		}
	}

	/* Table match not found - "non existing parent" is ready */
	if (!p)
		return true;

#ifdef CONFIG_TEGRA_PLLM_SCALED
	/*
	 * Table match found, but parent is not ready - check if backup entry
	 * was found during initialization, and return the respective backup
	 * rate
	 */
	if (emc->shared_bus_backup.input &&
	    (emc->shared_bus_backup.input != p)) {
		*parent = p;
		*parent_rate = p_rate;
		*backup_rate = emc->shared_bus_backup.bus_rate;
		return false;
	}
#else
	/*
	 * Table match found, but parent is not ready - continue search
	 * for backup rate: min rate above requested that has different
	 * parent source (since only pll_c is scaled and may not be ready,
	 * any other parent can provide backup)
	 */
	*parent = p;
	*parent_rate = p_rate;

	for (i++; i < tegra_emc_table_size; i++) {
		p = tegra_emc_clk_sel[i].input;
		if (!p)
			continue;	/* invalid entry */

		if (p != (*parent)) {
			*backup_rate = tegra_emc_table[i].rate * 1000;
			return false;
		}
	}
#endif
	/* Parent is not ready, and no backup found */
	*backup_rate = -EINVAL;
	return false;
}
1124
1125 static inline const struct clk_mux_sel *get_emc_input(u32 val)
1126 {
1127         const struct clk_mux_sel *sel;
1128
1129         for (sel = emc->inputs; sel->input != NULL; sel++) {
1130                 if (sel->value == val)
1131                         break;
1132         }
1133         return sel;
1134 }
1135
/*
 * Validate one DFS table entry and resolve its clock source.
 *
 * Decodes the divider and source fields of the entry's src_sel_reg,
 * sanity-checks them (even divider, known source, no divided low-jitter
 * path, consistent EMC-to-MC frequency ratio), verifies that a fixed
 * source runs exactly at the implied input rate, and fills in
 * @emc_clk_sel on success.  Returns 0 when the entry is usable, -EINVAL
 * otherwise (the caller then skips the entry).
 */
static int find_matching_input(const struct tegra12_emc_table *table,
	struct clk *pll_c, struct clk *pll_m, struct emc_sel *emc_clk_sel)
{
	u32 div_value = (table->src_sel_reg & EMC_CLK_DIV_MASK) >>
		EMC_CLK_DIV_SHIFT;
	u32 src_value = (table->src_sel_reg & EMC_CLK_SOURCE_MASK) >>
		EMC_CLK_SOURCE_SHIFT;
	unsigned long input_rate = 0;
	unsigned long table_rate = table->rate * 1000; /* table rate in kHz */
	const struct clk_mux_sel *sel = get_emc_input(src_value);

#ifdef CONFIG_TEGRA_PLLM_SCALED
	struct clk *scalable_pll = pll_m;
#else
	struct clk *scalable_pll = pll_c;
#endif
	pr_info_once("tegra: %s is selected as scalable EMC clock source\n",
		     scalable_pll->name);

	if (div_value & 0x1) {
		pr_warn("tegra: invalid odd divider for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	if (!sel->input) {
		pr_warn("tegra: no matching input found for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	if (div_value && (table->src_sel_reg & EMC_CLK_LOW_JITTER_ENABLE)) {
		pr_warn("tegra: invalid LJ path for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}
	/* the MC_SAME_FREQ flag must agree between the source register and
	   the burst register copy of MC_EMEM_ARB_MISC0 */
	if (!(table->src_sel_reg & EMC_CLK_MC_SAME_FREQ) !=
	    !(MC_EMEM_ARB_MISC0_EMC_SAME_FREQ &
	      table->burst_regs[MC_EMEM_ARB_MISC0_INDEX])) {
		pr_warn("tegra: ambiguous EMC to MC ratio for EMC rate %lu\n",
			table_rate);
		return -EINVAL;
	}

#ifndef CONFIG_TEGRA_DUAL_CBUS
	if (sel->input == pll_c) {
		pr_warn("tegra: %s is cbus source: no EMC rate %lu support\n",
			sel->input->name, table_rate);
		return -EINVAL;
	}
#endif

	if (sel->input == scalable_pll) {
		/* scalable source will be programmed to this rate */
		input_rate = table_rate * (1 + div_value / 2);
	} else {
		/* all other sources are fixed, must exactly match the rate */
		input_rate = clk_get_rate(sel->input);
		if (input_rate != (table_rate * (1 + div_value / 2))) {
			pr_warn("tegra: EMC rate %lu does not match %s rate %lu\n",
				table_rate, sel->input->name, input_rate);
			return -EINVAL;
		}
	}

#ifdef CONFIG_TEGRA_PLLM_SCALED
		if (sel->input == pll_c) {
			/* maybe overwritten in a loop - end up at max rate
			   from pll_c */
			emc->shared_bus_backup.input = pll_c;
			emc->shared_bus_backup.bus_rate = table_rate;
		}
#endif
	/* Get ready emc clock selection settings for this table rate */
	emc_clk_sel->input = sel->input;
	emc_clk_sel->input_rate = input_rate;
	emc_clk_sel->value = table->src_sel_reg;

	return 0;
}
1213
1214
/* Voltage ladder built from the table's emc_min_mv values; entries pair
 * 1:1 with emc->dvfs->freqs[] as filled by adjust_emc_dvfs_table(). */
static int emc_core_millivolts[MAX_DVFS_FREQS];

/*
 * Rebuild the EMC DVFS frequency/voltage ladder from the validated DFS
 * table.  Consecutive table entries requiring the same voltage are
 * squashed into a single DVFS step keyed by the highest rate at that
 * voltage; i counts the resulting steps while j walks the table.
 */
static void adjust_emc_dvfs_table(const struct tegra12_emc_table *table,
				  int table_size)
{
	int i, j, mv;
	unsigned long rate;

	BUG_ON(table_size > MAX_DVFS_FREQS);

	for (i = 0, j = 0; j < table_size; j++) {
		if (tegra_emc_clk_sel[j].input == NULL)
			continue;	/* invalid entry */

		rate = table[j].rate * 1000;
		mv = table[j].emc_min_mv;

		if ((i == 0) || (mv > emc_core_millivolts[i-1])) {
			/* advance: voltage has increased */
			emc->dvfs->freqs[i] = rate;
			emc_core_millivolts[i] = mv;
			i++;
		} else {
			/* squash: voltage has not increased */
			emc->dvfs->freqs[i-1] = rate;
		}
	}

	emc->dvfs->millivolts = emc_core_millivolts;
	emc->dvfs->num_freqs = i;
}
1246
#ifdef CONFIG_TEGRA_PLLM_SCALED
/* When pll_m is scaled, pll_c must provide backup rate;
   if not - remove rates that require pll_m scaling */
static int purge_emc_table(unsigned long max_rate)
{
	int i;
	int ret = 0;

	/* A backup source was recorded during table init - nothing to do */
	if (emc->shared_bus_backup.input)
		return ret;

	pr_warn("tegra: selected pll_m scaling option but no backup source:\n");
	pr_warn("       removed not supported entries from the table:\n");

	/* made all entries with non matching rate invalid */
	for (i = 0; i < tegra_emc_table_size; i++) {
		struct emc_sel *sel = &tegra_emc_clk_sel[i];
		if (sel->input) {
			if (clk_get_rate(sel->input) != sel->input_rate) {
				pr_warn("       EMC rate %lu\n",
					tegra_emc_table[i].rate * 1000);
				sel->input = NULL;
				sel->input_rate = 0;
				sel->value = 0;
				/* tell caller the max rate itself was purged
				   so it can fall back on the boot rate */
				if (max_rate == tegra_emc_table[i].rate)
					ret = -EINVAL;
			}
		}
	}
	return ret;
}
#else
/* When pll_m is fixed @ max EMC rate, it always provides backup for pll_c */
#define purge_emc_table(max_rate) (0)
#endif
1282
/*
 * Validate the EMC DFS @table (@table_size entries, rates in kHz) and
 * install it as tegra_emc_table.
 *
 * Checks DRAM type and table revision, resolves a clock source for each
 * entry, determines the maximum EMC rate (boot rate, or the highest
 * valid entry when max-DVFS selection is enabled), purges unreachable
 * rates, builds the DVFS ladder and cross-checks it against the nominal
 * voltage, and finally programs the clock change mode in EMC_CFG_2.
 * Returns 0 on success or -ENODATA when the table is unusable.
 */
static int init_emc_table(const struct tegra12_emc_table *table, int table_size)
{
	int i, mv;
	u32 reg;
	bool max_entry = false;
	bool emc_max_dvfs_sel = get_emc_max_dvfs();
	unsigned long boot_rate, max_rate;
	struct clk *pll_c = tegra_get_clock_by_name("pll_c");
	struct clk *pll_m = tegra_get_clock_by_name("pll_m");

	emc_stats.clkchange_count = 0;
	spin_lock_init(&emc_stats.spinlock);
	emc_stats.last_update = get_jiffies_64();
	emc_stats.last_sel = TEGRA_EMC_TABLE_MAX_SIZE;

	if ((dram_type != DRAM_TYPE_DDR3) && (dram_type != DRAM_TYPE_LPDDR2)) {
		pr_err("tegra: not supported DRAM type %u\n", dram_type);
		return -ENODATA;
	}

	if (!table || !table_size) {
		pr_err("tegra: EMC DFS table is empty\n");
		return -ENODATA;
	}

	boot_rate = clk_get_rate(emc) / 1000;
	max_rate = boot_rate;

	tegra_emc_table_size = min(table_size, TEGRA_EMC_TABLE_MAX_SIZE);
	/* only table revisions 0x18 and 0x19 are supported */
	switch (table[0].rev) {
	case 0x18:
	case 0x19:
		start_timing.burst_regs_num = table[0].burst_regs_num;
		break;
	default:
		pr_err("tegra: invalid EMC DFS table: unknown rev 0x%x\n",
			table[0].rev);
		return -ENODATA;
	}

	/* Match EMC source/divider settings with table entries */
	for (i = 0; i < tegra_emc_table_size; i++) {
		unsigned long table_rate = table[i].rate;

		/* Stop: "no-rate" entry, or entry violating ascending order */
		if (!table_rate || (i && ((table_rate <= table[i-1].rate) ||
			(table[i].emc_min_mv < table[i-1].emc_min_mv)))) {
			pr_warn("tegra: EMC rate entry %lu is not ascending\n",
				table_rate);
			break;
		}

		BUG_ON(table[i].rev != table[0].rev);

		if (find_matching_input(&table[i], pll_c, pll_m,
					&tegra_emc_clk_sel[i]))
			continue;

		if (table_rate == boot_rate)
			emc_stats.last_sel = i;

		if (emc_max_dvfs_sel) {
			/* EMC max rate = max table entry above boot rate */
			if (table_rate >= max_rate) {
				max_rate = table_rate;
				max_entry = true;
			}
		} else if (table_rate == max_rate) {
			/* EMC max rate = boot rate */
			max_entry = true;
			break;
		}
	}

	/* Validate EMC rate and voltage limits */
	if (!max_entry) {
		pr_err("tegra: invalid EMC DFS table: entry for max rate"
		       " %lu kHz is not found\n", max_rate);
		return -ENODATA;
	}

	tegra_emc_table = table;

	/*
	 * Purge rates that cannot be reached because table does not specify
	 * proper backup source. If maximum rate was purged, fall back on boot
	 * rate as maximum limit. In any case propagate new maximum limit
	 * down stream to shared users, and check it against nominal voltage.
	 */
	if (purge_emc_table(max_rate))
		max_rate = boot_rate;
	tegra_init_max_rate(emc, max_rate * 1000);

	if (emc->dvfs) {
		adjust_emc_dvfs_table(tegra_emc_table, tegra_emc_table_size);
		mv = tegra_dvfs_predict_peak_millivolts(emc, max_rate * 1000);
		if ((mv <= 0) || (mv > emc->dvfs->max_millivolts)) {
			/* max rate unreachable at nominal voltage - reject */
			tegra_emc_table = NULL;
			pr_err("tegra: invalid EMC DFS table: maximum rate %lu"
			       " kHz does not match nominal voltage %d\n",
			       max_rate, emc->dvfs->max_millivolts);
			return -ENODATA;
		}
	}

	pr_info("tegra: validated EMC DFS table\n");

	/* Configure clock change mode according to dram type */
	reg = emc_readl(EMC_CFG_2) & (~EMC_CFG_2_MODE_MASK);
	reg |= ((dram_type == DRAM_TYPE_LPDDR2) ? EMC_CFG_2_PD_MODE :
		EMC_CFG_2_SREF_MODE) << EMC_CFG_2_MODE_SHIFT;
	emc_writel(reg, EMC_CFG_2);
	return 0;
}
1397
1398 #ifdef CONFIG_PASR
1399 static bool tegra12_is_lpddr3(void)
1400 {
1401         return (dram_type == DRAM_TYPE_LPDDR2);
1402 }
1403
1404 static void tegra12_pasr_apply_mask(u16 *mem_reg, void *cookie)
1405 {
1406         u32 val = 0;
1407         int device = (int)cookie;
1408
1409         val = TEGRA_EMC_MODE_REG_17 | *mem_reg;
1410         val |= device << TEGRA_EMC_MRW_DEV_SHIFT;
1411
1412         emc_writel(val, EMC_MRW);
1413
1414         pr_debug("%s: cookie = %d mem_reg = 0x%04x val = 0x%08x\n", __func__,
1415                         (int)cookie, *mem_reg, val);
1416 }
1417
1418 static void tegra12_pasr_remove_mask(phys_addr_t base, void *cookie)
1419 {
1420         u16 mem_reg = 0;
1421
1422         if (!pasr_register_mask_function(base, NULL, cookie))
1423                         tegra12_pasr_apply_mask(&mem_reg, cookie);
1424
1425 }
1426
1427 static int tegra12_pasr_set_mask(phys_addr_t base, void *cookie)
1428 {
1429         return pasr_register_mask_function(base, &tegra12_pasr_apply_mask,
1430                                         cookie);
1431 }
1432
1433 static int tegra12_pasr_enable(const char *arg, const struct kernel_param *kp)
1434 {
1435         unsigned int old_pasr_enable;
1436         void *cookie;
1437         int num_devices;
1438         u64 device_size;
1439         u64 size_mul;
1440         int ret = 0;
1441
1442         if (!tegra12_is_lpddr3())
1443                 return -ENOSYS;
1444
1445         old_pasr_enable = pasr_enable;
1446         param_set_int(arg, kp);
1447
1448         if (old_pasr_enable == pasr_enable)
1449                 return ret;
1450
1451         num_devices = 1 << (mc_readl(MC_EMEM_ADR_CFG) & BIT(0));
1452         size_mul = 1 << ((emc_readl(EMC_FBIO_CFG5) >> 4) & BIT(0));
1453
1454         /* Cookie represents the device number to write to MRW register.
1455          * 0x2 to for only dev0, 0x1 for dev1.
1456          */
1457         if (pasr_enable == 0) {
1458                 cookie = (void *)(int)TEGRA_EMC_MRW_DEV1;
1459
1460                 tegra12_pasr_remove_mask(TEGRA_DRAM_BASE, cookie);
1461
1462                 if (num_devices == 1)
1463                         goto exit;
1464
1465                 cookie = (void *)(int)TEGRA_EMC_MRW_DEV2;
1466                 /* Next device is located after first device, so read DEV0 size
1467                  * to decide base address for DEV1 */
1468                 device_size = 1 << ((mc_readl(MC_EMEM_ADR_CFG_DEV0) >>
1469                                         MC_EMEM_DEV_SIZE_SHIFT) &
1470                                         MC_EMEM_DEV_SIZE_MASK);
1471                 device_size = device_size * size_mul * SZ_4M;
1472
1473                 tegra12_pasr_remove_mask(TEGRA_DRAM_BASE + device_size, cookie);
1474         } else {
1475                 cookie = (void *)(int)TEGRA_EMC_MRW_DEV1;
1476
1477                 ret = tegra12_pasr_set_mask(TEGRA_DRAM_BASE, cookie);
1478
1479                 if (num_devices == 1 || ret)
1480                         goto exit;
1481
1482                 cookie = (void *)(int)TEGRA_EMC_MRW_DEV2;
1483
1484                 /* Next device is located after first device, so read DEV0 size
1485                  * to decide base address for DEV1 */
1486                 device_size = 1 << ((mc_readl(MC_EMEM_ADR_CFG_DEV0) >>
1487                                         MC_EMEM_DEV_SIZE_SHIFT) &
1488                                         MC_EMEM_DEV_SIZE_MASK);
1489                 device_size = device_size * size_mul * SZ_4M;
1490
1491                 ret = tegra12_pasr_set_mask(TEGRA_DRAM_BASE + device_size, cookie);
1492         }
1493
1494 exit:
1495         return ret;
1496 }
1497
1498 static struct kernel_param_ops tegra12_pasr_enable_ops = {
1499         .set = tegra12_pasr_enable,
1500         .get = param_get_int,
1501 };
1502 module_param_cb(pasr_enable, &tegra12_pasr_enable_ops, &pasr_enable, 0644);
1503 #endif
1504
/*
 * Enable memory-arbiter hysteresis (hold-off) for the latency-sensitive
 * MC clients listed below (display, video decode/encode, ISP, GPU, VI).
 * NOTE(review): the client macros come in both HYST_* and YST_* spellings;
 * they appear to be distinct names from the register headers rather than
 * typos -- confirm before renaming.
 */
void tegra12_mc_holdoff_enable(void)
{
	mc_writel(HYST_MSENCSRD | HYST_DISPLAYHCB | HYST_DISPLAYHC |
		HYST_DISPLAY0CB | HYST_DISPLAY0C | YST_DISPLAY0BB |
		YST_DISPLAY0B | YST_DISPLAY0AB | YST_DISPLAY0A,
		MC_EMEM_ARB_HYSTERESIS_0_0);
	mc_writel(HYST_VDEDBGW | HYST_VDEBSEVW | HYST_MSENCSWR |
		YST_VDETPER | YST_VDEMCER | YST_VDEMBER | YST_VDEBSEVR,
		MC_EMEM_ARB_HYSTERESIS_1_0);
	mc_writel(HYST_DISPLAYT | HYST_GPUSWR | HYST_ISPWBB |
		HYST_ISPWAB | HYST_ISPRAB | YST_ISPWB | YST_ISPWA |
		YST_ISPRA | YST_VDETPMW | YST_VDEMBEW,
		MC_EMEM_ARB_HYSTERESIS_2_0);
	mc_writel(HYST_DISPLAYD | HYST_VIW | HYST_VICSWR | HYST_VICSRD,
		MC_EMEM_ARB_HYSTERESIS_3_0);
}
1521
1522 static int tegra12_emc_probe(struct platform_device *pdev)
1523 {
1524         struct tegra12_emc_pdata *pdata;
1525         struct resource *res;
1526
1527         if (tegra_emc_table)
1528                 return -EINVAL;
1529
1530         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1531         if (!res) {
1532                 dev_err(&pdev->dev, "missing register base\n");
1533                 return -ENOMEM;
1534         }
1535
1536         pdata = pdev->dev.platform_data;
1537
1538         if (!pdata) {
1539                 pdata = tegra_emc_dt_parse_pdata(pdev);
1540         }
1541
1542         if (!pdata) {
1543                 dev_err(&pdev->dev, "missing platform data\n");
1544                 return -ENODATA;
1545         }
1546
1547         return init_emc_table(pdata->tables, pdata->num_tables);
1548 }
1549
1550 static struct of_device_id tegra12_emc_of_match[] = {
1551         { .compatible = "nvidia,tegra12-emc", },
1552         { },
1553 };
1554
/*
 * EMC platform driver.  .of_match_table is deliberately left unset here;
 * tegra12_emc_init() fills it in only when the board supplied no platform
 * data, so the struct must stay non-const.
 */
static struct platform_driver tegra12_emc_driver = {
	.driver         = {
		.name   = "tegra-emc",
		.owner  = THIS_MODULE,
	},
	.probe          = tegra12_emc_probe,
};
1562
/*
 * Arch-level init: register the EMC platform driver, seed the ISO usage
 * share tables, and -- when scaling is enabled -- preset the EMC clock
 * monitor to the boot rate rounded down to a table rate.  MC hold-off is
 * enabled regardless of whether driver registration succeeded.
 */
int __init tegra12_emc_init(void)
{
	int ret;

	/* Use DT matching only when the board gave no platform data */
	if (!tegra_emc_device.dev.platform_data)
		tegra12_emc_driver.driver.of_match_table = tegra12_emc_of_match;
	ret = platform_driver_register(&tegra12_emc_driver);

	if (!ret) {
		tegra_emc_iso_usage_table_init(tegra12_emc_iso_usage,
				ARRAY_SIZE(tegra12_emc_iso_usage));
		if (emc_enable) {
			unsigned long rate = tegra_emc_round_rate_updown(
				emc->boot_rate, false);
			if (!IS_ERR_VALUE(rate))
				tegra_clk_preset_emc_monitor(rate);
		}
	}
	tegra12_mc_holdoff_enable();
	return ret;
}
1584
/*
 * Drop the cached current EMC timing (forcing re-selection on the next
 * rate change) and refresh the MC divider derived from the EMC clock.
 */
void tegra_emc_timing_invalidate(void)
{
	emc_timing = NULL;
	tegra_mc_divider_update(emc);
}
1590
/*
 * Latch the EMC clock handle and sample boot-time DRAM configuration:
 * DRAM type from EMC_FBIO_CFG5 and the number of populated devices from
 * MC_EMEM_ADR_CFG.
 */
void tegra_emc_dram_type_init(struct clk *c)
{
	emc = c;

	dram_type = (emc_readl(EMC_FBIO_CFG5) &
		     EMC_CFG5_TYPE_MASK) >> EMC_CFG5_TYPE_SHIFT;

	dram_dev_num = (mc_readl(MC_EMEM_ADR_CFG) & 0x1) + 1; /* 2 dev max */
}
1600
/* Return the DRAM type sampled by tegra_emc_dram_type_init(). */
int tegra_emc_get_dram_type(void)
{
	return dram_type;
}
1605
1606 static u32 soc_to_dram_bit_swap(u32 soc_val, u32 dram_mask, u32 dram_shift)
1607 {
1608         int bit;
1609         u32 dram_val = 0;
1610
1611         /* tegra clocks definitions use shifted mask always */
1612         if (!dram_to_soc_bit_map)
1613                 return soc_val & dram_mask;
1614
1615         for (bit = dram_shift; bit < 32; bit++) {
1616                 u32 dram_bit_mask = 0x1 << bit;
1617                 u32 soc_bit_mask = dram_to_soc_bit_map[bit];
1618
1619                 if (!(dram_bit_mask & dram_mask))
1620                         break;
1621
1622                 if (soc_bit_mask & soc_val)
1623                         dram_val |= dram_bit_mask;
1624         }
1625
1626         return dram_val;
1627 }
1628
/*
 * Read LPDDR2 mode register @addr from DRAM device @dev via the EMC MRR
 * mechanism.  Returns the masked MRR data on success, negative errno on
 * failure.  NOTE(review): the only visible caller holds emc_access_lock
 * around this -- presumably that is a requirement; confirm at other call
 * sites.
 */
static int emc_read_mrr(int dev, int addr)
{
	int ret;
	u32 val, emc_cfg;

	/* MRR is only meaningful for LPDDR parts */
	if (dram_type != DRAM_TYPE_LPDDR2)
		return -ENODEV;

	/* Wait for any previous MRR data to be consumed */
	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, false);
	if (ret)
		return ret;

	/* Temporarily disable DRAM active power-down around the read */
	emc_cfg = emc_readl(EMC_CFG);
	if (emc_cfg & EMC_CFG_DRAM_ACPD) {
		emc_writel(emc_cfg & ~EMC_CFG_DRAM_ACPD, EMC_CFG);
		emc_timing_update();
	}

	/* Issue the mode-register read for the selected device/address */
	val = dev ? DRAM_DEV_SEL_1 : DRAM_DEV_SEL_0;
	val |= (addr << EMC_MRR_MA_SHIFT) & EMC_MRR_MA_MASK;
	emc_writel(val, EMC_MRR);

	/* Wait for data-valid, then restore the original ACPD setting */
	ret = wait_for_update(EMC_STATUS, EMC_STATUS_MRR_DIVLD, true);
	if (emc_cfg & EMC_CFG_DRAM_ACPD) {
		emc_writel(emc_cfg, EMC_CFG);
		emc_timing_update();
	}
	if (ret)
		return ret;

	val = emc_readl(EMC_MRR) & EMC_MRR_DATA_MASK;
	return val;
}
1662
1663 int tegra_emc_get_dram_temperature(void)
1664 {
1665         int mr4;
1666         unsigned long flags;
1667
1668         spin_lock_irqsave(&emc_access_lock, flags);
1669
1670         mr4 = emc_read_mrr(0, 4);
1671         if (IS_ERR_VALUE(mr4)) {
1672                 spin_unlock_irqrestore(&emc_access_lock, flags);
1673                 return mr4;
1674         }
1675         spin_unlock_irqrestore(&emc_access_lock, flags);
1676
1677         mr4 = soc_to_dram_bit_swap(
1678                 mr4, LPDDR2_MR4_TEMP_MASK, LPDDR2_MR4_TEMP_SHIFT);
1679         return mr4;
1680 }
1681
1682
1683 #ifdef CONFIG_TEGRA_USE_NCT
1684 int tegra12_nct_emc_table_init(struct tegra12_emc_pdata *nct_emc_pdata)
1685 {
1686         union nct_item_type *entry = NULL;
1687         struct tegra12_emc_table *mem_table_ptr;
1688         u8 *src, *dest;
1689         unsigned int i, non_zero_freqs;
1690         int ret = 0;
1691
1692         /* Allocating memory for holding a single NCT entry */
1693         entry = kmalloc(sizeof(union nct_item_type), GFP_KERNEL);
1694         if (!entry) {
1695                 pr_err("%s: failed to allocate buffer for single entry. ",
1696                                                                 __func__);
1697                 ret = -ENOMEM;
1698                 goto done;
1699         }
1700         src = (u8 *)entry;
1701
1702         /* Counting the actual number of frequencies present in the table */
1703         non_zero_freqs = 0;
1704         for (i = 0; i < TEGRA_EMC_MAX_FREQS; i++) {
1705                 if (!tegra_nct_read_item(NCT_ID_MEMTABLE + i, entry)) {
1706                         if (entry->tegra_emc_table.tegra12_emc_table.rate > 0) {
1707                                 non_zero_freqs++;
1708                                 pr_info("%s: Found NCT item for freq %lu.\n",
1709                                  __func__,
1710                                  entry->tegra_emc_table.tegra12_emc_table.rate);
1711                         } else
1712                                 break;
1713                 } else {
1714                         pr_err("%s: NCT: Could not read item for %dth freq.\n",
1715                                                                 __func__, i);
1716                         ret = -EIO;
1717                         goto free_entry;
1718                 }
1719         }
1720
1721         /* Allocating memory for the DVFS table */
1722         mem_table_ptr = kmalloc(sizeof(struct tegra12_emc_table) *
1723                                 non_zero_freqs, GFP_KERNEL);
1724         if (!mem_table_ptr) {
1725                 pr_err("%s: Memory allocation for emc table failed.",
1726                                                             __func__);
1727                 ret = -ENOMEM;
1728                 goto free_entry;
1729         }
1730
1731         /* Copy paste the emc table from NCT partition */
1732         for (i = 0; i < non_zero_freqs; i++) {
1733                 /*
1734                  * We reset the whole buffer, to emulate the property
1735                  * of a static variable being initialized to zero
1736                  */
1737                 memset(entry, 0, sizeof(*entry));
1738                 ret = tegra_nct_read_item(NCT_ID_MEMTABLE + i, entry);
1739                 if (!ret) {
1740                         dest = (u8 *)mem_table_ptr + (i * sizeof(struct
1741                                                         tegra12_emc_table));
1742                         memcpy(dest, src, sizeof(struct tegra12_emc_table));
1743                 } else {
1744                         pr_err("%s: Could not copy item for %dth freq.\n",
1745                                                                 __func__, i);
1746                         goto free_mem_table_ptr;
1747                 }
1748         }
1749
1750         /* Setting appropriate pointers */
1751         nct_emc_pdata->tables = mem_table_ptr;
1752         nct_emc_pdata->num_tables = non_zero_freqs;
1753
1754         goto free_entry;
1755
1756 free_mem_table_ptr:
1757         kfree(mem_table_ptr);
1758 free_entry:
1759         kfree(entry);
1760 done:
1761         return ret;
1762 }
1763 #endif
1764
1765 static inline int bw_calc_get_freq_idx(unsigned long bw)
1766 {
1767         int idx = 0;
1768
1769         if (bw > bw_calc_freqs[TEGRA_EMC_ISO_USE_FREQ_MAX_NUM-1] * MHZ)
1770                 idx = TEGRA_EMC_ISO_USE_FREQ_MAX_NUM;
1771
1772         for (; idx < TEGRA_EMC_ISO_USE_FREQ_MAX_NUM; idx++) {
1773                 u32 freq = bw_calc_freqs[idx] * MHZ;
1774                 if (bw < freq) {
1775                         if (idx)
1776                                 idx--;
1777                         break;
1778                 } else if (bw == freq)
1779                         break;
1780         }
1781
1782         return idx;
1783 }
1784
/* ISO efficiency share (%) for the os-idle scenario at the given ISO bw. */
static u8 iso_share_calc_t124_os_idle(unsigned long iso_bw)
{
	int freq_idx = bw_calc_get_freq_idx(iso_bw);
	return tegra12_emc_usage_shared_os_idle[freq_idx];
}
1790
/* ISO efficiency share (%) for the general scenario at the given ISO bw. */
static u8 iso_share_calc_t124_general(unsigned long iso_bw)
{
	int freq_idx = bw_calc_get_freq_idx(iso_bw);
	return tegra12_emc_usage_shared_general[freq_idx];
}
1796
1797
1798 #ifdef CONFIG_DEBUG_FS
1799
/* Root of the "tegra_emc" debugfs dir, created in tegra_emc_debug_init() */
static struct dentry *emc_debugfs_root;
1801
1802 static int emc_stats_show(struct seq_file *s, void *data)
1803 {
1804         int i;
1805
1806         emc_last_stats_update(TEGRA_EMC_TABLE_MAX_SIZE);
1807
1808         seq_printf(s, "%-10s %-10s\n", "rate kHz", "time");
1809         for (i = 0; i < tegra_emc_table_size; i++) {
1810                 if (tegra_emc_clk_sel[i].input == NULL)
1811                         continue;       /* invalid entry */
1812
1813                 seq_printf(s, "%-10lu %-10llu\n", tegra_emc_table[i].rate,
1814                         cputime64_to_clock_t(emc_stats.time_at_clock[i]));
1815         }
1816         seq_printf(s, "%-15s %llu\n", "transitions:",
1817                    emc_stats.clkchange_count);
1818         seq_printf(s, "%-15s %llu\n", "time-stamp:",
1819                    cputime64_to_clock_t(emc_stats.last_update));
1820
1821         return 0;
1822 }
1823
/* debugfs open hook: bind emc_stats_show to a seq_file. */
static int emc_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_stats_show, inode->i_private);
}
1828
/* Read-only seq_file ops for the "stats" debugfs node. */
static const struct file_operations emc_stats_fops = {
	.open           = emc_stats_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
1835
1836 static int emc_table_info_show(struct seq_file *s, void *data)
1837 {
1838         int i;
1839         for (i = 0; i < tegra_emc_table_size; i++) {
1840                 if (tegra_emc_clk_sel[i].input == NULL)
1841                         continue;
1842                 seq_printf(s, "Table info:\n   Rev: 0x%02x\n"
1843                 "   Table ID: %s\n", tegra_emc_table[i].rev,
1844                 tegra_emc_table[i].table_id);
1845                 seq_printf(s, "    %lu\n", tegra_emc_table[i].rate);
1846         }
1847
1848         return 0;
1849 }
1850
/* debugfs open hook: bind emc_table_info_show to a seq_file. */
static int emc_table_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, emc_table_info_show, inode->i_private);
}
1855
/* Read-only seq_file ops for the "table_info" debugfs node. */
static const struct file_operations emc_table_info_fops = {
	.open           = emc_table_info_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};
1862
/*
 * debugfs "dram_temperature" getter: MR4 temperature field, or a negative
 * errno (stored into the u64 and printed via %lld) on failure.
 */
static int dram_temperature_get(void *data, u64 *val)
{
	*val = tegra_emc_get_dram_temperature();
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dram_temperature_fops, dram_temperature_get,
			NULL, "%lld\n");
1870
1871 static int efficiency_get(void *data, u64 *val)
1872 {
1873         *val = tegra_emc_bw_efficiency;
1874         return 0;
1875 }
1876 static int efficiency_set(void *data, u64 val)
1877 {
1878         tegra_emc_bw_efficiency = (val > 100) ? 100 : val;
1879         if (emc)
1880                 tegra_clk_shared_bus_update(emc);
1881
1882         return 0;
1883 }
1884 DEFINE_SIMPLE_ATTRIBUTE(efficiency_fops, efficiency_get,
1885                         efficiency_set, "%llu\n");
1886
/*
 * Create the tegra_emc debugfs hierarchy: stats, clkchange_delay,
 * dram_temperature (LPDDR parts only), efficiency, the ISO-usage nodes
 * and table_info.  Any partial hierarchy is removed again on failure.
 */
static int __init tegra_emc_debug_init(void)
{
	/* Nothing to expose when no DFS table was initialized */
	if (!tegra_emc_table)
		return 0;

	emc_debugfs_root = debugfs_create_dir("tegra_emc", NULL);
	if (!emc_debugfs_root)
		return -ENOMEM;

	if (!debugfs_create_file(
		"stats", S_IRUGO, emc_debugfs_root, NULL, &emc_stats_fops))
		goto err_out;

	if (!debugfs_create_u32("clkchange_delay", S_IRUGO | S_IWUSR,
		emc_debugfs_root, (u32 *)&clkchange_delay))
		goto err_out;

	/*
	 * Reading dram temperature supported only for LP DDR variants,
	 * Currently two variants of DDR are supported i.e. LPDDR2 and DDR3
	 */
	if (dram_type == DRAM_TYPE_LPDDR2 &&
		!debugfs_create_file("dram_temperature",
		S_IRUGO, emc_debugfs_root, NULL, &dram_temperature_fops))
		goto err_out;

	if (!debugfs_create_file("efficiency", S_IRUGO | S_IWUSR,
				 emc_debugfs_root, NULL, &efficiency_fops))
		goto err_out;


	if (tegra_emc_iso_usage_debugfs_init(emc_debugfs_root))
		goto err_out;

	if (!debugfs_create_file("table_info", S_IRUGO,
				 emc_debugfs_root, NULL, &emc_table_info_fops))
		goto err_out;

	return 0;

err_out:
	/* tear down whatever part of the hierarchy was created */
	debugfs_remove_recursive(emc_debugfs_root);
	return -ENOMEM;
}

late_initcall(tegra_emc_debug_init);
1933 #endif