/*
 * Copyright (C) 2010 Google, Inc.
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
17 #include <linux/err.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/platform_device.h>
21 #include <linux/clk.h>
24 #include <linux/of_device.h>
25 #include <linux/of_gpio.h>
26 #include <linux/gpio.h>
27 #include <linux/slab.h>
28 #include <linux/mmc/card.h>
29 #include <linux/mmc/host.h>
30 #include <linux/module.h>
31 #include <linux/mmc/sd.h>
32 #include <linux/regulator/consumer.h>
33 #include <linux/delay.h>
34 #include <linux/pm_runtime.h>
39 #include <linux/debugfs.h>
40 #include <linux/seq_file.h>
41 #include <linux/reboot.h>
42 #include <linux/devfreq.h>
44 #include <mach/hardware.h>
45 #include <linux/platform_data/mmc-sdhci-tegra.h>
46 #include <mach/pinmux.h>
47 #include <mach/pm_domains.h>
50 #include "sdhci-pltfm.h"
52 #define SDHCI_VNDR_CLK_CTRL 0x100
53 #define SDHCI_VNDR_CLK_CTRL_SDMMC_CLK 0x1
54 #define SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE 0x8
55 #define SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE 0x4
56 #define SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK 0x2
57 #define SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT 16
58 #define SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT 24
59 #define SDHCI_VNDR_CLK_CTRL_SDR50_TUNING 0x20
60 #define SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK 0x2
62 #define SDHCI_VNDR_MISC_CTRL 0x120
63 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT 0x8
64 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT 0x10
65 #define SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT 0x200
66 #define SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0 0x20
67 #define SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT 0x1
68 #define SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK 0x180
70 #define SDHCI_VNDR_PRESET_VAL0_0 0x1d4
71 #define SDCLK_FREQ_SEL_HS_SHIFT 20
72 #define SDCLK_FREQ_SEL_DEFAULT_SHIFT 10
74 #define SDHCI_VNDR_PRESET_VAL1_0 0x1d8
75 #define SDCLK_FREQ_SEL_SDR50_SHIFT 20
76 #define SDCLK_FREQ_SEL_SDR25_SHIFT 10
78 #define SDHCI_VNDR_PRESET_VAL2_0 0x1dc
79 #define SDCLK_FREQ_SEL_DDR50_SHIFT 10
81 #define SDMMC_SDMEMCOMPPADCTRL 0x1E0
82 #define SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK 0xF
83 #define SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK 0x80000000
85 #define SDMMC_AUTO_CAL_CONFIG 0x1E4
86 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START 0x80000000
87 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE 0x20000000
88 #define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT 0x8
/*
 * Auto-calibration pull-down/pull-up offsets differ per SoC generation.
 * NOTE(review): restored the #else/#endif — without them the second pair
 * of defines redefines the first with different values (compile error).
 */
#if defined(CONFIG_ARCH_TEGRA_14x_SOC)
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET	0x1
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET	0x1
#else
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET	0x70
#define SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET	0x62
#endif
97 #define SDMMC_AUTO_CAL_STATUS 0x1EC
98 #define SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE 0x80000000
99 #define SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET 24
100 #define PULLUP_ADJUSTMENT_OFFSET 20
102 #define SDHOST_1V8_OCR_MASK 0x8
103 #define SDHOST_HIGH_VOLT_MIN 2700000
104 #define SDHOST_HIGH_VOLT_MAX 3600000
105 #define SDHOST_HIGH_VOLT_2V8 2800000
106 #define SDHOST_LOW_VOLT_MIN 1800000
107 #define SDHOST_LOW_VOLT_MAX 1800000
108 #define SDHOST_HIGH_VOLT_3V2 3200000
110 #define MAX_DIVISOR_VALUE 128
111 #define DEFAULT_SDHOST_FREQ 50000000
113 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8 128
114 #define MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4 64
115 #define MAX_TAP_VALUES 255
116 #define TUNING_FREQ_COUNT 2
117 #define TUNING_VOLTAGES_COUNT 2
119 #define TUNING_RETRIES 1
120 #define SDMMC_AHB_MAX_FREQ 150000000
121 #define SDMMC_EMC_MAX_FREQ 150000000
123 static unsigned int uhs_max_freq_MHz[] = {
124 [MMC_TIMING_UHS_SDR50] = 100,
125 [MMC_TIMING_UHS_SDR104] = 208,
126 [MMC_TIMING_MMC_HS200] = 200,
130 /* Erratum: Version register is invalid in HW */
131 #define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
132 /* Erratum: Enable block gap interrupt detection */
133 #define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
134 /* Do not enable auto calibration if the platform doesn't support */
135 #define NVQUIRK_DISABLE_AUTO_CALIBRATION BIT(2)
136 /* Set Calibration Offsets */
137 #define NVQUIRK_SET_CALIBRATION_OFFSETS BIT(3)
138 /* Set Drive Strengths */
139 #define NVQUIRK_SET_DRIVE_STRENGTH BIT(4)
140 /* Enable PADPIPE CLKEN */
141 #define NVQUIRK_ENABLE_PADPIPE_CLKEN BIT(5)
142 /* DISABLE SPI_MODE CLKEN */
143 #define NVQUIRK_DISABLE_SPI_MODE_CLKEN BIT(6)
145 #define NVQUIRK_SET_TAP_DELAY BIT(7)
147 #define NVQUIRK_SET_TRIM_DELAY BIT(8)
148 /* Enable SDHOST v3.0 support */
149 #define NVQUIRK_ENABLE_SD_3_0 BIT(9)
150 /* Enable SDR50 mode */
151 #define NVQUIRK_ENABLE_SDR50 BIT(10)
152 /* Enable SDR104 mode */
153 #define NVQUIRK_ENABLE_SDR104 BIT(11)
154 /*Enable DDR50 mode */
155 #define NVQUIRK_ENABLE_DDR50 BIT(12)
156 /* Enable Frequency Tuning for SDR50 mode */
157 #define NVQUIRK_ENABLE_SDR50_TUNING BIT(13)
158 /* Enable Infinite Erase Timeout*/
159 #define NVQUIRK_INFINITE_ERASE_TIMEOUT BIT(14)
160 /* No Calibration for sdmmc4 */
161 #define NVQUIRK_DISABLE_SDMMC4_CALIB BIT(15)
162 /* ENAABLE FEEDBACK IO CLOCK */
163 #define NVQUIRK_EN_FEEDBACK_CLK BIT(16)
164 /* Disable AUTO CMD23 */
165 #define NVQUIRK_DISABLE_AUTO_CMD23 BIT(17)
/* Update PAD_E_INPUT_OR_E_PWRD bit */
#define NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD	BIT(18)
/* Shadow write xfer mode reg and write it alongwith CMD register */
#define NVQUIRK_SHADOW_XFER_MODE_REG		BIT(19)
/*
 * In SDR50 mode, run the sdmmc controller at freq greater than
 * 104MHz to ensure the core voltage is at 1.2V. If the core voltage
 * is below 1.2V, CRC errors would occur during data transfers.
 */
#define NVQUIRK_BROKEN_SDR50_CONTROLLER_CLOCK	BIT(20)
/* Set Pipe stages value to zero */
#define NVQUIRK_SET_PIPE_STAGES_MASK_0		BIT(21)
/*
 * NOTE(review): NVQUIRK_SHADOW_XFER_MODE_REG previously shared BIT(18)
 * with NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD, so setting one quirk silently
 * enabled the other. Each quirk is tested independently with '&', so
 * every quirk must own a distinct bit; the trailing quirks were
 * renumbered accordingly (they are only referenced by name in this file).
 */
178 struct sdhci_tegra_soc_data {
179 const struct sdhci_pltfm_data *pdata;
/* Error counters exposed through debugfs (see show_error_stats_dump). */
struct sdhci_tegra_sd_stats {
	unsigned int data_crc_count;	/* data CRC errors */
	unsigned int cmd_crc_count;	/* command CRC errors */
	unsigned int data_to_count;	/* data timeouts */
	unsigned int cmd_to_count;	/* command timeouts */
};
#ifdef CONFIG_MMC_FREQ_SCALING
/* Per-card-type tunables for the devfreq governor. */
struct freq_gov_params {
	u8 idle_mon_cycles;		/* cycles of low load before downclocking */
	u8 polling_interval_ms;		/* devfreq polling period */
	u8 active_load_threshold;	/* %% load above which freq is boosted */
};

/* Indexed by MMC_TYPE_* (MMC, SD, SDIO); defaults are identical today. */
static struct freq_gov_params gov_params[3] = {
	[MMC_TYPE_MMC] = {
		.idle_mon_cycles = 3,
		.polling_interval_ms = 50,
		.active_load_threshold = 25,
	},
	[MMC_TYPE_SD] = {
		.idle_mon_cycles = 3,
		.polling_interval_ms = 50,
		.active_load_threshold = 25,
	},
	[MMC_TYPE_SDIO] = {
		.idle_mon_cycles = 3,
		.polling_interval_ms = 50,
		.active_load_threshold = 25,
	},
};
#endif
/*
 * Index of the two tuning/scaling frequencies. LOW must be 0 and HIGH 1:
 * best_tap_values[0]/[1] are indexed with these values elsewhere.
 */
enum tegra_tuning_freq {
	TUNING_LOW_FREQ,
	TUNING_HIGH_FREQ,
};
221 struct freq_tuning_params {
222 unsigned int freq_hz;
223 unsigned int nr_voltages;
224 unsigned int voltages[TUNING_VOLTAGES_COUNT];
227 static struct freq_tuning_params tuning_params[TUNING_FREQ_COUNT] = {
228 [TUNING_LOW_FREQ] = {
231 .voltages = {UINT_MAX},
233 #ifdef CONFIG_ARCH_TEGRA_14x_SOC
234 [TUNING_HIGH_FREQ] = {
235 .freq_hz = 136000000,
237 .voltages = {ULONG_MAX, 1100},
240 [TUNING_HIGH_FREQ] = {
241 .freq_hz = 156000000,
243 .voltages = {ULONG_MAX, 1100},
248 struct tap_window_data {
249 unsigned int partial_win;
250 unsigned int full_win_begin;
251 unsigned int full_win_end;
252 unsigned int tuning_ui;
253 unsigned int sampling_point;
254 bool abandon_partial_win;
255 bool abandon_full_win;
258 struct tegra_tuning_data {
259 unsigned int best_tap_value;
260 bool select_partial_win;
261 bool nominal_vcore_tun_done;
262 bool override_vcore_tun_done;
263 bool one_shot_tuning;
264 struct tap_window_data *tap_data[TUNING_VOLTAGES_COUNT];
267 struct tegra_freq_gov_data {
268 unsigned int curr_active_load;
269 unsigned int avg_active_load;
270 unsigned int act_load_high_threshold;
271 unsigned int max_idle_monitor_cycles;
272 unsigned int curr_freq;
273 unsigned int freqs[TUNING_FREQ_COUNT];
274 unsigned int freq_switch_count;
275 bool monitor_idle_load;
279 const struct tegra_sdhci_platform_data *plat;
280 const struct sdhci_tegra_soc_data *soc_data;
282 struct regulator *vdd_io_reg;
283 struct regulator *vdd_slot_reg;
284 struct regulator *vcore_reg;
285 /* Host controller instance */
286 unsigned int instance;
288 unsigned int vddio_min_uv;
290 unsigned int vddio_max_uv;
291 /* max clk supported by the platform */
292 unsigned int max_clk_limit;
293 /* max ddr clk supported by the platform */
294 unsigned int ddr_clk_limit;
296 bool is_rail_enabled;
298 bool is_sdmmc_emc_clk_on;
300 bool is_sdmmc_sclk_on;
301 unsigned int emc_max_clk;
302 struct sdhci_tegra_sd_stats *sd_stat_head;
303 struct notifier_block reboot_notify;
305 unsigned int nominal_vcore_mv;
306 unsigned int min_vcore_override_mv;
307 /* Tuning related structures and variables */
308 /* Tuning opcode to be used */
309 unsigned int tuning_opcode;
310 /* Tuning packet size */
311 unsigned int tuning_bsize;
313 unsigned int tuning_status;
314 #define TUNING_STATUS_DONE 1
315 #define TUNING_STATUS_RETUNE 2
316 /* Freq tuning information for each sampling clock freq */
317 struct tegra_tuning_data *tuning_data[TUNING_FREQ_COUNT];
318 bool set_tuning_override;
319 unsigned int best_tap_values[TUNING_FREQ_COUNT];
320 struct tegra_freq_gov_data *gov_data;
/* PLLC/PLLP clock handles and their cached rates, shared by all host
 * instances and used when choosing the best parent for the SDMMC clock. */
static struct clk *pll_c;
static struct clk *pll_p;
static unsigned long pll_c_rate;
static unsigned long pll_p_rate;

/* Forward declarations: definitions appear later in this file. */
static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
	unsigned int tap_delay);
static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
	unsigned long desired_rate);
333 static int show_error_stats_dump(struct seq_file *s, void *data)
335 struct sdhci_host *host = s->private;
336 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
337 struct sdhci_tegra *tegra_host = pltfm_host->priv;
338 struct sdhci_tegra_sd_stats *head;
340 seq_printf(s, "ErrorStatistics:\n");
341 seq_printf(s, "DataCRC\tCmdCRC\tDataTimeout\tCmdTimeout\n");
342 head = tegra_host->sd_stat_head;
344 seq_printf(s, "%d\t%d\t%d\t%d\n", head->data_crc_count,
345 head->cmd_crc_count, head->data_to_count,
350 static int show_dfs_stats_dump(struct seq_file *s, void *data)
352 struct sdhci_host *host = s->private;
353 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
354 struct sdhci_tegra *tegra_host = pltfm_host->priv;
355 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
357 seq_printf(s, "DFS statistics:\n");
359 if (host->mmc->dev_stats != NULL)
360 seq_printf(s, "Polling_period: %d\n",
361 host->mmc->dev_stats->polling_interval);
363 if (gov_data != NULL) {
364 seq_printf(s, "cur_active_load: %d\n",
365 gov_data->curr_active_load);
366 seq_printf(s, "avg_active_load: %d\n",
367 gov_data->avg_active_load);
368 seq_printf(s, "act_load_high_threshold: %d\n",
369 gov_data->act_load_high_threshold);
370 seq_printf(s, "freq_switch_count: %d\n",
371 gov_data->freq_switch_count);
376 static int sdhci_error_stats_dump(struct inode *inode, struct file *file)
378 return single_open(file, show_error_stats_dump, inode->i_private);
381 static int sdhci_dfs_stats_dump(struct inode *inode, struct file *file)
383 return single_open(file, show_dfs_stats_dump, inode->i_private);
387 static const struct file_operations sdhci_host_fops = {
388 .open = sdhci_error_stats_dump,
391 .release = single_release,
394 static const struct file_operations sdhci_host_dfs_fops = {
395 .open = sdhci_dfs_stats_dump,
398 .release = single_release,
402 static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg)
406 if (unlikely(reg == SDHCI_PRESENT_STATE)) {
407 /* Use wp_gpio here instead? */
408 val = readl(host->ioaddr + reg);
409 return val | SDHCI_WRITE_PROTECT;
411 return readl(host->ioaddr + reg);
414 static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
416 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
417 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
418 struct sdhci_tegra *tegra_host = pltfm_host->priv;
419 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
421 if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) &&
422 (reg == SDHCI_HOST_VERSION))) {
423 return SDHCI_SPEC_200;
426 return readw(host->ioaddr + reg);
429 static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg)
431 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
432 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
433 struct sdhci_tegra *tegra_host = pltfm_host->priv;
434 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
437 /* Seems like we're getting spurious timeout and crc errors, so
438 * disable signalling of them. In case of real errors software
439 * timers should take care of eventually detecting them.
441 if (unlikely(reg == SDHCI_SIGNAL_ENABLE))
442 val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC);
444 writel(val, host->ioaddr + reg);
446 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
447 if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) &&
448 (reg == SDHCI_INT_ENABLE))) {
449 u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
450 if (val & SDHCI_INT_CARD_INT)
454 writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL);
459 static void tegra_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
461 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
462 struct sdhci_tegra *tegra_host = pltfm_host->priv;
463 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
465 if (soc_data->nvquirks & NVQUIRK_SHADOW_XFER_MODE_REG) {
467 case SDHCI_TRANSFER_MODE:
469 * Postpone this write, we must do it together with a
470 * command write that is down below.
472 pltfm_host->xfer_mode_shadow = val;
475 writel((val << 16) | pltfm_host->xfer_mode_shadow,
476 host->ioaddr + SDHCI_TRANSFER_MODE);
477 pltfm_host->xfer_mode_shadow = 0;
482 writew(val, host->ioaddr + reg);
485 #ifdef CONFIG_MMC_FREQ_SCALING
/*
 * Dynamic frequency calculation.
 * The active load for the current period and the average active load
 * are calculated at the end of each polling interval.
 *
 * If the current active load is greater than the threshold load, then the
 * frequency is boosted (156 MHz). If the active load is lower than the
 * threshold, then the load is monitored for a max of three cycles before
 * reducing the frequency (82 MHz). If the average active load is lower,
 * then the monitoring cycles is reduced.
 *
 * The active load threshold value for both eMMC and SDIO is set to 25, which
 * is found to give the optimal power and performance. The polling interval is
 * 50 msec by default.
 *
 * The polling interval and active load threshold values can be changed by
 * the user through sysfs.
 */
504 static unsigned long calculate_mmc_target_freq(
505 struct tegra_freq_gov_data *gov_data)
507 unsigned long desired_freq = gov_data->curr_freq;
508 unsigned int type = MMC_TYPE_MMC;
510 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
511 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
512 gov_data->monitor_idle_load = false;
513 gov_data->max_idle_monitor_cycles =
514 gov_params[type].idle_mon_cycles;
516 if (gov_data->monitor_idle_load) {
517 if (!gov_data->max_idle_monitor_cycles) {
518 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
519 gov_data->max_idle_monitor_cycles =
520 gov_params[type].idle_mon_cycles;
522 gov_data->max_idle_monitor_cycles--;
525 gov_data->monitor_idle_load = true;
526 gov_data->max_idle_monitor_cycles *=
527 gov_data->avg_active_load;
528 gov_data->max_idle_monitor_cycles /= 100;
535 static unsigned long calculate_sdio_target_freq(
536 struct tegra_freq_gov_data *gov_data)
538 unsigned long desired_freq = gov_data->curr_freq;
539 unsigned int type = MMC_TYPE_SDIO;
541 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
542 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
543 gov_data->monitor_idle_load = false;
544 gov_data->max_idle_monitor_cycles =
545 gov_params[type].idle_mon_cycles;
547 if (gov_data->monitor_idle_load) {
548 if (!gov_data->max_idle_monitor_cycles) {
549 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
550 gov_data->max_idle_monitor_cycles =
551 gov_params[type].idle_mon_cycles;
553 gov_data->max_idle_monitor_cycles--;
556 gov_data->monitor_idle_load = true;
557 gov_data->max_idle_monitor_cycles *=
558 gov_data->avg_active_load;
559 gov_data->max_idle_monitor_cycles /= 100;
566 static unsigned long calculate_sd_target_freq(
567 struct tegra_freq_gov_data *gov_data)
569 unsigned long desired_freq = gov_data->curr_freq;
570 unsigned int type = MMC_TYPE_SD;
572 if (gov_data->curr_active_load >= gov_data->act_load_high_threshold) {
573 desired_freq = gov_data->freqs[TUNING_HIGH_FREQ];
574 gov_data->monitor_idle_load = false;
575 gov_data->max_idle_monitor_cycles =
576 gov_params[type].idle_mon_cycles;
578 if (gov_data->monitor_idle_load) {
579 if (!gov_data->max_idle_monitor_cycles) {
580 desired_freq = gov_data->freqs[TUNING_LOW_FREQ];
581 gov_data->max_idle_monitor_cycles =
582 gov_params[type].idle_mon_cycles;
584 gov_data->max_idle_monitor_cycles--;
587 gov_data->monitor_idle_load = true;
588 gov_data->max_idle_monitor_cycles *=
589 gov_data->avg_active_load;
590 gov_data->max_idle_monitor_cycles /= 100;
597 static unsigned long sdhci_tegra_get_target_freq(struct sdhci_host *sdhci,
598 struct devfreq_dev_status *dfs_stats)
600 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
601 struct sdhci_tegra *tegra_host = pltfm_host->priv;
602 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
603 unsigned long freq = sdhci->mmc->actual_clock;
606 dev_err(mmc_dev(sdhci->mmc),
607 "No gov data. Continue using current freq %ld", freq);
612 * If clock gating is enabled and clock is currently disabled, then
615 if (!tegra_host->clk_enabled)
618 if (dfs_stats->total_time) {
619 gov_data->curr_active_load = (dfs_stats->busy_time * 100) /
620 dfs_stats->total_time;
622 gov_data->curr_active_load = 0;
625 gov_data->avg_active_load += gov_data->curr_active_load;
626 gov_data->avg_active_load >>= 1;
628 if (sdhci->mmc->card) {
629 if (sdhci->mmc->card->type == MMC_TYPE_SDIO)
630 freq = calculate_sdio_target_freq(gov_data);
631 else if (sdhci->mmc->card->type == MMC_TYPE_MMC)
632 freq = calculate_mmc_target_freq(gov_data);
633 else if (sdhci->mmc->card->type == MMC_TYPE_SD)
634 freq = calculate_sd_target_freq(gov_data);
635 if (gov_data->curr_freq != freq)
636 gov_data->freq_switch_count++;
637 gov_data->curr_freq = freq;
643 static int sdhci_tegra_freq_gov_init(struct sdhci_host *sdhci)
645 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
646 struct sdhci_tegra *tegra_host = pltfm_host->priv;
651 if (!((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR104) ||
652 (sdhci->mmc->ios.timing == MMC_TIMING_MMC_HS200))) {
653 dev_info(mmc_dev(sdhci->mmc),
654 "DFS not required for current operating mode\n");
658 if (!tegra_host->gov_data) {
659 tegra_host->gov_data = devm_kzalloc(mmc_dev(sdhci->mmc),
660 sizeof(struct tegra_freq_gov_data), GFP_KERNEL);
661 if (!tegra_host->gov_data) {
662 dev_err(mmc_dev(sdhci->mmc),
663 "Failed to allocate memory for dfs data\n");
668 /* Find the supported frequencies */
669 for (i = 0; i < TUNING_FREQ_COUNT; i++) {
670 freq = tuning_params[i].freq_hz;
672 * Check the nearest possible clock with pll_c and pll_p as
673 * the clock sources. Choose the higher frequency.
675 tegra_host->gov_data->freqs[i] =
676 get_nearest_clock_freq(pll_c_rate, freq);
677 freq = get_nearest_clock_freq(pll_p_rate, freq);
678 if (freq > tegra_host->gov_data->freqs[i])
679 tegra_host->gov_data->freqs[i] = freq;
682 tegra_host->gov_data->monitor_idle_load = false;
683 tegra_host->gov_data->curr_freq = sdhci->mmc->actual_clock;
684 if (sdhci->mmc->card) {
685 type = sdhci->mmc->card->type;
686 sdhci->mmc->dev_stats->polling_interval =
687 gov_params[type].polling_interval_ms;
688 tegra_host->gov_data->act_load_high_threshold =
689 gov_params[type].active_load_threshold;
690 tegra_host->gov_data->max_idle_monitor_cycles =
691 gov_params[type].idle_mon_cycles;
699 static unsigned int tegra_sdhci_get_cd(struct sdhci_host *sdhci)
701 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
702 struct sdhci_tegra *tegra_host = pltfm_host->priv;
704 return tegra_host->card_present;
707 static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci)
709 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
710 struct sdhci_tegra *tegra_host = pltfm_host->priv;
711 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
713 if (!gpio_is_valid(plat->wp_gpio))
716 return gpio_get_value_cansleep(plat->wp_gpio);
719 static int tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
724 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
725 struct sdhci_tegra *tegra_host = pltfm_host->priv;
726 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
728 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
730 /* Select Bus Speed Mode for host */
731 /* For HS200 we need to set UHS_MODE_SEL to SDR104.
732 * It works as SDR 104 in SD 4-bit mode and HS200 in eMMC 8-bit mode.
734 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
736 case MMC_TIMING_UHS_SDR12:
737 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
739 case MMC_TIMING_UHS_SDR25:
740 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
742 case MMC_TIMING_UHS_SDR50:
743 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
745 case MMC_TIMING_UHS_SDR104:
746 case MMC_TIMING_MMC_HS200:
747 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
749 case MMC_TIMING_UHS_DDR50:
750 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
754 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
756 if (uhs == MMC_TIMING_UHS_DDR50) {
757 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
758 clk &= ~(0xFF << SDHCI_DIVIDER_SHIFT);
759 clk |= 1 << SDHCI_DIVIDER_SHIFT;
760 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
762 /* Set the ddr mode trim delay if required */
763 if (plat->ddr_trim_delay != -1) {
764 vndr_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
765 vndr_ctrl &= ~(0x1F <<
766 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
767 vndr_ctrl |= (plat->ddr_trim_delay <<
768 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
769 sdhci_writel(host, vndr_ctrl, SDHCI_VNDR_CLK_CTRL);
775 static void sdhci_status_notify_cb(int card_present, void *dev_id)
777 struct sdhci_host *sdhci = (struct sdhci_host *)dev_id;
778 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
779 struct tegra_sdhci_platform_data *plat;
780 unsigned int status, oldstat;
782 pr_debug("%s: card_present %d\n", mmc_hostname(sdhci->mmc),
785 plat = pdev->dev.platform_data;
786 if (!plat->mmc_data.status) {
787 mmc_detect_change(sdhci->mmc, 0);
791 status = plat->mmc_data.status(mmc_dev(sdhci->mmc));
793 oldstat = plat->mmc_data.card_present;
794 plat->mmc_data.card_present = status;
795 if (status ^ oldstat) {
796 pr_debug("%s: Slot status change detected (%d -> %d)\n",
797 mmc_hostname(sdhci->mmc), oldstat, status);
798 if (status && !plat->mmc_data.built_in)
799 mmc_detect_change(sdhci->mmc, (5 * HZ) / 2);
801 mmc_detect_change(sdhci->mmc, 0);
805 static irqreturn_t carddetect_irq(int irq, void *data)
807 struct sdhci_host *sdhost = (struct sdhci_host *)data;
808 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhost);
809 struct sdhci_tegra *tegra_host = pltfm_host->priv;
810 struct platform_device *pdev = to_platform_device(mmc_dev(sdhost->mmc));
811 struct tegra_sdhci_platform_data *plat;
813 plat = pdev->dev.platform_data;
815 tegra_host->card_present =
816 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
818 if (tegra_host->card_present) {
819 if (!tegra_host->is_rail_enabled) {
820 if (tegra_host->vdd_slot_reg)
821 regulator_enable(tegra_host->vdd_slot_reg);
822 if (tegra_host->vdd_io_reg)
823 regulator_enable(tegra_host->vdd_io_reg);
824 tegra_host->is_rail_enabled = 1;
827 if (tegra_host->is_rail_enabled) {
828 if (tegra_host->vdd_io_reg)
829 regulator_disable(tegra_host->vdd_io_reg);
830 if (tegra_host->vdd_slot_reg)
831 regulator_disable(tegra_host->vdd_slot_reg);
832 tegra_host->is_rail_enabled = 0;
835 * Set retune request as tuning should be done next time
836 * a card is inserted.
838 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
841 tasklet_schedule(&sdhost->card_tasklet);
845 static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
849 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
850 struct sdhci_tegra *tegra_host = pltfm_host->priv;
851 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
852 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
853 unsigned int best_tap_value;
855 if (!(mask & SDHCI_RESET_ALL))
858 if (tegra_host->sd_stat_head != NULL) {
859 tegra_host->sd_stat_head->data_crc_count = 0;
860 tegra_host->sd_stat_head->cmd_crc_count = 0;
861 tegra_host->sd_stat_head->data_to_count = 0;
862 tegra_host->sd_stat_head->cmd_to_count = 0;
865 if (tegra_host->gov_data != NULL)
866 tegra_host->gov_data->freq_switch_count = 0;
868 vendor_ctrl = sdhci_readl(host, SDHCI_VNDR_CLK_CTRL);
869 if (soc_data->nvquirks & NVQUIRK_ENABLE_PADPIPE_CLKEN) {
871 SDHCI_VNDR_CLK_CTRL_PADPIPE_CLKEN_OVERRIDE;
873 if (soc_data->nvquirks & NVQUIRK_DISABLE_SPI_MODE_CLKEN) {
875 ~SDHCI_VNDR_CLK_CTRL_SPI_MODE_CLKEN_OVERRIDE;
877 if (soc_data->nvquirks & NVQUIRK_EN_FEEDBACK_CLK) {
879 ~SDHCI_VNDR_CLK_CTRL_INPUT_IO_CLK;
881 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_INTERNAL_CLK;
884 if (soc_data->nvquirks & NVQUIRK_SET_TAP_DELAY) {
885 if ((tegra_host->tuning_status == TUNING_STATUS_DONE)
886 && (host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
887 if (host->mmc->ios.clock >
888 tuning_params[TUNING_LOW_FREQ].freq_hz)
889 best_tap_value = tegra_host->best_tap_values[1];
891 best_tap_value = tegra_host->best_tap_values[0];
893 best_tap_value = plat->tap_delay;
895 vendor_ctrl &= ~(0xFF << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
896 vendor_ctrl |= (best_tap_value <<
897 SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
900 if (soc_data->nvquirks & NVQUIRK_SET_TRIM_DELAY) {
901 vendor_ctrl &= ~(0x1F <<
902 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
903 vendor_ctrl |= (plat->trim_delay <<
904 SDHCI_VNDR_CLK_CTRL_TRIM_VALUE_SHIFT);
906 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50_TUNING)
907 vendor_ctrl |= SDHCI_VNDR_CLK_CTRL_SDR50_TUNING;
908 sdhci_writel(host, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
910 misc_ctrl = sdhci_readw(host, SDHCI_VNDR_MISC_CTRL);
911 if (soc_data->nvquirks & NVQUIRK_ENABLE_SD_3_0)
912 misc_ctrl |= SDHCI_VNDR_MISC_CTRL_ENABLE_SD_3_0;
913 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) {
915 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR104_SUPPORT;
917 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) {
919 SDHCI_VNDR_MISC_CTRL_ENABLE_SDR50_SUPPORT;
921 /* Enable DDR mode support only for SDMMC4 */
922 if (soc_data->nvquirks & NVQUIRK_ENABLE_DDR50) {
923 if (tegra_host->instance == 3) {
925 SDHCI_VNDR_MISC_CTRL_ENABLE_DDR50_SUPPORT;
928 if (soc_data->nvquirks & NVQUIRK_INFINITE_ERASE_TIMEOUT) {
930 SDHCI_VNDR_MISC_CTRL_INFINITE_ERASE_TIMEOUT;
932 if (soc_data->nvquirks & NVQUIRK_SET_PIPE_STAGES_MASK_0)
933 misc_ctrl &= ~SDHCI_VNDR_MISC_CTRL_PIPE_STAGES_MASK;
934 sdhci_writew(host, misc_ctrl, SDHCI_VNDR_MISC_CTRL);
936 if (soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CMD23)
937 host->flags &= ~SDHCI_AUTO_CMD23;
939 /* Mask the support for any UHS modes if specified */
940 if (plat->uhs_mask & MMC_UHS_MASK_SDR104)
941 host->mmc->caps &= ~MMC_CAP_UHS_SDR104;
943 if (plat->uhs_mask & MMC_UHS_MASK_DDR50)
944 host->mmc->caps &= ~MMC_CAP_UHS_DDR50;
946 if (plat->uhs_mask & MMC_UHS_MASK_SDR50)
947 host->mmc->caps &= ~MMC_CAP_UHS_SDR50;
949 if (plat->uhs_mask & MMC_UHS_MASK_SDR25)
950 host->mmc->caps &= ~MMC_CAP_UHS_SDR25;
952 if (plat->uhs_mask & MMC_UHS_MASK_SDR12)
953 host->mmc->caps &= ~MMC_CAP_UHS_SDR12;
955 if (plat->uhs_mask & MMC_MASK_HS200)
956 host->mmc->caps2 &= ~MMC_CAP2_HS200;
959 static int tegra_sdhci_buswidth(struct sdhci_host *sdhci, int bus_width)
961 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
962 const struct tegra_sdhci_platform_data *plat;
965 plat = pdev->dev.platform_data;
967 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL);
968 if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) {
969 ctrl &= ~SDHCI_CTRL_4BITBUS;
970 ctrl |= SDHCI_CTRL_8BITBUS;
972 ctrl &= ~SDHCI_CTRL_8BITBUS;
973 if (bus_width == MMC_BUS_WIDTH_4)
974 ctrl |= SDHCI_CTRL_4BITBUS;
976 ctrl &= ~SDHCI_CTRL_4BITBUS;
978 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL);
/*
 * Calculation of nearest clock frequency for a desired rate:
 * Get the divisor value, div = p / d_rate.
 * 1. If it is nearer to ceil(p/d_rate) then increment the div value by 0.5
 *    and nearest_rate, i.e. result = p / (div + 0.5) = (p << 1) / ((div << 1) + 1).
 * 2. If not, result = p / div.
 * As the nearest clk freq should be <= the desired_rate:
 * 3. If result > desired_rate then increment the div by 0.5
 *    and do, (p << 1) / ((div << 1) + 1).
 * 4. Else return result.
 * Here, if conditions 1 & 3 are both satisfied then, to keep track of the
 * div value, an index variable is defined.
 */
995 static unsigned long get_nearest_clock_freq(unsigned long pll_rate,
996 unsigned long desired_rate)
998 unsigned long result;
1002 div = pll_rate / desired_rate;
1003 if (div > MAX_DIVISOR_VALUE) {
1004 div = MAX_DIVISOR_VALUE;
1005 result = pll_rate / div;
1007 if ((pll_rate % desired_rate) >= (desired_rate / 2))
1008 result = (pll_rate << 1) / ((div << 1) + index++);
1010 result = pll_rate / div;
1012 if (desired_rate < result) {
1014 * Trying to get lower clock freq than desired clock,
1015 * by increasing the divisor value by 0.5
1017 result = (pll_rate << 1) / ((div << 1) + index);
1024 static void tegra_sdhci_clock_set_parent(struct sdhci_host *host,
1025 unsigned long desired_rate)
1027 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1028 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1029 struct clk *parent_clk;
1030 unsigned long pll_c_freq;
1031 unsigned long pll_p_freq;
1034 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
1037 pll_c_freq = (pll_c_rate >= desired_rate) ?
1038 get_nearest_clock_freq(pll_c_rate, desired_rate) : pll_c_rate;
1039 pll_p_freq = (pll_p_rate >= desired_rate) ?
1040 get_nearest_clock_freq(pll_p_rate, desired_rate) : pll_p_rate;
1042 if (pll_c_freq > pll_p_freq) {
1043 if (!tegra_host->is_parent_pllc) {
1045 tegra_host->is_parent_pllc = true;
1046 clk_set_rate(pltfm_host->clk, DEFAULT_SDHOST_FREQ);
1049 } else if (tegra_host->is_parent_pllc) {
1051 tegra_host->is_parent_pllc = false;
1055 rc = clk_set_parent(pltfm_host->clk, parent_clk);
1057 pr_err("%s: failed to set pll parent clock %d\n",
1058 mmc_hostname(host->mmc), rc);
1061 static void tegra_sdhci_set_clk_rate(struct sdhci_host *sdhci,
1064 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1065 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1066 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1067 unsigned int clk_rate;
1068 unsigned int emc_clk;
1070 if (sdhci->mmc->ios.timing == MMC_TIMING_UHS_DDR50) {
1072 * In ddr mode, tegra sdmmc controller clock frequency
1073 * should be double the card clock frequency.
1075 if (tegra_host->ddr_clk_limit) {
1076 clk_rate = tegra_host->ddr_clk_limit * 2;
1077 if (tegra_host->emc_clk) {
1078 emc_clk = clk_get_rate(tegra_host->emc_clk);
1079 if (emc_clk == tegra_host->emc_max_clk)
1080 clk_rate = clock * 2;
1083 clk_rate = clock * 2;
1086 if ((sdhci->mmc->ios.timing == MMC_TIMING_UHS_SDR50) &&
1087 (soc_data->nvquirks & NVQUIRK_BROKEN_SDR50_CONTROLLER_CLOCK))
1088 clk_rate = clock * 2;
1092 if (tegra_host->max_clk_limit &&
1093 (clk_rate > tegra_host->max_clk_limit))
1094 clk_rate = tegra_host->max_clk_limit;
1096 tegra_sdhci_clock_set_parent(sdhci, clk_rate);
1097 clk_set_rate(pltfm_host->clk, clk_rate);
1098 sdhci->max_clk = clk_get_rate(pltfm_host->clk);
1100 /* FPGA supports 26MHz of clock for SDMMC. */
1101 if (tegra_platform_is_fpga())
1102 sdhci->max_clk = 26000000;
1103 #ifdef CONFIG_MMC_FREQ_SCALING
1104 /* Set the tap delay if tuning is done and dfs is enabled */
1105 if (sdhci->mmc->df &&
1106 (tegra_host->tuning_status == TUNING_STATUS_DONE)) {
1107 if (clock > tuning_params[TUNING_LOW_FREQ].freq_hz)
1108 sdhci_tegra_set_tap_delay(sdhci,
1109 tegra_host->best_tap_values[TUNING_HIGH_FREQ]);
1111 sdhci_tegra_set_tap_delay(sdhci,
1112 tegra_host->best_tap_values[TUNING_LOW_FREQ]);
/*
 * tegra_sdhci_set_clock - gate/ungate the SDMMC clocks.
 *
 * A non-zero @clock while clocks are off: take a runtime-PM reference,
 * enable the module clock and the vendor SDMMC clock-enable bit, program
 * the requested rate, and turn on the optional EMC/sclk bandwidth
 * clocks.  @clock == 0 while clocks are on undoes all of that in
 * reverse order and drops the runtime-PM reference.
 */
1118 static void tegra_sdhci_set_clock(struct sdhci_host *sdhci, unsigned int clock)
1120 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1121 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1122 struct platform_device *pdev = to_platform_device(mmc_dev(sdhci->mmc));
1125 pr_debug("%s %s %u enabled=%u\n", __func__,
1126 mmc_hostname(sdhci->mmc), clock, tegra_host->clk_enabled);
1129 if (!tegra_host->clk_enabled) {
/* Keep the device powered while its clock is running */
1130 pm_runtime_get_sync(&pdev->dev);
1131 clk_prepare_enable(pltfm_host->clk);
1132 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1133 ctrl |= SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1134 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1135 tegra_host->clk_enabled = true;
1137 tegra_sdhci_set_clk_rate(sdhci, clock);
1138 if (tegra_host->emc_clk && (!tegra_host->is_sdmmc_emc_clk_on)) {
1139 clk_prepare_enable(tegra_host->emc_clk);
1140 tegra_host->is_sdmmc_emc_clk_on = true;
1142 if (tegra_host->sclk && (!tegra_host->is_sdmmc_sclk_on)) {
1143 clk_prepare_enable(tegra_host->sclk);
1144 tegra_host->is_sdmmc_sclk_on = true;
1146 } else if (!clock && tegra_host->clk_enabled) {
1147 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on) {
1148 clk_disable_unprepare(tegra_host->emc_clk);
1149 tegra_host->is_sdmmc_emc_clk_on = false;
1151 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on) {
1152 clk_disable_unprepare(tegra_host->sclk);
1153 tegra_host->is_sdmmc_sclk_on = false;
/* Clear the vendor clock-enable bit before stopping the module clock */
1155 ctrl = sdhci_readb(sdhci, SDHCI_VNDR_CLK_CTRL);
1156 ctrl &= ~SDHCI_VNDR_CLK_CTRL_SDMMC_CLK;
1157 sdhci_writeb(sdhci, ctrl, SDHCI_VNDR_CLK_CTRL);
1158 clk_disable_unprepare(pltfm_host->clk);
1159 pm_runtime_put_sync(&pdev->dev);
1160 tegra_host->clk_enabled = false;
/*
 * tegra_sdhci_do_calibration - run the SDMMC pad auto-calibration.
 *
 * Skips calibration entirely on SDMMC4 (instance 3) or when the SoC
 * quirk disables auto-calibration.  Otherwise it configures the pad
 * VREF select, kicks off auto-calibration, optionally programs
 * per-SoC pull-up/pull-down offsets, polls for completion (timeout
 * counter initialised to 10), and finally propagates the resulting
 * pull codes into the pinmux drive-strength registers when the
 * NVQUIRK_SET_DRIVE_STRENGTH quirk is set.
 */
1164 static void tegra_sdhci_do_calibration(struct sdhci_host *sdhci)
1167 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1168 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1169 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
1170 unsigned int timeout = 10;
1172 /* No Calibration for sdmmc4 */
1173 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_SDMMC4_CALIB) &&
1174 (tegra_host->instance == 3))
1177 if (unlikely(soc_data->nvquirks & NVQUIRK_DISABLE_AUTO_CALIBRATION))
/* Select the pad VREF; optionally power the pad input buffer */
1180 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1181 val &= ~SDMMC_SDMEMCOMPPADCTRL_VREF_SEL_MASK;
1182 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD)
1183 val |= SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1185 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
1187 /* Enable Auto Calibration*/
1188 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
1189 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1190 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_START;
1191 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_CALIBRATION_OFFSETS)) {
1192 #ifdef CONFIG_ARCH_TEGRA_14x_SOC
1194 * Based on characterization results for T14x platforms,
1195 * calibration offsets should be set only sdmmc4.
1197 if (tegra_host->instance != 3)
1198 goto skip_setting_calib_offsets;
1200 /* Program Auto cal PD offset(bits 8:14) */
1202 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
1203 val |= (SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET <<
1204 SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PD_OFFSET_SHIFT);
1205 /* Program Auto cal PU offset(bits 0:6) */
1207 val |= SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_PU_OFFSET;
1209 #ifdef CONFIG_ARCH_TEGRA_14x_SOC
1210 skip_setting_calib_offsets:
1212 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
1214 /* Wait until the calibration is done */
1216 if (!(sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) &
1217 SDMMC_AUTO_CAL_STATUS_AUTO_CAL_ACTIVE))
/* Reached only when the ACTIVE bit never cleared within the timeout */
1225 dev_err(mmc_dev(sdhci->mmc), "Auto calibration failed\n");
1227 /* Disable Auto calibration */
1228 val = sdhci_readl(sdhci, SDMMC_AUTO_CAL_CONFIG);
1229 val &= ~SDMMC_AUTO_CAL_CONFIG_AUTO_CAL_ENABLE;
1230 sdhci_writel(sdhci, val, SDMMC_AUTO_CAL_CONFIG);
/* Undo the pad input-buffer enable that was set before calibration */
1232 if (soc_data->nvquirks & NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD) {
1233 val = sdhci_readl(sdhci, SDMMC_SDMEMCOMPPADCTRL);
1234 val &= ~SDMMC_SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD_MASK;
1235 sdhci_writel(sdhci, val, SDMMC_SDMEMCOMPPADCTRL);
1238 if (unlikely(soc_data->nvquirks & NVQUIRK_SET_DRIVE_STRENGTH)) {
1239 unsigned int pulldown_code;
1240 unsigned int pullup_code;
1244 pg = tegra_drive_get_pingroup(mmc_dev(sdhci->mmc));
1246 /* Get the pull down codes from auto cal status reg */
1248 sdhci_readl(sdhci, SDMMC_AUTO_CAL_STATUS) >>
1249 SDMMC_AUTO_CAL_STATUS_PULLDOWN_OFFSET);
1250 /* Set the pull down in the pinmux reg */
1251 err = tegra_drive_pinmux_set_pull_down(pg,
1254 dev_err(mmc_dev(sdhci->mmc),
1255 "Failed to set pulldown codes %d err %d\n",
1256 pulldown_code, err);
1258 /* Calculate the pull up codes */
1259 pullup_code = pulldown_code + PULLUP_ADJUSTMENT_OFFSET;
/* Saturate at the pinmux driver's maximum pull code */
1260 if (pullup_code >= TEGRA_MAX_PULL)
1261 pullup_code = TEGRA_MAX_PULL - 1;
1262 /* Set the pull up code in the pinmux reg */
1263 err = tegra_drive_pinmux_set_pull_up(pg, pullup_code);
1265 dev_err(mmc_dev(sdhci->mmc),
1266 "Failed to set pullup codes %d err %d\n",
/*
 * tegra_sdhci_signal_voltage_switch - switch pad signalling between
 * 3.3V and 1.8V.
 *
 * Programs the 1.8V enable bit in SDHCI_HOST_CONTROL2, gates the card
 * clock across the switch to avoid glitches, and moves the I/O rail
 * regulator to the matching voltage range.  On a failed 1.8V switch it
 * falls back to the 3.3V range.  Returns the last regulator call's
 * status (0 when no I/O regulator is present).
 */
1272 static int tegra_sdhci_signal_voltage_switch(struct sdhci_host *sdhci,
1273 unsigned int signal_voltage)
1275 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1276 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1277 unsigned int min_uV = tegra_host->vddio_min_uv;
1278 unsigned int max_uV = tegra_host->vddio_max_uv;
1279 unsigned int rc = 0;
1283 ctrl = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
1284 if (signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
1285 ctrl |= SDHCI_CTRL_VDD_180;
1286 min_uV = SDHOST_LOW_VOLT_MIN;
1287 max_uV = SDHOST_LOW_VOLT_MAX;
1288 } else if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
1289 if (ctrl & SDHCI_CTRL_VDD_180)
1290 ctrl &= ~SDHCI_CTRL_VDD_180;
1293 /* Check if the slot can support the required voltage */
1294 if (min_uV > tegra_host->vddio_max_uv)
1297 /* Switch OFF the card clock to prevent glitches on the clock line */
1298 clk = sdhci_readw(sdhci, SDHCI_CLOCK_CONTROL);
1299 clk &= ~SDHCI_CLOCK_CARD_EN;
1300 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
1302 /* Set/clear the 1.8V signalling */
1303 sdhci_writew(sdhci, ctrl, SDHCI_HOST_CONTROL2);
1305 /* Switch the I/O rail voltage */
1306 if (tegra_host->vdd_io_reg) {
1307 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
/* NOTE(review): the two-part log string below lacks a space before "failed" */
1310 dev_err(mmc_dev(sdhci->mmc), "switching to 1.8V"
1311 "failed . Switching back to 3.3V\n");
1312 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
1313 SDHOST_HIGH_VOLT_MIN,
1314 SDHOST_HIGH_VOLT_MAX);
1316 dev_err(mmc_dev(sdhci->mmc),
1317 "switching to 3.3V also failed\n");
1321 /* Wait for 10 msec for the voltage to be switched */
1324 /* Enable the card clock */
1325 clk |= SDHCI_CLOCK_CARD_EN;
1326 sdhci_writew(sdhci, clk, SDHCI_CLOCK_CONTROL);
1328 /* Wait for 1 msec after enabling clock */
/*
 * tegra_sdhci_reset - issue a software reset for the bits in @mask and
 * wait (up to the 100 ms budget noted below) for the controller to
 * clear them; hardware clears each bit when the reset completes.  The
 * platform reset-exit hook then re-applies vendor register settings
 * lost across the reset.
 */
1334 static void tegra_sdhci_reset(struct sdhci_host *sdhci, u8 mask)
1336 unsigned long timeout;
1338 sdhci_writeb(sdhci, mask, SDHCI_SOFTWARE_RESET);
1340 /* Wait max 100 ms */
1343 /* hw clears the bit when it's done */
1344 while (sdhci_readb(sdhci, SDHCI_SOFTWARE_RESET) & mask) {
/* Timeout expiry path: report which reset bits never cleared */
1346 dev_err(mmc_dev(sdhci->mmc), "Reset 0x%x never"
1347 "completed.\n", (int)mask);
1354 tegra_sdhci_reset_exit(sdhci, mask);
/*
 * sdhci_tegra_set_tap_delay - program the input tap delay field
 * (bits 23:16) of the vendor clock control register.
 * @tap_delay must not exceed MAX_TAP_VALUES (255 per the comment).
 */
1357 static void sdhci_tegra_set_tap_delay(struct sdhci_host *sdhci,
1358 unsigned int tap_delay)
1362 /* Max tap delay value is 255 */
1363 BUG_ON(tap_delay > MAX_TAP_VALUES)
1365 vendor_ctrl = sdhci_readl(sdhci, SDHCI_VNDR_CLK_CTRL);
/* Clear then set the 8-bit tap value field */
1366 vendor_ctrl &= ~(0xFF << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1367 vendor_ctrl |= (tap_delay << SDHCI_VNDR_CLK_CTRL_TAP_VALUE_SHIFT);
1368 sdhci_writel(sdhci, vendor_ctrl, SDHCI_VNDR_CLK_CTRL);
/*
 * sdhci_tegra_sd_error_stats - bump the per-host error counters for
 * every error bit present in @int_status (CRC and timeout errors for
 * both command and data lines).
 *
 * NOTE(review): head is dereferenced unconditionally; the caller is
 * presumably expected to have allocated sd_stat_head — confirm.
 */
1371 static int sdhci_tegra_sd_error_stats(struct sdhci_host *host, u32 int_status)
1373 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
1374 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1375 struct sdhci_tegra_sd_stats *head = tegra_host->sd_stat_head;
1377 if (int_status & SDHCI_INT_DATA_CRC)
1378 head->data_crc_count++;
1379 if (int_status & SDHCI_INT_CRC)
1380 head->cmd_crc_count++;
1381 if (int_status & SDHCI_INT_TIMEOUT)
1382 head->cmd_to_count++;
1383 if (int_status & SDHCI_INT_DATA_TIMEOUT)
1384 head->data_to_count++;
/*
 * sdhci_tegra_dump_tuning_data - log the tap window data gathered for
 * @freq_band: the 1.25V window always, the 1.1V window additionally
 * for the high-frequency band, then the chosen window type and the
 * best tap value.
 */
1388 static void sdhci_tegra_dump_tuning_data(struct sdhci_host *sdhci, u8 freq_band)
1390 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1391 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1392 struct tegra_tuning_data *tuning_data;
1394 tuning_data = tegra_host->tuning_data[freq_band];
1395 if (tuning_data->tap_data[0]) {
1396 dev_info(mmc_dev(sdhci->mmc), "Tuning window data at 1.25V\n");
1397 pr_info("Partial window %d\n",
1398 tuning_data->tap_data[0]->partial_win);
1399 pr_info("full window start %d\n",
1400 tuning_data->tap_data[0]->full_win_begin);
1401 pr_info("full window end %d\n",
1402 tuning_data->tap_data[0]->full_win_end);
1405 if ((freq_band == TUNING_HIGH_FREQ) &&
1406 (tuning_data->tap_data[1])) {
1407 dev_info(mmc_dev(sdhci->mmc), "Tuning window data at 1.1V\n");
1408 pr_info("partial window %d\n",
1409 tuning_data->tap_data[1]->partial_win);
/* NOTE(review): "being" in the log string below should read "begin" */
1410 pr_info("full window being %d\n",
1411 tuning_data->tap_data[1]->full_win_begin);
1412 pr_info("full window end %d\n",
1413 tuning_data->tap_data[1]->full_win_end);
1415 pr_info("%s window chosen\n",
1416 tuning_data->select_partial_win ? "partial" : "full");
1417 pr_info("Best tap value %d\n",
1418 tuning_data->best_tap_value);
1422 * Calculation of best tap value for low frequencies(82MHz).
1423 * X = Partial win, Y = Full win start, Z = Full win end.
1425 * Full Window = Z - Y.
1426 * Taps margin = mid-point of 1/2*(curr_freq/max_frequency)*UI
1427 * = (1/2)*(1/2)*(82/200)*UI
1429 * if Partial win<(0.22)*UI
1430 * best tap = Y+(0.1025*UI)
1432 * best tap = (X-(Z-Y))+(0.1025*UI)
1433 * If best tap<0, best tap = 0
/*
 * calculate_low_freq_tap_value - derive the best tap value for the
 * low-frequency band from the single (1.25V) tap window.
 *
 * Uses the formula documented in the block comment above: the sampling
 * point is a quarter of tuning_ui scaled by curr_freq/max_freq, and the
 * partial window is selected only when it is wider than 0.22 * UI.
 */
1435 static void calculate_low_freq_tap_value(struct sdhci_host *sdhci)
1437 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1438 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1439 unsigned int curr_clock;
1440 unsigned int max_clock;
1442 struct tap_window_data *tap_data;
1443 struct tegra_tuning_data *tuning_data;
1445 tuning_data = tegra_host->tuning_data[TUNING_LOW_FREQ];
1446 tap_data = tuning_data->tap_data[0];
/* No usable full window: fall back to the partial window or tap 0 */
1448 if (tap_data->abandon_full_win) {
1449 if (tap_data->abandon_partial_win) {
1450 tuning_data->best_tap_value = 0;
1453 tuning_data->select_partial_win = true;
1454 goto calculate_best_tap;
/* UI spans from the partial-window end to the full-window end */
1458 tap_data->tuning_ui = tap_data->full_win_end - tap_data->partial_win;
1460 /* Calculate the sampling point */
1461 curr_clock = sdhci->max_clk / 1000000;
1462 max_clock = uhs_max_freq_MHz[sdhci->mmc->ios.timing];
1463 tap_data->sampling_point = ((tap_data->tuning_ui * curr_clock) /
/* >>= 2 gives the quarter-UI margin from the comment above */
1465 tap_data->sampling_point >>= 2;
1468 * Check whether partial window should be used. Use partial window
1469 * if partial window > 0.22(UI).
1471 if ((!tap_data->abandon_partial_win) &&
1472 (tap_data->partial_win > ((22 * tap_data->tuning_ui) / 100)))
1473 tuning_data->select_partial_win = true;
1476 if (tuning_data->select_partial_win) {
1477 best_tap_value = (tap_data->partial_win -
1478 (tap_data->full_win_end - tap_data->full_win_begin)) +
1479 tap_data->sampling_point;
/* Negative best tap is clamped to 0 per the formula above */
1480 tuning_data->best_tap_value = (best_tap_value < 0) ? 0 :
1483 tuning_data->best_tap_value = tap_data->full_win_begin +
1484 tap_data->sampling_point;
1489 * Calculation of best tap value for high frequencies(156MHz).
1490 * Tap window data at 1.25V core voltage
1491 * X = Partial win, Y = Full win start, Z = Full win end.
1492 * Full Window = Z-Y.
1494 * Tap_margin = (0.20375)UI
1496 * Tap window data at 1.1V core voltage
1497 * X' = Partial win, Y' = Full win start, Z' = Full win end.
1499 * Full Window' = Z'-Y'.
1500 * Tap_margin' = (0.20375)UI'
1502 * Full_window_tap=[(Z'-0.20375UI')+(Y+0.20375UI)]/2
1503 * Partial_window_tap=[(X'-0.20375UI')+(X-(Z-Y)+0.20375UI)]/2
1504 * if(Partial_window_tap < 0), Partial_window_tap=0
1506 * Full_window_quality=[(Z'-0.20375UI')-(Y+0.20375UI)]/2
1507 * Partial_window_quality=(X'-0.20375UI')-Partial_window_tap
1508 * if(Full_window_quality>Partial_window_quality) choose full window,
1509 * else choose partial window.
1510 * If there is no margin window for both cases,
1511 * best tap=(Y+Z')/2.
/*
 * calculate_high_freq_tap_value - derive the best tap value for the
 * high-frequency band by combining the tap windows measured at the
 * maximum (tap_data[0]) and minimum-override (tap_data[1]) core
 * voltages, per the formulas in the block comment above.
 *
 * Falls back to 50% of the vmax full window when min-override tuning
 * was not run (or one-shot tuning was used).  Chooses full vs partial
 * window by comparing their quality margins; when both margins are
 * non-positive, the midpoint of (vmax full-win begin, vmid full-win
 * end) is used.
 */
1513 static void calculate_high_freq_tap_value(struct sdhci_host *sdhci)
1515 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1516 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1517 unsigned int curr_clock;
1518 unsigned int max_clock;
1519 struct tap_window_data *vmax_tap_data;
1520 struct tap_window_data *vmid_tap_data;
1521 struct tegra_tuning_data *tuning_data;
1522 unsigned int full_win_tap;
1523 int partial_win_start;
1524 int partial_win_tap;
1525 int full_win_quality;
1526 int partial_win_quality;
1528 tuning_data = tegra_host->tuning_data[TUNING_HIGH_FREQ];
1529 vmax_tap_data = tuning_data->tap_data[0];
1530 vmid_tap_data = tuning_data->tap_data[1];
1533 * If tuning at min override voltage is not done or one shot tuning is
1534 * done, set the best tap value as 50% of the full window.
1536 if (!tuning_data->override_vcore_tun_done ||
1537 tuning_data->one_shot_tuning) {
1538 dev_info(mmc_dev(sdhci->mmc),
1539 "Setting best tap as 50 percent of the full window\n");
1540 tuning_data->best_tap_value = (vmax_tap_data->full_win_begin +
1541 ((vmax_tap_data->full_win_end -
1542 vmax_tap_data->full_win_begin) >> 1));
1546 curr_clock = sdhci->max_clk / 1000000;
1547 max_clock = uhs_max_freq_MHz[sdhci->mmc->ios.timing];
1550 * Calculate the tuning_ui and sampling points for tap windows found
1551 * at all core voltages.
1553 vmax_tap_data->tuning_ui = vmax_tap_data->full_win_end -
1554 vmax_tap_data->partial_win;
1555 vmax_tap_data->sampling_point =
1556 (vmax_tap_data->tuning_ui * curr_clock) / max_clock;
1557 vmax_tap_data->sampling_point >>= 2;
1559 vmid_tap_data->tuning_ui = vmid_tap_data->full_win_end -
1560 vmid_tap_data->partial_win;
1561 vmid_tap_data->sampling_point =
1562 (vmid_tap_data->tuning_ui * curr_clock) / max_clock;
1563 vmid_tap_data->sampling_point >>= 2;
/* Midpoint of the margin-adjusted full-window endpoints */
1565 full_win_tap = ((vmid_tap_data->full_win_end -
1566 vmid_tap_data->sampling_point) +
1567 (vmax_tap_data->full_win_begin +
1568 vmax_tap_data->sampling_point));
1570 full_win_quality = (vmid_tap_data->full_win_end -
1571 vmid_tap_data->sampling_point) -
1572 (vmax_tap_data->full_win_begin +
1573 vmax_tap_data->sampling_point);
1574 full_win_quality >>= 1;
/* X - (Z - Y): partial window start translated past the full window */
1576 partial_win_start = (vmax_tap_data->partial_win -
1577 (vmax_tap_data->full_win_end -
1578 vmax_tap_data->full_win_begin));
1579 partial_win_tap = ((vmid_tap_data->partial_win -
1580 vmid_tap_data->sampling_point) +
1581 (partial_win_start + vmax_tap_data->sampling_point));
1582 partial_win_tap >>= 1;
/* A negative partial window tap is clamped to 0 per the formula */
1584 if (partial_win_tap < 0)
1585 partial_win_tap = 0;
1586 partial_win_quality = (vmid_tap_data->partial_win -
1587 vmid_tap_data->sampling_point) - partial_win_tap;
1589 if ((full_win_quality <= 0) && (partial_win_quality <= 0)) {
1590 dev_warn(mmc_dev(sdhci->mmc),
1591 "No margin window for both windows\n");
1592 tuning_data->best_tap_value = vmax_tap_data->full_win_begin +
1593 vmid_tap_data->full_win_end;
1594 tuning_data->best_tap_value >>= 1;
1596 if (full_win_quality > partial_win_quality) {
1597 tuning_data->best_tap_value = full_win_tap;
1598 tuning_data->select_partial_win = false;
1600 tuning_data->best_tap_value = partial_win_tap;
1601 tuning_data->select_partial_win = true;
/*
 * sdhci_tegra_run_frequency_tuning - issue one tuning command
 * (CMD19/CMD21, stored in tegra_host->tuning_opcode) by driving the
 * SDHCI registers directly, and poll for the tuning-block transfer.
 *
 * Waits for the CMD/DAT inhibit bits to clear (timeout counter of 10),
 * arms EXEC_TUNING in HOST_CONTROL2, programs block size / transfer
 * mode / argument, then issues the command.  Success means the data
 * arrived without a CRC error; on failure the CMD and DAT lines are
 * reset.  The final HOST_CONTROL2 check confirms the controller
 * accepted the tuned clock (EXEC_TUNING cleared, TUNED_CLK set).
 */
1606 static int sdhci_tegra_run_frequency_tuning(struct sdhci_host *sdhci)
1608 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1609 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1613 unsigned int timeout = 10;
1617 mask = SDHCI_CMD_INHIBIT | SDHCI_DATA_INHIBIT;
1618 while (sdhci_readl(sdhci, SDHCI_PRESENT_STATE) & mask) {
/* Timeout expiry path for the inhibit wait */
1620 dev_err(mmc_dev(sdhci->mmc), "Controller never"
1621 "released inhibit bit(s).\n");
/* Clear any stale tuned clock, then start a fresh tuning sequence */
1629 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
1630 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
1631 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
1633 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
1634 ctrl |= SDHCI_CTRL_EXEC_TUNING;
1635 sdhci_writeb(sdhci, ctrl, SDHCI_HOST_CONTROL2);
1638 * In response to CMD19, the card sends 64 bytes of tuning
1639 * block to the Host Controller. So we set the block size
1641 * In response to CMD21, the card sends 128 bytes of tuning
1642 * block for MMC_BUS_WIDTH_8 and 64 bytes for MMC_BUS_WIDTH_4
1643 * to the Host Controller. So we set the block size to 64 here.
1645 sdhci_writew(sdhci, SDHCI_MAKE_BLKSZ(7, tegra_host->tuning_bsize),
1648 sdhci_writeb(sdhci, 0xE, SDHCI_TIMEOUT_CONTROL);
1650 sdhci_writew(sdhci, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
1652 sdhci_writel(sdhci, 0x0, SDHCI_ARGUMENT);
1654 /* Set the cmd flags */
1655 flags = SDHCI_CMD_RESP_SHORT | SDHCI_CMD_CRC | SDHCI_CMD_DATA;
1656 /* Issue the command */
1657 sdhci_writew(sdhci, SDHCI_MAKE_CMD(
1658 tegra_host->tuning_opcode, flags), SDHCI_COMMAND);
1664 intstatus = sdhci_readl(sdhci, SDHCI_INT_STATUS);
/* Acknowledge whatever status bits we observed */
1666 sdhci_writel(sdhci, intstatus, SDHCI_INT_STATUS);
1671 if ((intstatus & SDHCI_INT_DATA_AVAIL) &&
1672 !(intstatus & SDHCI_INT_DATA_CRC)) {
1674 sdhci->tuning_done = 1;
/* Failure path: clean up the CMD and DAT state machines */
1676 tegra_sdhci_reset(sdhci, SDHCI_RESET_CMD);
1677 tegra_sdhci_reset(sdhci, SDHCI_RESET_DATA);
1681 if (sdhci->tuning_done) {
1682 sdhci->tuning_done = 0;
1683 ctrl = sdhci_readb(sdhci, SDHCI_HOST_CONTROL2);
1684 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING) &&
1685 (ctrl & SDHCI_CTRL_TUNED_CLK))
/*
 * sdhci_tegra_scan_tap_values - sweep tap values upward from
 * @starting_tap, running frequency tuning at each step, until the
 * pass/fail state flips.
 *
 * With @expect_failure false the scan stops at the first failing tap;
 * with it true the scan stops at the first passing tap.  TUNING_RETRIES
 * is reset after each transition check.  Returns the tap value at
 * which the scan stopped (may exceed MAX_TAP_VALUES when the whole
 * range behaved uniformly).
 */
1695 static int sdhci_tegra_scan_tap_values(struct sdhci_host *sdhci,
1696 unsigned int starting_tap, bool expect_failure)
1698 unsigned int tap_value = starting_tap;
1700 unsigned int retry = TUNING_RETRIES;
1703 /* Set the tap delay */
1704 sdhci_tegra_set_tap_delay(sdhci, tap_value);
1706 /* Run frequency tuning */
1707 err = sdhci_tegra_run_frequency_tuning(sdhci);
1712 retry = TUNING_RETRIES;
/* Stop when the result flips relative to what we expect */
1713 if ((expect_failure && !err) ||
1714 (!expect_failure && err))
1718 } while (tap_value <= MAX_TAP_VALUES);
1724 * While scanning for tap values, first get the partial window followed by the
1725 * full window. Note that, when scanning for full win start, tuning has to be
1726 * run until a passing tap value is found. Hence, failure is expected during
1727 * this process and ignored.
/*
 * sdhci_tegra_get_tap_window_data - scan the full tap range and record
 * the partial window and the full passing window into @tap_data.
 *
 * Scan order: partial window end first, then the full window start
 * (failures expected and ignored while searching for the first passing
 * tap), then the full window end.  The full-window scan is repeated
 * until the full window is at least 50% as wide as the partial window
 * or taps are exhausted; a narrower full window is abandoned.  Returns
 * an error only when both windows had to be abandoned.
 */
1729 static int sdhci_tegra_get_tap_window_data(struct sdhci_host *sdhci,
1730 struct tap_window_data *tap_data)
1732 unsigned int tap_value;
1733 unsigned int full_win_percentage = 0;
1737 dev_err(mmc_dev(sdhci->mmc), "Invalid tap data\n");
1741 /* Get the partial window data */
1743 tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, false);
/* Scan failed right away: no partial window exists */
1745 tap_data->abandon_partial_win = true;
1746 tap_data->partial_win = 0;
1747 } else if (tap_value > MAX_TAP_VALUES) {
1749 * If tap value is more than 0xFF, we have hit the miracle case
1750 * of all tap values passing. Discard full window as passing
1751 * window has covered all taps.
1753 tap_data->partial_win = MAX_TAP_VALUES;
1754 tap_data->abandon_full_win = true;
/* Normal case: last passing tap is one below the first failure */
1757 tap_data->partial_win = tap_value - 1;
1758 if (tap_value == MAX_TAP_VALUES) {
1759 /* All tap values exhausted. No full window */
1760 tap_data->abandon_full_win = true;
1766 /* Get the full window start */
1768 tap_value = sdhci_tegra_scan_tap_values(sdhci, tap_value, true);
1769 if (tap_value > MAX_TAP_VALUES) {
1770 /* All tap values exhausted. No full window */
1771 tap_data->abandon_full_win = true;
1774 tap_data->full_win_begin = tap_value;
1776 * If full win start is 0xFF, then set that as
1777 * full win end and exit.
1779 if (tap_value == MAX_TAP_VALUES) {
1780 tap_data->full_win_end = tap_value;
1785 /* Get the full window end */
1787 tap_value = sdhci_tegra_scan_tap_values(sdhci,
1789 tap_data->full_win_end = tap_value - 1;
1790 if (tap_value > MAX_TAP_VALUES)
1791 tap_data->full_win_end = MAX_TAP_VALUES;
1792 full_win_percentage = ((tap_data->full_win_end -
1793 tap_data->full_win_begin) * 100) /
1794 (tap_data->partial_win + 1);
1795 } while (full_win_percentage < 50 && tap_value < MAX_TAP_VALUES);
1797 if (full_win_percentage < 50)
1798 tap_data->abandon_full_win = true;
1801 * Mark tuning as failed if both partial and full windows are
1804 if (tap_data->abandon_partial_win && tap_data->abandon_full_win)
/*
 * sdhci_tegra_execute_tuning - platform tuning callback (SDR50/SDR104).
 *
 * For each DFS frequency band (or the current clock when frequency
 * scaling is off), gathers tap windows at each required core voltage —
 * overriding the core voltage via tegra_dvfs_override_core_voltage()
 * around every scan — then computes and programs the best tap value and
 * verifies it with one more tuning run.  Tuning state is cached in
 * tegra_host so a later call with TUNING_STATUS_DONE is a no-op.
 *
 * Locking: the SDHCI core calls this with sdhci->lock held; the lock
 * is dropped around every sleeping call (allocation, DVFS/regulator
 * work, clock rate changes) and re-taken afterwards.
 *
 * NOTE(review): vcore_lvl is function-static shared state across all
 * hosts — presumably protected by the caller's serialization; confirm.
 */
1809 static int sdhci_tegra_execute_tuning(struct sdhci_host *sdhci, u32 opcode)
1811 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
1812 struct sdhci_tegra *tegra_host = pltfm_host->priv;
1813 struct tegra_tuning_data *tuning_data;
1814 struct tap_window_data *tap_data;
1818 unsigned int freq_band;
1820 unsigned int voltage = 0;
1821 #ifdef CONFIG_MMC_FREQ_SCALING
1822 unsigned int dfs_freq = 0;
1823 bool single_freq_tuning = false;
1825 bool vcore_override_failed = false;
1826 static unsigned int vcore_lvl;
1828 /* Tuning is valid only in SDR104 and SDR50 modes */
1829 ctrl_2 = sdhci_readw(sdhci, SDHCI_HOST_CONTROL2);
1830 if (!(((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
1831 (((ctrl_2 & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
1832 (sdhci->flags & SDHCI_SDR50_NEEDS_TUNING))))
1835 /* Tuning should be done only for MMC_BUS_WIDTH_8 and MMC_BUS_WIDTH_4 */
1836 if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
1837 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_8;
1838 else if (sdhci->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
1839 tegra_host->tuning_bsize = MMC_TUNING_BLOCK_SIZE_BUS_WIDTH_4;
1843 sdhci->flags &= ~SDHCI_NEEDS_RETUNING;
1845 /* Set the tuning command to be used */
1846 tegra_host->tuning_opcode = opcode;
1849 * Disable all interrupts signalling.Enable interrupt status
1850 * detection for buffer read ready and data crc. We use
1851 * polling for tuning as it involves less overhead.
1853 ier = sdhci_readl(sdhci, SDHCI_INT_ENABLE);
1854 sdhci_writel(sdhci, 0, SDHCI_SIGNAL_ENABLE);
1855 sdhci_writel(sdhci, SDHCI_INT_DATA_AVAIL |
1856 SDHCI_INT_DATA_CRC, SDHCI_INT_ENABLE);
/* Pick the frequency band from the current controller clock */
1858 if (sdhci->max_clk > tuning_params[TUNING_LOW_FREQ].freq_hz)
1859 freq_band = TUNING_HIGH_FREQ;
1861 freq_band = TUNING_LOW_FREQ;
1862 tuning_data = tegra_host->tuning_data[freq_band];
1865 * If tuning is already done and retune request is not set, then skip
1866 * best tap value calculation and use the old best tap value.
1868 if (tegra_host->tuning_status == TUNING_STATUS_DONE) {
1869 dev_info(mmc_dev(sdhci->mmc),
1870 "Tuning already done. Setting tuned tap value %d\n",
1871 tegra_host->tuning_data[freq_band]->best_tap_value);
1875 #ifdef CONFIG_MMC_FREQ_SCALING
/* With DFS enabled, tune each scaling frequency in turn */
1876 for (dfs_freq = 0; dfs_freq < TUNING_FREQ_COUNT; dfs_freq++) {
1877 if (sdhci->mmc->caps2 & MMC_CAP2_FREQ_SCALING) {
1878 spin_unlock(&sdhci->lock);
1879 tegra_sdhci_set_clk_rate(sdhci,
1880 tuning_params[dfs_freq].freq_hz);
1881 spin_lock(&sdhci->lock);
1883 single_freq_tuning = true;
1886 if (sdhci->max_clk > tuning_params[TUNING_LOW_FREQ].freq_hz)
1887 freq_band = TUNING_HIGH_FREQ;
1889 freq_band = TUNING_LOW_FREQ;
1890 /* Remove any previously set override voltages */
1891 if (tegra_host->set_tuning_override) {
1892 spin_unlock(&sdhci->lock);
1893 tegra_dvfs_override_core_voltage(0);
1894 spin_lock(&sdhci->lock);
1896 tegra_host->set_tuning_override = false;
1900 * Run tuning and get the passing tap window info for all
1901 * frequencies and core voltages required to calculate the
1902 * final tap value. The standard driver calls this platform
1903 * specific tuning callback after holding a lock. The spinlock
1904 * needs to be released when calling non-atomic context
1905 * functions like regulator calls etc.
1907 spin_unlock(&sdhci->lock);
1908 if (!tegra_host->tuning_data[freq_band]) {
1909 tegra_host->tuning_data[freq_band] =
1910 devm_kzalloc(mmc_dev(sdhci->mmc),
1911 sizeof(struct tegra_tuning_data),
1913 if (!tegra_host->tuning_data[freq_band]) {
1915 dev_err(mmc_dev(sdhci->mmc),
1916 "Insufficient memory for tap window info\n");
1917 spin_lock(&sdhci->lock);
1921 spin_lock(&sdhci->lock);
1922 tuning_data = tegra_host->tuning_data[freq_band];
/* One tap-window scan per required core voltage */
1923 for (i = 0; i < tuning_params[freq_band].nr_voltages; i++) {
1924 spin_unlock(&sdhci->lock);
1925 if (!tuning_data->tap_data[i]) {
1926 tuning_data->tap_data[i] = devm_kzalloc(
1927 mmc_dev(sdhci->mmc),
1928 sizeof(struct tap_window_data),
1930 if (!tuning_data->tap_data[i]) {
1932 dev_err(mmc_dev(sdhci->mmc),
1933 "Insufficient memory for tap window info\n");
1934 spin_lock(&sdhci->lock);
1938 tap_data = tuning_data->tap_data[i];
1940 * If nominal vcore is not specified, run tuning once
1941 * and set the tap value. Tuning might fail but this is
1942 * a better option than not trying tuning at all.
1944 if (!tegra_host->nominal_vcore_mv) {
1945 dev_err(mmc_dev(sdhci->mmc),
1946 "Missing nominal vcore. Tuning might fail\n");
1947 tuning_data->one_shot_tuning = true;
1948 spin_lock(&sdhci->lock);
1949 goto skip_vcore_override;
/* Clamp the requested voltage into [min_override, nominal] */
1952 voltage = tuning_params[freq_band].voltages[i];
1953 if (voltage > tegra_host->nominal_vcore_mv) {
1954 voltage = tegra_host->nominal_vcore_mv;
1955 if ((tuning_data->nominal_vcore_tun_done) &&
1956 (tuning_params[freq_band].nr_voltages == 1)) {
1957 spin_lock(&sdhci->lock);
1960 } else if (voltage <
1961 tegra_host->min_vcore_override_mv) {
1962 voltage = tegra_host->min_vcore_override_mv;
1963 if ((tuning_data->override_vcore_tun_done) &&
1964 (tuning_params[freq_band].nr_voltages == 1)) {
1965 spin_lock(&sdhci->lock);
/* Apply the override only when it differs from the current level */
1970 if (voltage != vcore_lvl) {
1971 err = tegra_dvfs_override_core_voltage(voltage);
1973 vcore_override_failed = true;
1974 dev_err(mmc_dev(sdhci->mmc),
1975 "Setting tuning override_mv %d failed %d\n",
1978 vcore_lvl = voltage;
1981 spin_lock(&sdhci->lock);
1983 skip_vcore_override:
1984 /* Get the tuning window info */
1985 err = sdhci_tegra_get_tap_window_data(sdhci, tap_data);
1987 dev_err(mmc_dev(sdhci->mmc), "No tuning window\n");
1992 * Nominal and min override core voltages are missing.
1993 * Set tuning as done for one shot tuning.
1995 if (tuning_data->one_shot_tuning) {
1996 tuning_data->nominal_vcore_tun_done = true;
1997 tuning_data->override_vcore_tun_done = true;
2001 /* Release the override voltage setting */
2002 spin_unlock(&sdhci->lock);
2003 err = tegra_dvfs_override_core_voltage(0);
2005 dev_err(mmc_dev(sdhci->mmc),
2006 "Clearing tuning override voltage failed %d\n",
2010 spin_lock(&sdhci->lock);
/* Record which voltage level this scan completed at */
2012 if (!vcore_override_failed) {
2013 if (voltage == tegra_host->nominal_vcore_mv)
2014 tuning_data->nominal_vcore_tun_done =
2017 tegra_host->min_vcore_override_mv)
2018 tuning_data->override_vcore_tun_done =
2024 * If tuning is required only at nominal core voltage, set the
2025 * min override tuning as done to avoid unnecessary
2026 * vcore override settings.
2028 if ((tuning_params[freq_band].nr_voltages == 1) &&
2029 tuning_data->nominal_vcore_tun_done)
2030 tuning_data->override_vcore_tun_done = true;
2033 * If setting min override voltage failed for the first time,
2034 * set nominal core voltage as override until retuning is done.
2036 if ((tegra_host->tuning_status != TUNING_STATUS_RETUNE) &&
2037 tuning_data->nominal_vcore_tun_done &&
2038 !tuning_data->override_vcore_tun_done)
2039 tegra_host->set_tuning_override = true;
2041 /* Calculate best tap for current freq band */
2042 if (freq_band == TUNING_LOW_FREQ)
2043 calculate_low_freq_tap_value(sdhci);
2045 calculate_high_freq_tap_value(sdhci);
2048 /* Dump the tap window data */
2049 sdhci_tegra_dump_tuning_data(sdhci, freq_band);
2051 sdhci_tegra_set_tap_delay(sdhci,
2052 tegra_host->tuning_data[freq_band]->best_tap_value);
2054 * Run tuning with the best tap value. If tuning fails, set the
2055 * status for retuning next time enumeration is done.
2057 err = sdhci_tegra_run_frequency_tuning(sdhci);
2059 dev_err(mmc_dev(sdhci->mmc),
2060 "Freq tuning with best tap value failed %d\n",
2062 tuning_data->nominal_vcore_tun_done = false;
2063 tuning_data->override_vcore_tun_done = false;
2064 tegra_host->tuning_status = TUNING_STATUS_RETUNE;
2066 if (tuning_data->nominal_vcore_tun_done &&
2067 tuning_data->override_vcore_tun_done)
2068 tegra_host->tuning_status = TUNING_STATUS_DONE;
2070 tegra_host->tuning_status =
2071 TUNING_STATUS_RETUNE;
/* Cache the band's best tap for use by set_clk_rate under DFS */
2073 tegra_host->best_tap_values[freq_band] =
2074 tegra_host->tuning_data[freq_band]->best_tap_value;
2075 #ifdef CONFIG_MMC_FREQ_SCALING
2076 if (single_freq_tuning)
2082 * Lock down the core voltage if tuning at override voltage failed
2083 * for the first time. The override setting will be removed once
2084 * retuning is called.
2086 if (tegra_host->set_tuning_override) {
2087 dev_info(mmc_dev(sdhci->mmc),
2088 "Nominal core voltage being set until retuning\n");
2089 spin_unlock(&sdhci->lock);
2090 err = tegra_dvfs_override_core_voltage(
2091 tegra_host->nominal_vcore_mv);
2093 dev_err(mmc_dev(sdhci->mmc),
2094 "Setting tuning override voltage failed %d\n",
2097 vcore_lvl = tegra_host->nominal_vcore_mv;
2098 spin_lock(&sdhci->lock);
2100 /* Schedule for the retuning */
2101 mod_timer(&sdhci->tuning_timer, jiffies +
2105 /* Enable interrupts. Enable full range for core voltage */
2106 sdhci_writel(sdhci, ier, SDHCI_INT_ENABLE);
2107 sdhci_writel(sdhci, ier, SDHCI_SIGNAL_ENABLE);
/*
 * tegra_sdhci_suspend - power the controller down for system suspend:
 * gate the SDMMC clocks, then drop the slot/I/O power rails (when a
 * card is present and the rails are currently enabled).
 */
2111 static int tegra_sdhci_suspend(struct sdhci_host *sdhci)
2113 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2114 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2116 tegra_sdhci_set_clock(sdhci, 0);
2118 /* Disable the power rails if any */
2119 if (tegra_host->card_present) {
2120 if (tegra_host->is_rail_enabled) {
2121 if (tegra_host->vdd_io_reg)
2122 regulator_disable(tegra_host->vdd_io_reg);
2123 if (tegra_host->vdd_slot_reg)
2124 regulator_disable(tegra_host->vdd_slot_reg);
2125 tegra_host->is_rail_enabled = 0;
/*
 * tegra_sdhci_resume - restore power and clocks after system suspend.
 *
 * Re-samples card presence from the CD GPIO (active-low), re-enables
 * the slot/I/O rails and restores the signalling voltage according to
 * the platform OCR mask, restarts the clock at the 400 kHz
 * identification rate, and — when MMC_PM_KEEP_POWER is set — resets
 * the controller and re-asserts bus power.
 */
2132 static int tegra_sdhci_resume(struct sdhci_host *sdhci)
2134 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2135 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2136 struct platform_device *pdev;
2137 struct tegra_sdhci_platform_data *plat;
2139 pdev = to_platform_device(mmc_dev(sdhci->mmc));
2140 plat = pdev->dev.platform_data;
/* CD GPIO is active-low: 0 means a card is inserted */
2142 if (gpio_is_valid(plat->cd_gpio)) {
2143 tegra_host->card_present =
2144 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
2147 /* Enable the power rails if any */
2148 if (tegra_host->card_present) {
2149 if (!tegra_host->is_rail_enabled) {
2150 if (tegra_host->vdd_slot_reg)
2151 regulator_enable(tegra_host->vdd_slot_reg);
2152 if (tegra_host->vdd_io_reg) {
2153 regulator_enable(tegra_host->vdd_io_reg);
2154 if (plat->mmc_data.ocr_mask &
2155 SDHOST_1V8_OCR_MASK)
2156 tegra_sdhci_signal_voltage_switch(sdhci,
2157 MMC_SIGNAL_VOLTAGE_180);
2159 tegra_sdhci_signal_voltage_switch(sdhci,
2160 MMC_SIGNAL_VOLTAGE_330);
2162 tegra_host->is_rail_enabled = 1;
2166 /* Setting the min identification clock of freq 400KHz */
2167 tegra_sdhci_set_clock(sdhci, 400000);
2169 /* Reset the controller and power on if MMC_KEEP_POWER flag is set*/
2170 if (sdhci->mmc->pm_flags & MMC_PM_KEEP_POWER) {
2171 tegra_sdhci_reset(sdhci, SDHCI_RESET_ALL);
2172 sdhci_writeb(sdhci, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
/* debugfs getter: report the DFS statistics polling interval (ms). */
2179 static int show_polling_period(void *data, u64 *value)
2181 struct sdhci_host *host = (struct sdhci_host *)data;
2183 if (host->mmc->dev_stats != NULL)
2184 *value = host->mmc->dev_stats->polling_interval;
/* debugfs setter: update the DFS statistics polling interval (capped
 * at 1 second, per the comment below). */
2189 static int set_polling_period(void *data, u64 value)
2191 struct sdhci_host *host = (struct sdhci_host *)data;
2193 if (host->mmc->dev_stats != NULL) {
2194 /* Limiting the maximum polling period to 1 sec */
2197 host->mmc->dev_stats->polling_interval = value;
/* debugfs getter: report the governor's active-load high threshold. */
2202 static int show_active_load_high_threshold(void *data, u64 *value)
2204 struct sdhci_host *host = (struct sdhci_host *)data;
2205 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2206 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2207 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
2209 if (gov_data != NULL)
2210 *value = gov_data->act_load_high_threshold;
/*
 * debugfs "active_load_high_threshold" write callback.
 * Stores the DFS governor's high-load threshold.  The in-code comment
 * mentions a 100% maximum, but the range check itself is elided from
 * this listing — verify in the full source.
 */
2215 static int set_active_load_high_threshold(void *data, u64 value)
2217 struct sdhci_host *host = (struct sdhci_host *)data;
2218 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2219 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2220 struct tegra_freq_gov_data *gov_data = tegra_host->gov_data;
2222 if (gov_data != NULL) {
2223 /* Maximum threshold load percentage is 100.*/
2226 gov_data->act_load_high_threshold = value;
/*
 * debugfs attribute wrappers binding the show/set pairs above to
 * "%llu"-formatted files; the files themselves are created in
 * sdhci_tegra_error_stats_debugfs().
 */
2232 DEFINE_SIMPLE_ATTRIBUTE(sdhci_polling_period_fops, show_polling_period,
2233 set_polling_period, "%llu\n");
2234 DEFINE_SIMPLE_ATTRIBUTE(sdhci_active_load_high_threshold_fops,
2235 show_active_load_high_threshold,
2236 set_active_load_high_threshold, "%llu\n");
/*
 * Build the per-host debugfs hierarchy:
 *   <dev-name>/error_stats
 *   <dev-name>/dfs_stats_dir/{dfs_stats, polling_period,
 *                             active_load_high_threshold}
 * On any creation failure the whole tree is removed and an error is
 * logged.  NOTE(review): the goto/error-label lines and some fops
 * arguments are elided in this listing — the failure lines at the end
 * are reached via elided labels, not by fall-through.
 */
2238 static void sdhci_tegra_error_stats_debugfs(struct sdhci_host *host)
2240 struct dentry *root;
2241 struct dentry *dfs_root;
2243 root = debugfs_create_dir(dev_name(mmc_dev(host->mmc)), NULL);
2244 if (IS_ERR_OR_NULL(root))
2246 host->debugfs_root = root;
2248 dfs_root = debugfs_create_dir("dfs_stats_dir", root);
2249 if (IS_ERR_OR_NULL(dfs_root))
2252 if (!debugfs_create_file("error_stats", S_IRUSR, root, host,
2255 if (!debugfs_create_file("dfs_stats", S_IRUSR, dfs_root, host,
2256 &sdhci_host_dfs_fops))
2258 if (!debugfs_create_file("polling_period", 0644, dfs_root, (void *)host,
2259 &sdhci_polling_period_fops))
2261 if (!debugfs_create_file("active_load_high_threshold", 0644,
2262 dfs_root, (void *)host,
2263 &sdhci_active_load_high_threshold_fops))
/* failure path: tear down everything created above */
2268 debugfs_remove_recursive(root);
2269 host->debugfs_root = NULL;
2271 pr_err("%s: Failed to initialize debugfs functionality\n", __func__);
/*
 * .platform_resume hook: after the core has resumed, gate the SDMMC
 * clock again if no card is present (clocks were only needed for the
 * resume sequence itself).
 */
2275 static void tegra_sdhci_post_resume(struct sdhci_host *sdhci)
2277 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci);
2278 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2280 /* Turn OFF the clocks if the card is not present */
2281 if (!(tegra_host->card_present) && tegra_host->clk_enabled)
2282 tegra_sdhci_set_clock(sdhci, 0);
/*
 * Vendor-specific sdhci_ops vtable wiring the Tegra register
 * read/write shims, clocking, voltage switching, tuning and
 * (optionally) frequency-scaling governor hooks into the generic
 * SDHCI core.  NOTE(review): the closing "#endif" and "};" lines for
 * the CONFIG_MMC_FREQ_SCALING section are elided in this listing.
 */
2285 static const struct sdhci_ops tegra_sdhci_ops = {
2286 .get_ro = tegra_sdhci_get_ro,
2287 .get_cd = tegra_sdhci_get_cd,
2288 .read_l = tegra_sdhci_readl,
2289 .read_w = tegra_sdhci_readw,
2290 .write_l = tegra_sdhci_writel,
2291 .write_w = tegra_sdhci_writew,
2292 .platform_bus_width = tegra_sdhci_buswidth,
2293 .set_clock = tegra_sdhci_set_clock,
2294 .suspend = tegra_sdhci_suspend,
2295 .resume = tegra_sdhci_resume,
2296 .platform_resume = tegra_sdhci_post_resume,
2297 .platform_reset_exit = tegra_sdhci_reset_exit,
2298 .set_uhs_signaling = tegra_sdhci_set_uhs_signaling,
2299 .switch_signal_voltage = tegra_sdhci_signal_voltage_switch,
2300 .switch_signal_voltage_exit = tegra_sdhci_do_calibration,
2301 .execute_freq_tuning = sdhci_tegra_execute_tuning,
2302 .sd_error_stats = sdhci_tegra_sd_error_stats,
2303 #ifdef CONFIG_MMC_FREQ_SCALING
2304 .dfs_gov_init = sdhci_tegra_freq_gov_init,
2305 .dfs_gov_get_target_freq = sdhci_tegra_get_target_freq,
/*
 * Platform data: SDHCI core quirks selected per Tegra SoC generation
 * via Kconfig conditionals.  The quirk set is OR-ed together across
 * the #if blocks into a single .quirks value.  NOTE(review): the
 * intermediate "#endif" lines are elided in this listing.
 */
2309 static const struct sdhci_pltfm_data sdhci_tegra20_pdata = {
2310 .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
2311 #ifndef CONFIG_ARCH_TEGRA_2x_SOC
2312 SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
2313 SDHCI_QUIRK_NON_STD_VOLTAGE_SWITCHING |
2314 SDHCI_QUIRK_NON_STANDARD_TUNING |
2316 #if defined(CONFIG_ARCH_TEGRA_3x_SOC)
2317 SDHCI_QUIRK_DISABLE_CARD_CLOCK |
2318 SDHCI_QUIRK_DO_DUMMY_WRITE |
2320 #if defined(CONFIG_ARCH_TEGRA_12x_SOC)
2321 SDHCI_QUIRK_SUPPORT_64BIT_DMA |
2323 SDHCI_QUIRK_SINGLE_POWER_WRITE |
2324 SDHCI_QUIRK_NO_HISPD_BIT |
2325 SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC |
2326 SDHCI_QUIRK_BROKEN_CARD_DETECTION |
2327 SDHCI_QUIRK_NO_CALC_MAX_DISCARD_TO,
2328 .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN,
2329 .ops = &tegra_sdhci_ops,
/*
 * NVIDIA-specific (non-SDHCI-spec) quirk set, again assembled per SoC
 * generation with Kconfig conditionals and OR-ed into .nvquirks.
 * Despite the "_tegra20" name, this single soc_data is reused by every
 * entry in the DT match table below.  NOTE(review): the intermediate
 * "#endif"/"#else" lines are elided in this listing, so adjacent
 * lines here may belong to different preprocessor branches.
 */
2332 static struct sdhci_tegra_soc_data soc_data_tegra20 = {
2333 .pdata = &sdhci_tegra20_pdata,
2334 .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 |
2335 #if defined(CONFIG_ARCH_TEGRA_3x_SOC) || defined(CONFIG_ARCH_TEGRA_14x_SOC)
2336 NVQUIRK_SET_CALIBRATION_OFFSETS |
2338 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
2339 NVQUIRK_ENABLE_PADPIPE_CLKEN |
2340 NVQUIRK_DISABLE_SPI_MODE_CLKEN |
2341 #ifndef CONFIG_TEGRA_FPGA_PLATFORM
2342 NVQUIRK_EN_FEEDBACK_CLK |
2344 NVQUIRK_SET_TAP_DELAY |
2345 NVQUIRK_ENABLE_SDR50_TUNING |
2346 NVQUIRK_ENABLE_SDR50 |
2347 NVQUIRK_ENABLE_SDR104 |
2348 NVQUIRK_SHADOW_XFER_MODE_REG |
2350 #if defined(CONFIG_ARCH_TEGRA_11x_SOC)
2351 NVQUIRK_SET_DRIVE_STRENGTH |
2352 NVQUIRK_DISABLE_SDMMC4_CALIB |
2354 #if defined(CONFIG_ARCH_TEGRA_2x_SOC)
2355 NVQUIRK_DISABLE_AUTO_CALIBRATION |
2356 #elif defined(CONFIG_ARCH_TEGRA_3x_SOC)
2357 NVQUIRK_ENABLE_SD_3_0 |
2358 NVQUIRK_BROKEN_SDR50_CONTROLLER_CLOCK |
2360 NVQUIRK_SET_TRIM_DELAY |
2361 NVQUIRK_ENABLE_DDR50 |
2362 NVQUIRK_INFINITE_ERASE_TIMEOUT |
2363 NVQUIRK_DISABLE_AUTO_CMD23 |
2364 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
2365 NVQUIRK_DISABLE_AUTO_CALIBRATION |
2368 #if defined(CONFIG_ARCH_TEGRA_12x_SOC)
2369 NVQUIRK_SET_PAD_E_INPUT_OR_E_PWRD |
2371 NVQUIRK_ENABLE_BLOCK_GAP_DET,
/*
 * Device-tree match table.  Every supported compatible string maps to
 * the same soc_data_tegra20 descriptor; the per-SoC differences are
 * compiled in via the Kconfig conditionals above.  NOTE(review): the
 * "#endif" lines and the terminating empty sentinel entry are elided
 * in this listing.
 */
2374 static const struct of_device_id sdhci_tegra_dt_match[] = {
2375 #ifdef CONFIG_ARCH_TEGRA_14x_SOC
2376 { .compatible = "nvidia,tegra148-sdhci", .data = &soc_data_tegra20 },
2378 #ifdef CONFIG_ARCH_TEGRA_11x_SOC
2379 { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra20 },
2381 #ifdef CONFIG_ARCH_TEGRA_3x_SOC
2382 { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra20 },
2384 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
2385 { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 },
2389 MODULE_DEVICE_TABLE(of, sdhci_dt_ids);
/*
 * Build a tegra_sdhci_platform_data from the device-tree node when the
 * board did not supply legacy platform data.
 *
 * Parses: cd/wp/power GPIOs, bus width, EDP support/states, tap/trim
 * delays, clock limits, uhs_mask, built-in flag and the OCR voltage
 * mask.  Memory is devm-allocated, so it is freed automatically on
 * probe failure / device removal.
 * Returns the populated pdata, or (per the elided early-return paths)
 * presumably NULL on allocation failure or missing DT node — confirm
 * against the full source.
 */
2391 static struct tegra_sdhci_platform_data *sdhci_tegra_dt_parse_pdata(
2392 struct platform_device *pdev)
2395 struct tegra_sdhci_platform_data *plat;
2396 struct device_node *np = pdev->dev.of_node;
2398 struct property *prop;
2406 plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
2408 dev_err(&pdev->dev, "Can't allocate platform data\n");
2412 plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
2413 plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
2414 plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0);
2416 if (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
2420 if (of_find_property(np, "edp_support", NULL)) {
2421 plat->edp_support = true;
2422 of_property_for_each_u32(np, "edp_states", prop, p, u) {
2423 if (i == SD_EDP_NUM_STATES)
2425 plat->edp_states[i] = u;
2432 of_property_read_u32(np, "tap-delay", &plat->tap_delay);
2433 of_property_read_u32(np, "trim-delay", &plat->trim_delay);
2434 of_property_read_u32(np, "ddr-clk-limit", &plat->ddr_clk_limit);
2435 of_property_read_u32(np, "max-clk-limit", &plat->max_clk_limit);
2437 of_property_read_u32(np, "uhs_mask", &plat->uhs_mask);
2439 if (of_find_property(np, "built-in", NULL))
2440 plat->mmc_data.built_in = 1;
/* map the DT "mmc-ocr-mask" selector onto one of three OCR presets */
2442 if (!of_property_read_u32(np, "mmc-ocr-mask", &val)) {
2444 plat->mmc_data.ocr_mask = MMC_OCR_1V8_MASK;
2446 plat->mmc_data.ocr_mask = MMC_OCR_2V8_MASK;
2448 plat->mmc_data.ocr_mask = MMC_OCR_3V2_MASK;
/*
 * Power down the slot and IO regulators (if present) and mark the
 * rails disabled.  Idempotent: a second call is a no-op because of the
 * is_rail_enabled guard.  Used from the reboot notifier below.
 */
2453 static void tegra_sdhci_rail_off(struct sdhci_tegra *tegra_host)
2455 if (tegra_host->is_rail_enabled) {
2456 if (tegra_host->vdd_slot_reg)
2457 regulator_disable(tegra_host->vdd_slot_reg);
2458 if (tegra_host->vdd_io_reg)
2459 regulator_disable(tegra_host->vdd_io_reg);
2460 tegra_host->is_rail_enabled = false;
/*
 * Reboot notifier: turn the card power rails off on shutdown/restart.
 * Registered from probe only when plat->power_off_rail is set.
 * NOTE(review): the switch on 'event' and the return statement are
 * elided in this listing — rail-off is presumably taken only for the
 * relevant reboot events; confirm against the full source.
 */
2464 static int tegra_sdhci_reboot_notify(struct notifier_block *nb,
2465 unsigned long event, void *data)
2467 struct sdhci_tegra *tegra_host =
2468 container_of(nb, struct sdhci_tegra, reboot_notify);
2473 tegra_sdhci_rail_off(tegra_host);
/*
 * Platform probe: bring up one SDHCI controller instance.
 *
 * Sequence (as visible in this elided listing):
 *  1. Resolve soc_data via OF match (falling back to soc_data_tegra20)
 *     and init the pltfm host.
 *  2. Obtain platform data (legacy or parsed from DT).
 *  3. Allocate tegra_host + error-stat storage (devm, auto-freed).
 *  4. Look up pll_c/pll_p system clocks and cache their rates.
 *  5. Claim power/cd/wp GPIOs; derive initial card_present (cd gpio is
 *     active-low: present when it reads 0).
 *  6. Select the vddio voltage window from the OCR mask, get/configure
 *     the vddio_sdmmc and vddio_sd_slot regulators, and enable the
 *     rails if a card is present.
 *  7. Enable runtime PM, module clock, optional EMC/SCLK scaling
 *     clocks; populate host caps/caps2 per pdata and SoC config.
 *  8. sdhci_add_host(), then hook the card-detect IRQ (optionally as a
 *     wake source), create debugfs, enable async suspend, and register
 *     the reboot notifier when power_off_rail is set.
 *
 * Error handling uses goto-unwind labels; the labels themselves (and
 * many braces/returns) are elided here, so the trailing cleanup lines
 * belong to distinct err_* labels, not straight-line code.
 * NOTE(review): declarations of rc, pll_c/pll_p and their rates are in
 * elided lines; they are presumably function-local or file-scope —
 * confirm against the full source.
 */
2479 static int sdhci_tegra_probe(struct platform_device *pdev)
2481 const struct of_device_id *match;
2482 const struct sdhci_tegra_soc_data *soc_data;
2483 struct sdhci_host *host;
2484 struct sdhci_pltfm_host *pltfm_host;
2485 struct tegra_sdhci_platform_data *plat;
2486 struct sdhci_tegra *tegra_host;
2489 match = of_match_device(sdhci_tegra_dt_match, &pdev->dev);
2491 soc_data = match->data;
2493 soc_data = &soc_data_tegra20;
2495 host = sdhci_pltfm_init(pdev, soc_data->pdata);
2497 return PTR_ERR(host);
2499 pltfm_host = sdhci_priv(host);
2501 plat = pdev->dev.platform_data;
2504 plat = sdhci_tegra_dt_parse_pdata(pdev);
2507 dev_err(mmc_dev(host->mmc), "missing platform data\n");
2512 tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL);
2514 dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n");
2519 tegra_host->plat = plat;
2520 pdev->dev.platform_data = plat;
2522 tegra_host->sd_stat_head = devm_kzalloc(&pdev->dev,
2523 sizeof(struct sdhci_tegra_sd_stats), GFP_KERNEL);
2524 if (!tegra_host->sd_stat_head) {
2525 dev_err(mmc_dev(host->mmc), "failed to allocate sd_stat_head\n");
2530 tegra_host->soc_data = soc_data;
2531 pltfm_host->priv = tegra_host;
/* system PLLs used later to pick the best-matching clock parent */
2533 pll_c = clk_get_sys(NULL, "pll_c");
2534 if (IS_ERR(pll_c)) {
2535 rc = PTR_ERR(pll_c);
2536 dev_err(mmc_dev(host->mmc),
2537 "clk error in getting pll_c: %d\n", rc);
2540 pll_p = clk_get_sys(NULL, "pll_p");
2541 if (IS_ERR(pll_p)) {
2542 rc = PTR_ERR(pll_p);
2543 dev_err(mmc_dev(host->mmc),
2544 "clk error in getting pll_p: %d\n", rc);
2547 pll_c_rate = clk_get_rate(pll_c);
2548 pll_p_rate = clk_get_rate(pll_p);
2550 #ifdef CONFIG_MMC_EMBEDDED_SDIO
2551 if (plat->mmc_data.embedded_sdio)
2552 mmc_set_embedded_sdio_data(host->mmc,
2553 &plat->mmc_data.embedded_sdio->cis,
2554 &plat->mmc_data.embedded_sdio->cccr,
2555 plat->mmc_data.embedded_sdio->funcs,
2556 plat->mmc_data.embedded_sdio->num_funcs);
2559 if (gpio_is_valid(plat->power_gpio)) {
2560 rc = gpio_request(plat->power_gpio, "sdhci_power");
2562 dev_err(mmc_dev(host->mmc),
2563 "failed to allocate power gpio\n");
2566 gpio_direction_output(plat->power_gpio, 1);
2569 if (gpio_is_valid(plat->cd_gpio)) {
2570 rc = gpio_request(plat->cd_gpio, "sdhci_cd");
2572 dev_err(mmc_dev(host->mmc),
2573 "failed to allocate cd gpio\n");
2576 gpio_direction_input(plat->cd_gpio);
/* cd gpio is active-low: 0 means a card is inserted */
2578 tegra_host->card_present =
2579 (gpio_get_value_cansleep(plat->cd_gpio) == 0);
2581 } else if (plat->mmc_data.register_status_notify) {
2582 plat->mmc_data.register_status_notify(sdhci_status_notify_cb, host);
2585 if (plat->mmc_data.status) {
2586 plat->mmc_data.card_present = plat->mmc_data.status(mmc_dev(host->mmc));
2589 if (gpio_is_valid(plat->wp_gpio)) {
2590 rc = gpio_request(plat->wp_gpio, "sdhci_wp");
2592 dev_err(mmc_dev(host->mmc),
2593 "failed to allocate wp gpio\n");
2596 gpio_direction_input(plat->wp_gpio);
2600 * If there is no card detect gpio, assume that the
2601 * card is always present.
2603 if (!gpio_is_valid(plat->cd_gpio))
2604 tegra_host->card_present = 1;
/* pick the IO-rail voltage window implied by the OCR mask */
2606 if (plat->mmc_data.ocr_mask & SDHOST_1V8_OCR_MASK) {
2607 tegra_host->vddio_min_uv = SDHOST_LOW_VOLT_MIN;
2608 tegra_host->vddio_max_uv = SDHOST_LOW_VOLT_MAX;
2609 } else if (plat->mmc_data.ocr_mask & MMC_OCR_2V8_MASK) {
2610 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_2V8;
2611 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
2612 } else if (plat->mmc_data.ocr_mask & MMC_OCR_3V2_MASK) {
2613 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_3V2;
2614 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
2617 * Set the minV and maxV to default
2618 * voltage range of 2.7V - 3.6V
2620 tegra_host->vddio_min_uv = SDHOST_HIGH_VOLT_MIN;
2621 tegra_host->vddio_max_uv = SDHOST_HIGH_VOLT_MAX;
/* regulators are optional on many boards: absence is only dev_info */
2624 tegra_host->vdd_io_reg = regulator_get(mmc_dev(host->mmc),
2626 if (IS_ERR_OR_NULL(tegra_host->vdd_io_reg)) {
2627 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
2628 "Assuming vddio_sdmmc is not required.\n",
2629 "vddio_sdmmc", PTR_ERR(tegra_host->vdd_io_reg));
2630 tegra_host->vdd_io_reg = NULL;
2632 rc = regulator_set_voltage(tegra_host->vdd_io_reg,
2633 tegra_host->vddio_min_uv,
2634 tegra_host->vddio_max_uv);
2636 dev_err(mmc_dev(host->mmc), "%s regulator_set_voltage failed: %d",
2638 regulator_put(tegra_host->vdd_io_reg);
2639 tegra_host->vdd_io_reg = NULL;
2643 tegra_host->vdd_slot_reg = regulator_get(mmc_dev(host->mmc),
2645 if (IS_ERR_OR_NULL(tegra_host->vdd_slot_reg)) {
2646 dev_info(mmc_dev(host->mmc), "%s regulator not found: %ld."
2647 " Assuming vddio_sd_slot is not required.\n",
2648 "vddio_sd_slot", PTR_ERR(tegra_host->vdd_slot_reg));
2649 tegra_host->vdd_slot_reg = NULL;
2652 if (tegra_host->card_present) {
2653 if (tegra_host->vdd_slot_reg)
2654 regulator_enable(tegra_host->vdd_slot_reg);
2655 if (tegra_host->vdd_io_reg)
2656 regulator_enable(tegra_host->vdd_io_reg);
2657 tegra_host->is_rail_enabled = 1;
2660 tegra_pd_add_device(&pdev->dev);
2661 pm_runtime_enable(&pdev->dev);
2662 pltfm_host->clk = clk_get(mmc_dev(host->mmc), NULL);
2663 if (IS_ERR(pltfm_host->clk)) {
2664 dev_err(mmc_dev(host->mmc), "clk err\n");
2665 rc = PTR_ERR(pltfm_host->clk);
2669 if (clk_get_parent(pltfm_host->clk) == pll_c)
2670 tegra_host->is_parent_pllc = true;
2672 pm_runtime_get_sync(&pdev->dev);
2673 rc = clk_prepare_enable(pltfm_host->clk);
/* optional scaling clocks: missing ones are tolerated (set to NULL) */
2677 tegra_host->emc_clk = devm_clk_get(mmc_dev(host->mmc), "emc");
2678 if (IS_ERR_OR_NULL(tegra_host->emc_clk)) {
2679 dev_err(mmc_dev(host->mmc), "Can't get emc clk\n");
2680 tegra_host->emc_clk = NULL;
2682 tegra_host->emc_max_clk =
2683 clk_round_rate(tegra_host->emc_clk, ULONG_MAX);
2684 clk_set_rate(tegra_host->emc_clk, SDMMC_EMC_MAX_FREQ);
2687 tegra_host->sclk = devm_clk_get(mmc_dev(host->mmc), "sclk");
2688 if (IS_ERR_OR_NULL(tegra_host->sclk)) {
2689 dev_err(mmc_dev(host->mmc), "Can't get sclk clock\n");
2690 tegra_host->sclk = NULL;
2692 clk_set_rate(tegra_host->sclk, SDMMC_AHB_MAX_FREQ);
2694 pltfm_host->priv = tegra_host;
2695 tegra_host->clk_enabled = true;
2696 tegra_host->max_clk_limit = plat->max_clk_limit;
2697 tegra_host->ddr_clk_limit = plat->ddr_clk_limit;
2698 tegra_host->instance = pdev->id;
2700 host->mmc->pm_caps |= plat->pm_caps;
2701 host->mmc->pm_flags |= plat->pm_flags;
2703 host->mmc->caps |= MMC_CAP_ERASE;
2704 /* enable 1/8V DDR capable */
2705 host->mmc->caps |= MMC_CAP_1_8V_DDR;
2707 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
2708 host->mmc->caps |= MMC_CAP_SDIO_IRQ;
2709 host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_IGNORE_PM_NOTIFY;
2710 if (plat->mmc_data.built_in) {
2711 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
2713 host->mmc->pm_flags |= MMC_PM_IGNORE_PM_NOTIFY;
2715 /* disable access to boot partitions */
2716 host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC;
2718 #if !defined(CONFIG_ARCH_TEGRA_2x_SOC) && !defined(CONFIG_ARCH_TEGRA_3x_SOC)
2719 host->mmc->caps2 |= MMC_CAP2_HS200;
2720 #ifdef CONFIG_TEGRA_FPGA_PLATFORM
2721 /* Enable HS200 mode */
2722 host->mmc->caps2 |= MMC_CAP2_HS200;
2724 host->mmc->caps2 |= MMC_CAP2_CACHE_CTRL;
2725 host->mmc->caps |= MMC_CAP_CMD23;
2726 host->mmc->caps2 |= MMC_CAP2_PACKED_CMD;
2730 #ifdef CONFIG_MMC_FREQ_SCALING
2732 * Enable dyamic frequency scaling support only if the platform clock
2733 * limit is higher than the lowest supported frequency by tuning.
2735 if (plat->en_freq_scaling && (plat->max_clk_limit >
2736 tuning_params[TUNING_LOW_FREQ].freq_hz))
2737 host->mmc->caps2 |= MMC_CAP2_FREQ_SCALING;
2741 if (plat->nominal_vcore_mv)
2742 tegra_host->nominal_vcore_mv = plat->nominal_vcore_mv;
2743 if (plat->min_vcore_override_mv)
2744 tegra_host->min_vcore_override_mv = plat->min_vcore_override_mv;
2746 host->edp_support = plat->edp_support ? true : false;
2747 if (host->edp_support)
2748 for (rc = 0; rc < SD_EDP_NUM_STATES; rc++)
2749 host->edp_states[rc] = plat->edp_states[rc];
2751 rc = sdhci_add_host(host);
2755 if (gpio_is_valid(plat->cd_gpio)) {
2756 rc = request_threaded_irq(gpio_to_irq(plat->cd_gpio), NULL,
2758 IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
2759 mmc_hostname(host->mmc), host);
2761 dev_err(mmc_dev(host->mmc), "request irq error\n");
2762 goto err_cd_irq_req;
/* wake-up registration failure is logged but not fatal */
2764 if (!plat->cd_wakeup_incapable) {
2765 rc = enable_irq_wake(gpio_to_irq(plat->cd_gpio));
2767 dev_err(mmc_dev(host->mmc),
2768 "SD card wake-up event registration "
2769 "failed with error: %d\n", rc);
2772 sdhci_tegra_error_stats_debugfs(host);
2774 /* Enable async suspend/resume to reduce LP0 latency */
2775 device_enable_async_suspend(&pdev->dev);
2777 if (plat->power_off_rail) {
2778 tegra_host->reboot_notify.notifier_call =
2779 tegra_sdhci_reboot_notify;
2780 register_reboot_notifier(&tegra_host->reboot_notify);
/* ---- error unwind (labels elided): release in reverse order ---- */
2785 if (gpio_is_valid(plat->cd_gpio))
2786 gpio_free(plat->cd_gpio);
2788 clk_disable_unprepare(pltfm_host->clk);
2789 pm_runtime_put_sync(&pdev->dev);
2791 clk_put(pltfm_host->clk);
2793 if (gpio_is_valid(plat->wp_gpio))
2794 gpio_free(plat->wp_gpio);
2796 if (gpio_is_valid(plat->cd_gpio))
2797 free_irq(gpio_to_irq(plat->cd_gpio), host);
2799 if (gpio_is_valid(plat->power_gpio))
2800 gpio_free(plat->power_gpio);
2803 sdhci_pltfm_free(pdev);
/*
 * Platform remove: tear down everything probe set up, in reverse.
 * 'dead' detection: an all-ones read of SDHCI_INT_STATUS means the
 * controller has vanished (e.g. power lost), so sdhci_remove_host()
 * skips hardware access.
 * NOTE(review): disable_irq_wake() appears to run regardless of
 * whether cd_gpio is valid / wake was ever enabled; the guarding lines
 * may be elided — confirm against the full source.
 */
2807 static int sdhci_tegra_remove(struct platform_device *pdev)
2809 struct sdhci_host *host = platform_get_drvdata(pdev);
2810 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
2811 struct sdhci_tegra *tegra_host = pltfm_host->priv;
2812 const struct tegra_sdhci_platform_data *plat = tegra_host->plat;
2813 int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
2815 sdhci_remove_host(host, dead);
2817 disable_irq_wake(gpio_to_irq(plat->cd_gpio));
2819 if (tegra_host->vdd_slot_reg) {
2820 regulator_disable(tegra_host->vdd_slot_reg);
2821 regulator_put(tegra_host->vdd_slot_reg);
2824 if (tegra_host->vdd_io_reg) {
2825 regulator_disable(tegra_host->vdd_io_reg);
2826 regulator_put(tegra_host->vdd_io_reg);
2829 if (gpio_is_valid(plat->wp_gpio))
2830 gpio_free(plat->wp_gpio);
2832 if (gpio_is_valid(plat->cd_gpio)) {
2833 free_irq(gpio_to_irq(plat->cd_gpio), host);
2834 gpio_free(plat->cd_gpio);
2837 if (gpio_is_valid(plat->power_gpio))
2838 gpio_free(plat->power_gpio);
2840 if (tegra_host->clk_enabled) {
2841 clk_disable_unprepare(pltfm_host->clk);
2842 pm_runtime_put_sync(&pdev->dev);
2844 clk_put(pltfm_host->clk);
2846 if (tegra_host->emc_clk && tegra_host->is_sdmmc_emc_clk_on)
2847 clk_disable_unprepare(tegra_host->emc_clk);
2848 if (tegra_host->sclk && tegra_host->is_sdmmc_sclk_on)
2849 clk_disable_unprepare(tegra_host->sclk);
2850 if (plat->power_off_rail)
2851 unregister_reboot_notifier(&tegra_host->reboot_notify);
2853 sdhci_pltfm_free(pdev);
/*
 * Platform driver descriptor: binds via the OF match table (DT boots)
 * or the "sdhci-tegra" platform-device name (legacy board files), and
 * reuses the generic sdhci-pltfm PM ops.
 */
2858 static struct platform_driver sdhci_tegra_driver = {
2860 .name = "sdhci-tegra",
2861 .owner = THIS_MODULE,
2862 .of_match_table = sdhci_tegra_dt_match,
2863 .pm = SDHCI_PLTFM_PMOPS,
2865 .probe = sdhci_tegra_probe,
2866 .remove = sdhci_tegra_remove,
/* Standard module boilerplate: register/unregister on load/unload. */
2869 module_platform_driver(sdhci_tegra_driver);
2871 MODULE_DESCRIPTION("SDHCI driver for Tegra");
2872 MODULE_AUTHOR("Google, Inc.");
2873 MODULE_LICENSE("GPL v2");