2 * arch/arm/mach-tegra/tegra_cl_dvfs.c
4 * Copyright (c) 2012-2013 NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 #include <linux/kernel.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/err.h>
24 #include <linux/clk.h>
25 #include <linux/interrupt.h>
26 #include <linux/suspend.h>
27 #include <linux/debugfs.h>
28 #include <linux/seq_file.h>
29 #include <linux/uaccess.h>
30 #include <linux/module.h>
31 #include <linux/platform_device.h>
33 #include <mach/iomap.h>
34 #include <mach/irqs.h>
35 #include <mach/hardware.h>
37 #include "tegra_cl_dvfs.h"
/* cl_dvfs (DFLL) register offsets and bit-field definitions, relative to
 * the controller base mapped at cld->cl_base. */
43 #define CL_DVFS_CTRL 0x00
44 #define CL_DVFS_CONFIG 0x04
45 #define CL_DVFS_CONFIG_DIV_MASK 0xff
/* Control loop parameters: force mode, CF/CI/CG gains and CG scaling */
47 #define CL_DVFS_PARAMS 0x08
48 #define CL_DVFS_PARAMS_CG_SCALE (0x1 << 24)
49 #define CL_DVFS_PARAMS_FORCE_MODE_SHIFT 22
50 #define CL_DVFS_PARAMS_FORCE_MODE_MASK (0x3 << CL_DVFS_PARAMS_FORCE_MODE_SHIFT)
51 #define CL_DVFS_PARAMS_CF_PARAM_SHIFT 16
52 #define CL_DVFS_PARAMS_CF_PARAM_MASK (0x3f << CL_DVFS_PARAMS_CF_PARAM_SHIFT)
53 #define CL_DVFS_PARAMS_CI_PARAM_SHIFT 8
54 #define CL_DVFS_PARAMS_CI_PARAM_MASK (0x7 << CL_DVFS_PARAMS_CI_PARAM_SHIFT)
55 #define CL_DVFS_PARAMS_CG_PARAM_SHIFT 0
56 #define CL_DVFS_PARAMS_CG_PARAM_MASK (0xff << CL_DVFS_PARAMS_CG_PARAM_SHIFT)
58 #define CL_DVFS_TUNE0 0x0c
59 #define CL_DVFS_TUNE1 0x10
/* Frequency request: force value, output scale, and target frequency */
61 #define CL_DVFS_FREQ_REQ 0x14
62 #define CL_DVFS_FREQ_REQ_FORCE_ENABLE (0x1 << 28)
63 #define CL_DVFS_FREQ_REQ_FORCE_SHIFT 16
64 #define CL_DVFS_FREQ_REQ_FORCE_MASK (0xfff << CL_DVFS_FREQ_REQ_FORCE_SHIFT)
65 #define FORCE_MAX 2047
66 #define FORCE_MIN -2048
67 #define CL_DVFS_FREQ_REQ_SCALE_SHIFT 8
68 #define CL_DVFS_FREQ_REQ_SCALE_MASK (0xff << CL_DVFS_FREQ_REQ_SCALE_SHIFT)
70 #define CL_DVFS_FREQ_REQ_FREQ_VALID (0x1 << 7)
71 #define CL_DVFS_FREQ_REQ_FREQ_SHIFT 0
72 #define CL_DVFS_FREQ_REQ_FREQ_MASK (0x7f << CL_DVFS_FREQ_REQ_FREQ_SHIFT)
75 #define CL_DVFS_SCALE_RAMP 0x18
77 #define CL_DVFS_DROOP_CTRL 0x1c
78 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT 16
79 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK \
80 (0xff << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT)
81 #define CL_DVFS_DROOP_CTRL_CUT_SHIFT 8
82 #define CL_DVFS_DROOP_CTRL_CUT_MASK (0xf << CL_DVFS_DROOP_CTRL_CUT_SHIFT)
83 #define CL_DVFS_DROOP_CTRL_RAMP_SHIFT 0
84 #define CL_DVFS_DROOP_CTRL_RAMP_MASK (0xff << CL_DVFS_DROOP_CTRL_RAMP_SHIFT)
/* Output (PMU voltage) configuration: safe/max/min LUT index limits */
86 #define CL_DVFS_OUTPUT_CFG 0x20
87 #define CL_DVFS_OUTPUT_CFG_I2C_ENABLE (0x1 << 30)
88 #define CL_DVFS_OUTPUT_CFG_SAFE_SHIFT 24
89 #define CL_DVFS_OUTPUT_CFG_SAFE_MASK \
90 (OUT_MASK << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT)
91 #define CL_DVFS_OUTPUT_CFG_MAX_SHIFT 16
92 #define CL_DVFS_OUTPUT_CFG_MAX_MASK \
93 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MAX_SHIFT)
94 #define CL_DVFS_OUTPUT_CFG_MIN_SHIFT 8
95 #define CL_DVFS_OUTPUT_CFG_MIN_MASK \
96 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MIN_SHIFT)
98 #define CL_DVFS_OUTPUT_FORCE 0x24
99 #define CL_DVFS_MONITOR_CTRL 0x28
100 #define CL_DVFS_MONITOR_CTRL_DISABLE 0
101 #define CL_DVFS_MONITOR_CTRL_FREQ 6
102 #define CL_DVFS_MONITOR_DATA 0x2c
103 #define CL_DVFS_MONITOR_DATA_NEW (0x1 << 16)
104 #define CL_DVFS_MONITOR_DATA_MASK 0xFFFF
/* PMU I2C transport configuration and status */
106 #define CL_DVFS_I2C_CFG 0x40
107 #define CL_DVFS_I2C_CFG_ARB_ENABLE (0x1 << 20)
108 #define CL_DVFS_I2C_CFG_HS_CODE_SHIFT 16
109 #define CL_DVFS_I2C_CFG_HS_CODE_MASK (0x7 << CL_DVFS_I2C_CFG_HS_CODE_SHIFT)
110 #define CL_DVFS_I2C_CFG_PACKET_ENABLE (0x1 << 15)
111 #define CL_DVFS_I2C_CFG_SIZE_SHIFT 12
112 #define CL_DVFS_I2C_CFG_SIZE_MASK (0x7 << CL_DVFS_I2C_CFG_SIZE_SHIFT)
113 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_10 (0x1 << 10)
114 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT 0
115 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_MASK \
116 (0x3ff << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT)
118 #define CL_DVFS_I2C_VDD_REG_ADDR 0x44
119 #define CL_DVFS_I2C_STS 0x48
120 #define CL_DVFS_I2C_STS_I2C_LAST_SHIFT 1
121 #define CL_DVFS_I2C_STS_I2C_REQ_PENDING 0x1
123 #define CL_DVFS_INTR_STS 0x5c
124 #define CL_DVFS_INTR_EN 0x60
125 #define CL_DVFS_INTR_MIN_MASK 0x1
126 #define CL_DVFS_INTR_MAX_MASK 0x2
128 #define CL_DVFS_I2C_CLK_DIVISOR 0x16c
129 #define CL_DVFS_I2C_CLK_DIVISOR_MASK 0xffff
130 #define CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT 16
131 #define CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT 0
133 #define CL_DVFS_OUTPUT_LUT 0x200
/* S/w timing parameters (us), and tuning margin in LUT steps */
135 #define CL_DVFS_CALIBR_TIME 40000
136 #define CL_DVFS_OUTPUT_PENDING_TIMEOUT 1000
137 #define CL_DVFS_OUTPUT_RAMP_DELAY 100
138 #define CL_DVFS_TUNE_HIGH_DELAY 2000
140 #define CL_DVFS_TUNE_HIGH_MARGIN_STEPS 2
/* 0: output min/max limits applied via LUT reload; 1: via OUTPUT_CFG */
142 #define CL_DVFS_DYNAMIC_OUTPUT_CFG 0
/* Controller operating mode; written to CL_DVFS_CTRL as (mode - 1) */
144 enum tegra_cl_dvfs_ctrl_mode {
145 TEGRA_CL_DVFS_UNINITIALIZED = 0,
146 TEGRA_CL_DVFS_DISABLED = 1,
147 TEGRA_CL_DVFS_OPEN_LOOP = 2,
148 TEGRA_CL_DVFS_CLOSED_LOOP = 3,
/* State machine for switching between low and high tuning parameters */
151 enum tegra_cl_dvfs_tune_state {
152 TEGRA_CL_DVFS_TUNE_LOW = 0,
153 TEGRA_CL_DVFS_TUNE_HIGH_REQUEST,
154 TEGRA_CL_DVFS_TUNE_HIGH,
/* One DFLL rate request (fields on lines not visible in this view:
 * freq, scale, output, cap, rate - see users below) */
157 struct dfll_rate_req {
/* Per-instance driver state for one cl_dvfs controller */
165 struct tegra_cl_dvfs {
167 struct tegra_cl_dvfs_platform_data *p_data;
169 struct dvfs *safe_dvfs;
170 struct tegra_cooling_device *vmin_cdev;
171 struct work_struct init_cdev_work;
/* clocks and their cached rates */
176 struct clk *dfll_clk;
177 unsigned long ref_rate;
178 unsigned long i2c_rate;
180 /* output voltage mapping:
181 * legacy dvfs table index -to- cl_dvfs output LUT index
182 * cl_dvfs output LUT index -to- PMU value/voltage pair ptr
184 u8 clk_dvfs_map[MAX_DVFS_FREQS];
185 struct voltage_reg_map *out_map[MAX_CL_DVFS_VOLTAGES];
/* LUT indexes bounding the high-tuning voltage range */
188 u8 tune_high_out_start;
189 u8 tune_high_out_min;
/* thermal floors as LUT indexes, monotonically decreasing with temp */
191 u8 thermal_out_floors[MAX_THERMAL_FLOORS];
192 int therm_floors_num;
193 unsigned long dvco_rate_min;
/* last request and current control/tuning state */
198 struct dfll_rate_req last_req;
199 enum tegra_cl_dvfs_tune_state tune_state;
200 enum tegra_cl_dvfs_ctrl_mode mode;
/* timers for deferred tune-high transition and dvco_rate_min calibration */
202 struct timer_list tune_timer;
203 unsigned long tune_delay;
204 struct timer_list calibration_timer;
205 unsigned long calibration_delay;
206 ktime_t last_calibration;
207 unsigned long calibration_range_min;
208 unsigned long calibration_range_max;
211 /* Conversion macros (different scales for frequency request, and monitored
212 rate is not a typo) */
/* Request unit is ref_rate/2; monitor unit is ref_rate/4 */
213 #define RATE_STEP(cld) ((cld)->ref_rate / 2)
214 #define GET_REQUEST_FREQ(rate, ref_rate) ((rate) / ((ref_rate) / 2))
215 #define GET_REQUEST_RATE(freq, ref_rate) ((freq) * ((ref_rate) / 2))
216 #define GET_MONITORED_RATE(freq, ref_rate) ((freq) * ((ref_rate) / 4))
217 #define GET_DROOP_FREQ(rate, ref_rate) ((rate) / ((ref_rate) / 4))
218 #define ROUND_MIN_RATE(rate, ref_rate) \
219 (DIV_ROUND_UP(rate, (ref_rate) / 2) * ((ref_rate) / 2))
220 #define GET_DIV(ref_rate, out_rate, scale) \
221 DIV_ROUND_UP((ref_rate), (out_rate) * (scale))
/* Human-readable names for tegra_cl_dvfs_ctrl_mode (debugfs/logging) */
223 static const char *mode_name[] = {
224 [TEGRA_CL_DVFS_UNINITIALIZED] = "uninitialized",
225 [TEGRA_CL_DVFS_DISABLED] = "disabled",
226 [TEGRA_CL_DVFS_OPEN_LOOP] = "open_loop",
227 [TEGRA_CL_DVFS_CLOSED_LOOP] = "closed_loop",
230 static inline u32 cl_dvfs_readl(struct tegra_cl_dvfs *cld, u32 offs)
232 return __raw_readl(cld->cl_base + offs);
234 static inline void cl_dvfs_writel(struct tegra_cl_dvfs *cld, u32 val, u32 offs)
236 __raw_writel(val, cld->cl_base + offs);
238 static inline void cl_dvfs_wmb(struct tegra_cl_dvfs *cld)
241 cl_dvfs_readl(cld, CL_DVFS_CTRL);
244 static inline int output_enable(struct tegra_cl_dvfs *cld)
246 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
248 /* FIXME: PWM output control */
249 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
250 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/*
 * Wait for any in-flight PMU I2C transaction to complete, then disable
 * the output interface.  The pending bit is double-sampled to close the
 * race with a request issued between reads.  Returns 0 on a clean
 * disable; on timeout the output is disabled anyway and an error is
 * reported (error-return line not visible in this view).
 */
255 static noinline int output_flush_disable(struct tegra_cl_dvfs *cld)
259 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
261 /* Flush transactions in flight, and then disable */
262 for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
263 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
265 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
266 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
267 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
268 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
269 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/* disable may race with a last-moment request: re-check after disable */
271 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
272 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
273 return 0; /* no pending rqst */
275 /* Re-enable, continue wait */
276 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
277 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
283 /* I2C request is still pending - disable, anyway, but report error */
284 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
285 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/*
 * Disable the output interface immediately, then wait for a possible
 * transaction still in flight to drain.  Counterpart of
 * output_flush_disable() for h/w that tolerates disable while busy.
 * Returns non-zero if a request is still pending after the timeout
 * (return statements are on lines not visible in this view).
 */
290 static noinline int output_disable_flush(struct tegra_cl_dvfs *cld)
294 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
296 /* Disable output interface right away */
297 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
298 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
301 /* Flush possible transaction in flight */
302 for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
303 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
/* double-sample the pending bit to avoid racing a new request */
305 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
306 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
307 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
312 /* I2C request is still pending - report error */
/*
 * Prepare the output for the closed-loop -> open-loop transition.
 * If h/w does not require the output to be quiet before disabling,
 * disable-then-flush here, before the mode switch.
 */
316 static inline int output_disable_ol_prepare(struct tegra_cl_dvfs *cld)
318 /* FIXME: PWM output control */
320 * If cl-dvfs h/w does not require output to be quiet before disable,
321 * s/w can stop I2C communications at any time (including operations
322 * in closed loop mode), and I2C bus integrity is guaranteed even in
323 * case of flush timeout.
325 if (!cld->p_data->out_quiet_then_disable) {
326 int ret = output_disable_flush(cld);
328 pr_debug("cl_dvfs: I2C pending timeout ol_prepare\n");
/*
 * Complete output disabling after the switch to open-loop mode.
 * If h/w requires the output to be quiet before disabling, flush first
 * and only then disable - this must happen after leaving closed loop.
 */
334 static inline int output_disable_post_ol(struct tegra_cl_dvfs *cld)
336 /* FIXME: PWM output control */
338 * If cl-dvfs h/w requires output to be quiet before disable, s/w
339 * should stop I2C communications only after the switch to open loop
340 * mode, and I2C bus integrity is not guaranteed in case of flush
343 if (cld->p_data->out_quiet_then_disable) {
344 int ret = output_flush_disable(cld);
346 pr_err("cl_dvfs: I2C pending timeout post_ol\n");
352 static inline void set_mode(struct tegra_cl_dvfs *cld,
353 enum tegra_cl_dvfs_ctrl_mode mode)
356 cl_dvfs_writel(cld, mode - 1, CL_DVFS_CTRL);
/*
 * Return the current lower bound for the output LUT index: the larger of
 * the high-tuning-range minimum (applies when not in TUNE_LOW state) and
 * the thermal floor for the current thermal zone.
 * NOTE(review): the branch initializing thermal_min when therm_floor_idx
 * is out of range is on a line not visible here - presumably 0; verify.
 */
360 static inline u8 get_output_min(struct tegra_cl_dvfs *cld)
362 u32 tune_min, thermal_min;
364 tune_min = cld->tune_state == TEGRA_CL_DVFS_TUNE_LOW ?
365 0 : cld->tune_high_out_min;
367 if (cld->therm_floor_idx < cld->therm_floors_num)
368 thermal_min = cld->thermal_out_floors[cld->therm_floor_idx];
370 return max(tune_min, thermal_min);
/*
 * Program the output LUT registers from out_map[], clamping entries below
 * lut_min and above lut_max to the respective boundary values, so the
 * effective output range is limited without changing h/w index limits.
 */
373 static inline void _load_lut(struct tegra_cl_dvfs *cld)
/* entries [0 .. lut_min] all get the lut_min value */
378 val = cld->out_map[cld->lut_min]->reg_value;
379 for (i = 0; i <= cld->lut_min; i++)
380 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
/* entries (lut_min .. lut_max) map 1:1 */
382 for (; i < cld->lut_max; i++) {
383 val = cld->out_map[i]->reg_value;
384 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
/* entries [lut_max .. num_voltages) all get the lut_max value */
387 val = cld->out_map[cld->lut_max]->reg_value;
388 for (; i < cld->num_voltages; i++)
389 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
/*
 * Reload the output LUT.  If h/w allows disabling the output while busy,
 * temporarily disable the I2C output around the reload to avoid sending
 * a partially updated table entry to the PMU.
 */
394 static void cl_dvfs_load_lut(struct tegra_cl_dvfs *cld)
396 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
397 bool disable_out_for_load = !cld->p_data->out_quiet_then_disable &&
398 (val & CL_DVFS_OUTPUT_CFG_I2C_ENABLE);
400 if (disable_out_for_load) {
401 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
402 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
404 udelay(2); /* 2us (big margin) window for disable propagation */
/* actual table write happens in _load_lut() (call on a hidden line) */
409 if (disable_out_for_load) {
410 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
411 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/* Update the tuning state and trace the transition (macro so that the
 * pr_debug() reports the caller via __func__) */
416 #define set_tune_state(cld, state) \
418 cld->tune_state = state; \
419 pr_debug("%s: set tune state %d\n", __func__, state); \
422 static inline void tune_low(struct tegra_cl_dvfs *cld)
424 if (cld->safe_dvfs->dfll_data.tune_trimmers)
425 cld->safe_dvfs->dfll_data.tune_trimmers(false);
426 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
430 static inline void tune_high(struct tegra_cl_dvfs *cld)
432 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0_high_mv,
435 if (cld->safe_dvfs->dfll_data.tune_trimmers)
436 cld->safe_dvfs->dfll_data.tune_trimmers(true);
/*
 * Configure the controller for open-loop mode: force the low (safe)
 * tuning parameter set, apply the current output floor, and set 1:1
 * output scaling with the force-enable bit cleared.
 */
439 static void set_ol_config(struct tegra_cl_dvfs *cld)
443 /* always tune low (safe) in open loop */
444 if (cld->tune_state != TEGRA_CL_DVFS_TUNE_LOW) {
445 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
/* apply output floor: either via OUTPUT_CFG or via LUT reload */
448 out_min = get_output_min(cld);
449 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
450 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
451 val &= ~CL_DVFS_OUTPUT_CFG_MIN_MASK;
452 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
453 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
455 if (cld->lut_min != out_min) {
456 cld->lut_min = out_min;
457 cl_dvfs_load_lut(cld);
462 /* 1:1 scaling in open loop */
463 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
464 val |= (SCALE_MAX - 1) << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
465 val &= ~CL_DVFS_FREQ_REQ_FORCE_ENABLE;
466 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/*
 * Configure the controller for closed-loop operation on request @req:
 * drive the low/high tuning state machine from the request cap, then
 * derive output min/max limits and apply them (via OUTPUT_CFG or LUT
 * reload, depending on CL_DVFS_DYNAMIC_OUTPUT_CFG).
 */
469 static void set_cl_config(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
471 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
474 u32 out_max, out_min;
476 switch (cld->tune_state) {
477 case TEGRA_CL_DVFS_TUNE_LOW:
/* cap crossed into the high-tuning range: request transition and
 * let tune_timer_cb() complete it once the output has ramped up */
478 if (req->cap > cld->tune_high_out_start) {
479 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST);
480 mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
484 case TEGRA_CL_DVFS_TUNE_HIGH:
485 case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST:
486 if (req->cap <= cld->tune_high_out_start) {
487 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
/* keep requested output strictly inside [out_min+1, cap-1] window */
495 out_min = get_output_min(cld);
496 if (req->cap > (out_min + 1))
497 req->output = req->cap - 1;
499 req->output = out_min + 1;
/* don't stay at the safe output level (hidden line adjusts this) */
500 if (req->output == cld->safe_output)
502 out_max = max((u8)(req->output + 1), cld->minimax_output);
504 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
505 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
506 val &= ~(CL_DVFS_OUTPUT_CFG_MAX_MASK | CL_DVFS_OUTPUT_CFG_MIN_MASK);
507 val |= out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT;
508 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
509 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
511 if ((cld->lut_min != out_min) || (cld->lut_max != out_max)) {
512 cld->lut_min = out_min;
513 cld->lut_max = out_max;
514 cl_dvfs_load_lut(cld);
/*
 * Timer callback completing the LOW -> HIGH tuning transition: once no
 * I2C request is pending and both the last sent output and the output
 * floor have reached the high-tuning minimum, wait the ramp delay and
 * enter TUNE_HIGH; otherwise re-arm the timer and keep polling.
 */
519 static void tune_timer_cb(unsigned long data)
522 u32 val, out_min, out_last;
523 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
525 clk_lock_save(cld->dfll_clk, &flags);
527 /* FIXME: PWM output control */
528 if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST) {
529 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
530 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
531 out_min = (val >> CL_DVFS_OUTPUT_CFG_MIN_SHIFT) & OUT_MASK;
533 out_min = cld->lut_min;
/* last output value actually sent to the PMU over I2C */
535 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
536 out_last = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
538 if (!(val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) &&
539 (out_last >= cld->tune_high_out_min) &&
540 (out_min >= cld->tune_high_out_min)) {
541 udelay(CL_DVFS_OUTPUT_RAMP_DELAY);
542 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
545 mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
548 clk_unlock_restore(cld->dfll_clk, &flags);
551 static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
553 if (!cld->calibration_delay)
555 mod_timer(&cld->calibration_timer, jiffies + cld->calibration_delay);
/*
 * Calibrate dvco_rate_min against the actually monitored DVCO rate.
 * Runs only in closed-loop mode, when the last request engaged the clock
 * skipper, and at most once per CL_DVFS_CALIBR_TIME us.  Nudges
 * dvco_rate_min by one RATE_STEP per invocation and clamps it to the
 * pre-computed calibration range.
 */
558 static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
563 u8 out_min = get_output_min(cld);
566 * Enter calibration procedure only if
567 * - closed loop operations
568 * - last request engaged clock skipper
569 * - at least specified time after the last calibration attempt
571 if ((cld->mode != TEGRA_CL_DVFS_CLOSED_LOOP) ||
572 (cld->last_req.rate > cld->dvco_rate_min))
576 if (ktime_us_delta(now, cld->last_calibration) < CL_DVFS_CALIBR_TIME)
578 cld->last_calibration = now;
/* make sure the monitor is sampling frequency, not some other source */
580 if (cl_dvfs_readl(cld, CL_DVFS_MONITOR_CTRL) !=
581 CL_DVFS_MONITOR_CTRL_FREQ)
582 cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ,
583 CL_DVFS_MONITOR_CTRL);
585 /* Synchronize with sample period, and get rate measurements */
586 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
588 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
589 } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
591 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
592 } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
594 /* Skip calibration if I2C transaction is pending */
595 /* FIXME: PWM output control */
596 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
597 if (val & CL_DVFS_I2C_STS_I2C_REQ_PENDING)
600 /* Adjust minimum rate */
601 data &= CL_DVFS_MONITOR_DATA_MASK;
602 data = GET_MONITORED_RATE(data, cld->ref_rate);
/* NOTE(review): here val is compared against a LUT index; the line
 * extracting the last output from the I2C_STS value appears to be
 * missing from this view - verify against the full source */
603 if ((val > out_min) || (data < (cld->dvco_rate_min - RATE_STEP(cld))))
604 cld->dvco_rate_min -= RATE_STEP(cld);
605 else if (data > (cld->dvco_rate_min + RATE_STEP(cld)))
606 cld->dvco_rate_min += RATE_STEP(cld);
610 cld->dvco_rate_min = clamp(cld->dvco_rate_min,
611 cld->calibration_range_min, cld->calibration_range_max);
612 calibration_timer_update(cld);
613 pr_debug("%s: calibrated dvco_rate_min %lu\n",
614 __func__, cld->dvco_rate_min);
617 static void calibration_timer_cb(unsigned long data)
620 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
622 pr_debug("%s\n", __func__);
624 clk_lock_save(cld->dfll_clk, &flags);
625 cl_dvfs_calibrate(cld);
626 clk_unlock_restore(cld->dfll_clk, &flags);
/*
 * Program a frequency request into the h/w: compute the output force
 * value as the distance from the safe output scaled by cg_scale/cg,
 * clamp it to the 12-bit signed force field, and write the combined
 * frequency/scale/force request with VALID and FORCE_ENABLE set.
 */
629 static void set_request(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
632 int force_val = req->output - cld->safe_output;
633 int coef = 128; /* FIXME: cld->p_data->cfg_param->cg_scale? */;
635 force_val = force_val * coef / cld->p_data->cfg_param->cg;
636 force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
638 val = req->freq << CL_DVFS_FREQ_REQ_FREQ_SHIFT;
639 val |= req->scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
/* mask after shift: force_val may be negative (two's complement) */
640 val |= ((u32)force_val << CL_DVFS_FREQ_REQ_FORCE_SHIFT) &
641 CL_DVFS_FREQ_REQ_FORCE_MASK;
642 val |= CL_DVFS_FREQ_REQ_FREQ_VALID | CL_DVFS_FREQ_REQ_FORCE_ENABLE;
644 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/*
 * Return the lowest output LUT index whose voltage caps @mv, i.e. the
 * first entry with reg_uV covering the requested millivolts; falls back
 * to the maximum possible output if no entry qualifies.
 * NOTE(review): the comparison/early-return inside the loop is on lines
 * not visible in this view.
 */
648 static u8 find_mv_out_cap(struct tegra_cl_dvfs *cld, int mv)
653 for (cap = 0; cap < cld->num_voltages; cap++) {
654 uv = cld->out_map[cap]->reg_uV;
658 return cap - 1; /* maximum possible output */
661 static int find_safe_output(
662 struct tegra_cl_dvfs *cld, unsigned long rate, u8 *safe_output)
665 int n = cld->safe_dvfs->num_freqs;
666 unsigned long *freqs = cld->safe_dvfs->freqs;
668 for (i = 0; i < n; i++) {
669 if (freqs[i] >= rate) {
670 *safe_output = cld->clk_dvfs_map[i];
/*
 * Return the minimum DVCO rate usable above the output floor @out_min:
 * scans the dvfs frequency table for the first entry whose output
 * mapping exceeds the floor.
 * NOTE(review): the loop body (break) and the index adjustment before
 * the return are on lines not visible here; as shown, i could index one
 * past the end - verify against the full source.
 */
677 static unsigned long find_dvco_rate_min(struct tegra_cl_dvfs *cld, u8 out_min)
681 for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
682 if (cld->clk_dvfs_map[i] > out_min)
686 return cld->safe_dvfs->freqs[i];
689 static void cl_dvfs_set_dvco_rate_min(struct tegra_cl_dvfs *cld)
691 unsigned long rate = cld->safe_dvfs->dfll_data.out_rate_min;
692 if (cld->therm_floor_idx < cld->therm_floors_num)
693 rate = find_dvco_rate_min(
694 cld, cld->thermal_out_floors[cld->therm_floor_idx]);
696 /* round minimum rate to request unit (ref_rate/2) boundary */
697 cld->dvco_rate_min = ROUND_MIN_RATE(rate, cld->ref_rate);
699 /* dvco min rate is under-estimated - skewed range up */
700 cld->calibration_range_min = cld->dvco_rate_min - 2 * RATE_STEP(cld);
701 cld->calibration_range_max = cld->dvco_rate_min + 8 * RATE_STEP(cld);
/*
 * Look up the PMU voltage map entry for @mV.  With @exact the entry must
 * match the (1mV-rounded) voltage; otherwise the nearest qualifying
 * entry is returned.  Returns NULL when no entry is found.
 * NOTE(review): the loop's comparison/break is on lines not visible in
 * this view.
 */
704 static struct voltage_reg_map *find_vdd_map_entry(
705 struct tegra_cl_dvfs *cld, int mV, bool exact)
709 for (i = 0; i < cld->p_data->vdd_map_size; i++) {
710 /* round down to 1mV */
711 reg_mV = cld->p_data->vdd_map[i].reg_uV / 1000;
716 if (i < cld->p_data->vdd_map_size) {
717 if (!exact || (mV == reg_mV))
718 return &cld->p_data->vdd_map[i];
/*
 * Build the two voltage mappings used by the controller:
 *  - out_map[]: output LUT index -> PMU register value/voltage pair,
 *    spreading LUT entries between min_millivolts and the max dvfs level;
 *  - clk_dvfs_map[]: legacy dvfs table index -> output LUT index.
 * BUG()s on inconsistent platform data since the controller cannot
 * operate with a broken voltage map.
 */
723 static void cl_dvfs_init_maps(struct tegra_cl_dvfs *cld)
725 int i, j, v, v_max, n;
726 const int *millivolts;
727 struct voltage_reg_map *m;
729 BUILD_BUG_ON(MAX_CL_DVFS_VOLTAGES > OUT_MASK + 1);
731 n = cld->safe_dvfs->num_freqs;
732 BUG_ON(n >= MAX_CL_DVFS_VOLTAGES);
734 millivolts = cld->safe_dvfs->dfll_millivolts;
735 v_max = millivolts[n - 1];
737 v = cld->safe_dvfs->dfll_data.min_millivolts;
738 BUG_ON(v > millivolts[0]);
/* LUT entry 0 must exactly match the minimum voltage */
740 cld->out_map[0] = find_vdd_map_entry(cld, v, true);
741 BUG_ON(!cld->out_map[0]);
743 for (i = 0, j = 1; i < n; i++) {
/* spread remaining LUT slots evenly up to each dvfs level ... */
745 v += max(1, (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j));
746 if (v >= millivolts[i])
749 m = find_vdd_map_entry(cld, v, false);
751 if (m != cld->out_map[j - 1])
752 cld->out_map[j++] = m;
/* ... and pin an exact entry at the dvfs level itself */
756 m = find_vdd_map_entry(cld, v, true);
758 if (m != cld->out_map[j - 1])
759 cld->out_map[j++] = m;
760 cld->clk_dvfs_map[i] = j - 1;
762 BUG_ON(j > MAX_CL_DVFS_VOLTAGES);
763 cld->num_voltages = j;
/*
 * Convert the high-tuning voltage threshold into output LUT indexes
 * (tune_high_out_min plus a margin for tune_high_out_start).  When the
 * threshold lies outside the operating range both indexes stay at the
 * maximum output, which effectively disables high-tuning transitions.
 */
766 static void cl_dvfs_init_tuning_thresholds(struct tegra_cl_dvfs *cld)
771 * Convert high tuning voltage threshold into output LUT index, and
772 * add necessary margin. If voltage threshold is outside operating
773 * range set it at maximum output level to effectively disable tuning
774 * parameters adjustment.
776 cld->tune_high_out_min = cld->num_voltages - 1;
777 cld->tune_high_out_start = cld->num_voltages - 1;
778 mv = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
779 if (mv >= cld->safe_dvfs->dfll_data.min_millivolts) {
780 u8 out_min = find_mv_out_cap(cld, mv);
/* need room above out_min for regulation (+2) and margin steps */
781 if ((out_min + 2) < cld->num_voltages) {
782 u8 out_start = out_min + CL_DVFS_TUNE_HIGH_MARGIN_STEPS;
783 if (out_start < cld->num_voltages) {
784 cld->tune_high_out_min = out_min;
785 cld->tune_high_out_start = out_start;
786 if (cld->minimax_output <= out_min)
787 cld->minimax_output = out_min + 1;
/*
 * Convert the rail's low-temperature voltage floors into output LUT
 * indexes (thermal_out_floors[]).  Warns if no floor cooling device is
 * configured; BUG()s if the highest floor leaves no room for regulation.
 */
793 static void cl_dvfs_init_cold_output_floor(struct tegra_cl_dvfs *cld)
796 if (!cld->safe_dvfs->dvfs_rail->therm_mv_floors ||
797 !cld->safe_dvfs->dvfs_rail->therm_mv_floors_num)
801 WARN(1, "%s: missing dfll floor cooling device\n",
802 cld->safe_dvfs->dvfs_rail->reg_id);
804 * Convert monotonically decreasing thermal floors at low temperature
805 * into output LUT indexes; make sure there is a room for regulation
806 * above maximum thermal floor.
808 cld->therm_floors_num = cld->safe_dvfs->dvfs_rail->therm_mv_floors_num;
809 for (i = 0; i < cld->therm_floors_num; i++) {
810 cld->thermal_out_floors[i] = find_mv_out_cap(
811 cld, cld->safe_dvfs->dvfs_rail->therm_mv_floors[i]);
/* floors[0] is the highest (coldest) floor - must leave headroom */
813 BUG_ON(cld->thermal_out_floors[0] + 2 >= cld->num_voltages);
814 if (cld->minimax_output <= cld->thermal_out_floors[0])
815 cld->minimax_output = cld->thermal_out_floors[0] + 1;
/*
 * Initialize all output-range thresholds: tuning thresholds, cold
 * thermal floors, and the safe/minimax output levels derived from them.
 */
818 static void cl_dvfs_init_output_thresholds(struct tegra_cl_dvfs *cld)
820 cld->minimax_output = 0;
821 cl_dvfs_init_tuning_thresholds(cld);
822 cl_dvfs_init_cold_output_floor(cld);
824 /* make sure safe output is safe at any temperature */
825 cld->safe_output = cld->thermal_out_floors[0] ? : 1;
826 if (cld->minimax_output <= cld->safe_output)
827 cld->minimax_output = cld->safe_output + 1;
/* PWM transport to the PMU is not implemented yet - intentional no-op. */
static void cl_dvfs_init_pwm_if(struct tegra_cl_dvfs *cld)
{
	/* FIXME: not supported */
}
/*
 * Program the PMU I2C transport: slave address (7/10 bit), vdd register
 * offset, optional high-speed mode, and the FS/HS clock divisors derived
 * from the i2c clock rate.
 */
835 static void cl_dvfs_init_i2c_if(struct tegra_cl_dvfs *cld)
838 struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
839 bool hs_mode = p_data->u.pmu_i2c.hs_rate;
841 /* PMU slave address, vdd register offset, and transfer mode */
842 val = p_data->u.pmu_i2c.slave_addr << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT;
843 if (p_data->u.pmu_i2c.addr_10)
844 val |= CL_DVFS_I2C_CFG_SLAVE_ADDR_10;
/* high-speed mode: master code + packet mode (hs_mode branch) */
846 val |= p_data->u.pmu_i2c.hs_master_code <<
847 CL_DVFS_I2C_CFG_HS_CODE_SHIFT;
848 val |= CL_DVFS_I2C_CFG_PACKET_ENABLE;
850 val |= CL_DVFS_I2C_CFG_SIZE_MASK;
851 val |= CL_DVFS_I2C_CFG_ARB_ENABLE;
852 cl_dvfs_writel(cld, val, CL_DVFS_I2C_CFG);
853 cl_dvfs_writel(cld, p_data->u.pmu_i2c.reg, CL_DVFS_I2C_VDD_REG_ADDR);
/* FS divisor (x8 scale); BUG on out-of-range platform data */
856 val = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.fs_rate, 8);
857 BUG_ON(!val || (val > CL_DVFS_I2C_CLK_DIVISOR_MASK));
858 val = (val - 1) << CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT;
/* HS divisor (x12 scale) used only when hs_mode is configured */
860 div = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.hs_rate, 12);
861 BUG_ON(!div || (div > CL_DVFS_I2C_CLK_DIVISOR_MASK));
863 div = 2; /* default hs divisor just in case */
865 val |= (div - 1) << CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT;
866 cl_dvfs_writel(cld, val, CL_DVFS_I2C_CLK_DIVISOR);
/*
 * Initialize the output (PMU) interface in disabled state: set safe
 * voltage and min/max output limits, clear forced output and limit
 * interrupts, load the LUT, and configure the I2C or PWM transport.
 */
870 static void cl_dvfs_init_out_if(struct tegra_cl_dvfs *cld)
875 * Disable output, and set safe voltage and output limits;
876 * disable and clear limit interrupts.
878 cld->tune_state = TEGRA_CL_DVFS_TUNE_LOW;
879 cld->therm_floor_idx = 0;
880 cl_dvfs_set_dvco_rate_min(cld);
881 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
882 val = get_output_min(cld);
884 cld->lut_max = cld->num_voltages - 1;
887 * Allow the entire range of LUT indexes, but limit output voltage in
888 * LUT mapping (this "indirect" application of limits is used, because
889 * h/w does not support dynamic change of index limits, but dynamic
890 * reload of LUT is fine).
893 cld->lut_min = get_output_min(cld);
894 cld->lut_max = cld->num_voltages - 1;
897 val = (cld->safe_output << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT) |
898 ((cld->num_voltages - 1) << CL_DVFS_OUTPUT_CFG_MAX_SHIFT) |
899 (val << CL_DVFS_OUTPUT_CFG_MIN_SHIFT);
900 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/* no forced output; disable and clear min/max limit interrupts */
903 cl_dvfs_writel(cld, 0, CL_DVFS_OUTPUT_FORCE);
904 cl_dvfs_writel(cld, 0, CL_DVFS_INTR_EN);
905 cl_dvfs_writel(cld, CL_DVFS_INTR_MAX_MASK | CL_DVFS_INTR_MIN_MASK,
908 /* fill in LUT table */
909 cl_dvfs_load_lut(cld);
911 /* configure transport */
912 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
913 cl_dvfs_init_i2c_if(cld);
915 cl_dvfs_init_pwm_if(cld);
/*
 * Configure the control logic while in DISABLED mode: sample-rate
 * divider, loop gains (CF/CI/CG), DFLL tuning registers, droop and
 * scale skippers, an initial all-ones scale request, and frequency
 * monitoring.  BUG()s on platform parameters that don't fit the fields.
 */
918 static void cl_dvfs_init_cntrl_logic(struct tegra_cl_dvfs *cld)
921 struct tegra_cl_dvfs_cfg_param *param = cld->p_data->cfg_param;
923 /* configure mode, control loop parameters, DFLL tuning */
924 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
926 val = GET_DIV(cld->ref_rate, param->sample_rate, 32);
927 BUG_ON(val > CL_DVFS_CONFIG_DIV_MASK);
928 cl_dvfs_writel(cld, val, CL_DVFS_CONFIG);
930 val = (param->force_mode << CL_DVFS_PARAMS_FORCE_MODE_SHIFT) |
931 (param->cf << CL_DVFS_PARAMS_CF_PARAM_SHIFT) |
932 (param->ci << CL_DVFS_PARAMS_CI_PARAM_SHIFT) |
933 ((u8)param->cg << CL_DVFS_PARAMS_CG_PARAM_SHIFT) |
934 (param->cg_scale ? CL_DVFS_PARAMS_CG_SCALE : 0);
935 cl_dvfs_writel(cld, val, CL_DVFS_PARAMS);
/* start with low-voltage tuning (trimmers off) */
937 if (cld->safe_dvfs->dfll_data.tune_trimmers)
938 cld->safe_dvfs->dfll_data.tune_trimmers(false);
939 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
940 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune1, CL_DVFS_TUNE1);
942 /* configure droop (skipper 1) and scale (skipper 2) */
943 val = GET_DROOP_FREQ(cld->safe_dvfs->dfll_data.droop_rate_min,
944 cld->ref_rate) << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT;
945 BUG_ON(val > CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK);
946 val |= (param->droop_cut_value << CL_DVFS_DROOP_CTRL_CUT_SHIFT);
947 val |= (param->droop_restore_ramp << CL_DVFS_DROOP_CTRL_RAMP_SHIFT);
948 cl_dvfs_writel(cld, val, CL_DVFS_DROOP_CTRL);
/* reset last request to "no request, full scale" */
950 cld->last_req.cap = 0;
951 cld->last_req.freq = 0;
952 cld->last_req.output = 0;
953 cld->last_req.scale = SCALE_MAX - 1;
954 cl_dvfs_writel(cld, CL_DVFS_FREQ_REQ_SCALE_MASK, CL_DVFS_FREQ_REQ);
955 cl_dvfs_writel(cld, param->scale_out_ramp, CL_DVFS_SCALE_RAMP);
957 /* select frequency for monitoring */
958 cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ, CL_DVFS_MONITOR_CTRL);
962 static int cl_dvfs_enable_clocks(struct tegra_cl_dvfs *cld)
964 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
965 clk_enable(cld->i2c_clk);
967 clk_enable(cld->ref_clk);
968 clk_enable(cld->soc_clk);
972 static void cl_dvfs_disable_clocks(struct tegra_cl_dvfs *cld)
974 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
975 clk_disable(cld->i2c_clk);
977 clk_disable(cld->ref_clk);
978 clk_disable(cld->soc_clk);
/*
 * One-time controller initialization: enable interface/module clocks,
 * cache clock rates, set up the tune and calibration timers, build the
 * voltage maps and thresholds, configure the PMU interface and control
 * logic, and leave the module with clocks disabled.
 */
981 static int cl_dvfs_init(struct tegra_cl_dvfs *cld)
985 /* Enable output interface clock */
986 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C) {
987 ret = clk_enable(cld->i2c_clk);
989 pr_err("%s: Failed to enable %s\n",
990 __func__, cld->i2c_clk->name);
993 cld->i2c_rate = clk_get_rate(cld->i2c_clk);
995 pr_err("%s: PMU interface is not I2C\n", __func__);
999 /* Enable module clocks, release control logic reset */
1000 ret = clk_enable(cld->ref_clk);
1002 pr_err("%s: Failed to enable %s\n",
1003 __func__, cld->ref_clk->name);
1006 ret = clk_enable(cld->soc_clk);
/* NOTE(review): copy-paste bug - this message reports ref_clk->name
 * although it is soc_clk that failed; should be cld->soc_clk->name */
1008 pr_err("%s: Failed to enable %s\n",
1009 __func__, cld->ref_clk->name);
1012 cld->ref_rate = clk_get_rate(cld->ref_clk);
1013 BUG_ON(!cld->ref_rate);
1015 /* init tuning timer */
1016 init_timer(&cld->tune_timer);
1017 cld->tune_timer.function = tune_timer_cb;
1018 cld->tune_timer.data = (unsigned long)cld;
1019 cld->tune_delay = usecs_to_jiffies(CL_DVFS_TUNE_HIGH_DELAY);
1021 /* init calibration timer */
1022 init_timer(&cld->calibration_timer);
1023 cld->calibration_timer.function = calibration_timer_cb;
1024 cld->calibration_timer.data = (unsigned long)cld;
1025 cld->calibration_delay = usecs_to_jiffies(CL_DVFS_CALIBR_TIME);
1027 /* Get ready output voltage mapping */
1028 cl_dvfs_init_maps(cld);
1030 /* Setup output range thresholds */
1031 cl_dvfs_init_output_thresholds(cld);
1033 /* Setup PMU interface */
1034 cl_dvfs_init_out_if(cld);
1036 /* Configure control registers in disabled mode and disable clocks */
1037 cl_dvfs_init_cntrl_logic(cld);
1038 cl_dvfs_disable_clocks(cld);
1044 * Re-initialize and enable target device clock in open loop mode. Called
1045 * directly from SoC clock resume syscore operation. Closed loop will be
1046 * re-entered in platform syscore ops as well.
/*
 * Resume path: re-run output-interface and control-logic initialization
 * (which resets the mode and last request), then restore the saved
 * request and re-enter open loop if the controller was active.  Warns if
 * the DFLL was suspended while still locked in closed loop.
 */
1048 void tegra_cl_dvfs_resume(struct tegra_cl_dvfs *cld)
1050 enum tegra_cl_dvfs_ctrl_mode mode = cld->mode;
1051 struct dfll_rate_req req = cld->last_req;
1053 cl_dvfs_enable_clocks(cld);
1055 /* Setup PMU interface, and configure controls in disabled mode */
1056 cl_dvfs_init_out_if(cld);
1057 cl_dvfs_init_cntrl_logic(cld);
1059 cl_dvfs_disable_clocks(cld);
1061 /* Restore last request and mode */
1062 cld->last_req = req;
1063 if (mode != TEGRA_CL_DVFS_DISABLED) {
1064 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1065 WARN(mode > TEGRA_CL_DVFS_OPEN_LOOP,
1066 "DFLL was left locked in suspend\n");
1070 #ifdef CONFIG_THERMAL
1071 /* cl_dvfs vmin cooling device */
1072 static int tegra_cl_dvfs_get_vmin_cdev_max_state(
1073 struct thermal_cooling_device *cdev, unsigned long *max_state)
1075 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1076 *max_state = cld->vmin_cdev->trip_temperatures_num;
1080 static int tegra_cl_dvfs_get_vmin_cdev_cur_state(
1081 struct thermal_cooling_device *cdev, unsigned long *cur_state)
1083 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1084 *cur_state = cld->therm_floor_idx;
/*
 * Cooling device callback: switch to the thermal floor for @cur_state.
 * Under the DFLL clock lock, update the floor index, recompute the
 * minimum DVCO rate, and (in closed loop) re-issue the current rate
 * request so the new floor takes effect immediately.
 */
1088 static int tegra_cl_dvfs_set_vmin_cdev_state(
1089 struct thermal_cooling_device *cdev, unsigned long cur_state)
1091 unsigned long flags;
1092 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1094 clk_lock_save(cld->dfll_clk, &flags);
1096 if (cld->therm_floor_idx != cur_state) {
1097 cld->therm_floor_idx = cur_state;
1098 cl_dvfs_set_dvco_rate_min(cld);
1099 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1100 tegra_cl_dvfs_request_rate(cld,
1101 tegra_cl_dvfs_request_get(cld));
1104 clk_unlock_restore(cld->dfll_clk, &flags);
/* Thermal cooling device operations for the vmin (voltage floor) cdev */
1108 static struct thermal_cooling_device_ops tegra_cl_dvfs_cooling_ops = {
1109 .get_max_state = tegra_cl_dvfs_get_vmin_cdev_max_state,
1110 .get_cur_state = tegra_cl_dvfs_get_vmin_cdev_cur_state,
1111 .set_cur_state = tegra_cl_dvfs_set_vmin_cdev_state,
/*
 * Deferred-work registration of the vmin cooling device.  Registration
 * failure is only logged: the controller is initialized at the
 * worst-case (cold) thermal floor, so it stays safe without the cdev.
 */
1114 static void tegra_cl_dvfs_init_cdev(struct work_struct *work)
1116 struct tegra_cl_dvfs *cld = container_of(
1117 work, struct tegra_cl_dvfs, init_cdev_work);
1119 if (!cld->vmin_cdev)
1122 /* just report error - initialized at WC temperature, anyway */
1123 if (IS_ERR_OR_NULL(thermal_cooling_device_register(
1124 cld->vmin_cdev->cdev_type, (void *)cld,
1125 &tegra_cl_dvfs_cooling_ops))) {
1126 pr_err("tegra cooling device %s failed to register\n",
1127 cld->vmin_cdev->cdev_type);
1130 pr_info("%s cooling device is registered\n", cld->vmin_cdev->cdev_type);
1134 #ifdef CONFIG_PM_SLEEP
1136 * cl_dvfs controls clock/voltage to other devices, including CPU. Therefore,
1137 * cl_dvfs driver pm suspend callback does not stop cl-dvfs operations. It is
 * only used to enforce cold voltage limit, since SoC may cool down during
 * suspend without waking up. The correct temperature zone after suspend will
1140 * be updated via cl_dvfs cooling device interface during resume of temperature
1143 static int tegra_cl_dvfs_suspend_cl(struct device *dev)
1145 unsigned long flags;
1146 struct tegra_cl_dvfs *cld = dev_get_drvdata(dev);
1148 clk_lock_save(cld->dfll_clk, &flags);
1149 cld->therm_floor_idx = 0;
1150 cl_dvfs_set_dvco_rate_min(cld);
1151 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1152 set_cl_config(cld, &cld->last_req);
1153 set_request(cld, &cld->last_req);
1155 clk_unlock_restore(cld->dfll_clk, &flags);
1160 static const struct dev_pm_ops tegra_cl_dvfs_pm_ops = {
1161 .suspend = tegra_cl_dvfs_suspend_cl,
1165 static int __init tegra_cl_dvfs_probe(struct platform_device *pdev)
1168 struct tegra_cl_dvfs_platform_data *p_data;
1169 struct resource *res;
1170 struct tegra_cl_dvfs *cld;
1171 struct clk *ref_clk, *soc_clk, *i2c_clk, *safe_dvfs_clk, *dfll_clk;
1174 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1176 dev_err(&pdev->dev, "missing register base\n");
1180 p_data = pdev->dev.platform_data;
1181 if (!p_data || !p_data->cfg_param || !p_data->vdd_map) {
1182 dev_err(&pdev->dev, "missing platform data\n");
1186 ref_clk = clk_get(&pdev->dev, "ref");
1187 soc_clk = clk_get(&pdev->dev, "soc");
1188 i2c_clk = clk_get(&pdev->dev, "i2c");
1189 safe_dvfs_clk = clk_get(&pdev->dev, "safe_dvfs");
1190 dfll_clk = clk_get(&pdev->dev, p_data->dfll_clk_name);
1191 if (IS_ERR(ref_clk) || IS_ERR(soc_clk) || IS_ERR(i2c_clk)) {
1192 dev_err(&pdev->dev, "missing control clock\n");
1195 if (IS_ERR(safe_dvfs_clk)) {
1196 dev_err(&pdev->dev, "missing safe dvfs source clock\n");
1197 return PTR_ERR(safe_dvfs_clk);
1199 if (IS_ERR(dfll_clk)) {
1200 dev_err(&pdev->dev, "missing target dfll clock\n");
1201 return PTR_ERR(dfll_clk);
1203 if (!safe_dvfs_clk->dvfs || !safe_dvfs_clk->dvfs->dvfs_rail) {
1204 dev_err(&pdev->dev, "invalid safe dvfs source\n");
1208 /* Allocate cl_dvfs object and populate resource accessors */
1209 cld = kzalloc(sizeof(*cld), GFP_KERNEL);
1211 dev_err(&pdev->dev, "failed to allocate cl_dvfs object\n");
1215 cld->cl_base = (u32)IO_ADDRESS(res->start);
1216 cld->p_data = p_data;
1217 cld->ref_clk = ref_clk;
1218 cld->soc_clk = soc_clk;
1219 cld->i2c_clk = i2c_clk;
1220 cld->dfll_clk = dfll_clk;
1221 cld->safe_dvfs = safe_dvfs_clk->dvfs;
1222 #ifdef CONFIG_THERMAL
1223 cld->vmin_cdev = cld->safe_dvfs->dvfs_rail->vmin_cdev;
1224 INIT_WORK(&cld->init_cdev_work, tegra_cl_dvfs_init_cdev);
1226 /* Initialize cl_dvfs */
1227 ret = cl_dvfs_init(cld);
1233 platform_set_drvdata(pdev, cld);
1236 * Schedule cooling device registration as a separate work to address
1237 * the following race: when cl_dvfs is probed the DFLL child clock
1238 * (e.g., CPU) cannot be changed; on the other hand cooling device
1239 * registration will update the entire thermal zone, and may trigger
1240 * rate change of the target clock
1243 schedule_work(&cld->init_cdev_work);
1247 static struct platform_driver tegra_cl_dvfs_driver = {
1249 .name = "tegra_cl_dvfs",
1250 .owner = THIS_MODULE,
1251 #ifdef CONFIG_PM_SLEEP
1252 .pm = &tegra_cl_dvfs_pm_ops,
1257 int __init tegra_init_cl_dvfs(void)
1259 return platform_driver_probe(&tegra_cl_dvfs_driver,
1260 tegra_cl_dvfs_probe);
1266 * - DISABLED: control logic mode - DISABLED, output interface disabled,
1268 * - OPEN_LOOP: control logic mode - OPEN_LOOP, output interface disabled,
1269 * dfll is running "unlocked"
1270 * - CLOSED_LOOP: control logic mode - CLOSED_LOOP, output interface enabled,
1271 * dfll is running "locked"
1274 /* Switch from any other state to DISABLED state */
1275 void tegra_cl_dvfs_disable(struct tegra_cl_dvfs *cld)
1277 switch (cld->mode) {
1278 case TEGRA_CL_DVFS_CLOSED_LOOP:
1279 WARN(1, "DFLL is disabled directly from closed loop mode\n");
1281 output_disable_ol_prepare(cld);
1282 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1283 output_disable_post_ol(cld);
1284 cl_dvfs_disable_clocks(cld);
1287 case TEGRA_CL_DVFS_OPEN_LOOP:
1288 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1289 cl_dvfs_disable_clocks(cld);
1293 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1298 /* Switch from DISABLE state to OPEN_LOOP state */
1299 int tegra_cl_dvfs_enable(struct tegra_cl_dvfs *cld)
1301 if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1302 pr_err("%s: Cannot enable DFLL in %s mode\n",
1303 __func__, mode_name[cld->mode]);
1307 if (cld->mode != TEGRA_CL_DVFS_DISABLED)
1310 cl_dvfs_enable_clocks(cld);
1311 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1315 /* Switch from OPEN_LOOP state to CLOSED_LOOP state */
1316 int tegra_cl_dvfs_lock(struct tegra_cl_dvfs *cld)
1318 struct dfll_rate_req *req = &cld->last_req;
1320 switch (cld->mode) {
1321 case TEGRA_CL_DVFS_CLOSED_LOOP:
1324 case TEGRA_CL_DVFS_OPEN_LOOP:
1325 if (req->freq == 0) {
1326 pr_err("%s: Cannot lock DFLL at rate 0\n", __func__);
1331 * Update control logic setting with last rate request;
1332 * sync output limits with current tuning and thermal state,
1333 * enable output and switch to closed loop mode.
1335 set_cl_config(cld, req);
1337 set_mode(cld, TEGRA_CL_DVFS_CLOSED_LOOP);
1338 set_request(cld, req);
1339 calibration_timer_update(cld);
1343 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1344 pr_err("%s: Cannot lock DFLL in %s mode\n",
1345 __func__, mode_name[cld->mode]);
1350 /* Switch from CLOSED_LOOP state to OPEN_LOOP state */
1351 int tegra_cl_dvfs_unlock(struct tegra_cl_dvfs *cld)
1355 switch (cld->mode) {
1356 case TEGRA_CL_DVFS_CLOSED_LOOP:
1358 ret = output_disable_ol_prepare(cld);
1359 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1361 ret = output_disable_post_ol(cld);
1364 case TEGRA_CL_DVFS_OPEN_LOOP:
1368 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1369 pr_err("%s: Cannot unlock DFLL in %s mode\n",
1370 __func__, mode_name[cld->mode]);
1376 * Convert requested rate into the control logic settings. In CLOSED_LOOP mode,
1377 * update new settings immediately to adjust DFLL output rate accordingly.
1378 * Otherwise, just save them until next switch to closed loop.
1380 int tegra_cl_dvfs_request_rate(struct tegra_cl_dvfs *cld, unsigned long rate)
1383 struct dfll_rate_req req;
1386 if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1387 pr_err("%s: Cannot set DFLL rate in %s mode\n",
1388 __func__, mode_name[cld->mode]);
1392 /* Calibrate dfll minimum rate */
1393 cl_dvfs_calibrate(cld);
1395 /* Determine DFLL output scale */
1396 req.scale = SCALE_MAX - 1;
1397 if (rate < cld->dvco_rate_min) {
1398 int scale = DIV_ROUND_CLOSEST((rate / 1000 * SCALE_MAX),
1399 (cld->dvco_rate_min / 1000));
1401 pr_err("%s: Rate %lu is below scalable range\n",
1405 req.scale = scale - 1;
1406 rate = cld->dvco_rate_min;
1409 /* Convert requested rate into frequency request and scale settings */
1410 val = GET_REQUEST_FREQ(rate, cld->ref_rate);
1411 if (val > FREQ_MAX) {
1412 pr_err("%s: Rate %lu is above dfll range\n", __func__, rate);
1416 rate = GET_REQUEST_RATE(val, cld->ref_rate);
1418 /* Find safe voltage for requested rate */
1419 if (find_safe_output(cld, rate, &req.output)) {
1420 pr_err("%s: Failed to find safe output for rate %lu\n",
1424 req.cap = req.output;
1427 * Save validated request, and in CLOSED_LOOP mode actually update
1428 * control logic settings; use request output to set maximum voltage
1429 * limit, but keep one LUT step room above safe voltage
1431 cld->last_req = req;
1433 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1434 set_cl_config(cld, &cld->last_req);
1435 set_request(cld, &cld->last_req);
1440 unsigned long tegra_cl_dvfs_request_get(struct tegra_cl_dvfs *cld)
1442 struct dfll_rate_req *req = &cld->last_req;
1445 * If running below dvco minimum rate with skipper resolution:
1446 * dvco min rate / 256 - return last requested rate rounded to 1kHz.
1447 * If running above dvco minimum, with closed loop resolution:
1448 * ref rate / 2 - return cl_dvfs target rate.
1450 if ((req->scale + 1) < SCALE_MAX)
1451 return req->rate / 1000 * 1000;
1453 return GET_REQUEST_RATE(req->freq, cld->ref_rate);
1456 #ifdef CONFIG_DEBUG_FS
1458 static int lock_get(void *data, u64 *val)
1460 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1461 *val = cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP;
1464 static int lock_set(void *data, u64 val)
1466 struct clk *c = (struct clk *)data;
1467 return tegra_clk_cfg_ex(c, TEGRA_CLK_DFLL_LOCK, val);
1469 DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");
1471 static int monitor_get(void *data, u64 *val)
1474 unsigned long flags;
1475 struct clk *c = (struct clk *)data;
1476 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1478 clk_enable(cld->soc_clk);
1480 clk_lock_save(c, &flags);
1481 v = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
1482 CL_DVFS_MONITOR_DATA_MASK;
1484 if (cl_dvfs_readl(cld, CL_DVFS_MONITOR_CTRL) ==
1485 CL_DVFS_MONITOR_CTRL_FREQ) {
1486 v = GET_MONITORED_RATE(v, cld->ref_rate);
1487 s = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
1488 s = (s & CL_DVFS_FREQ_REQ_SCALE_MASK) >>
1489 CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1490 *val = (u64)v * (s + 1) / 256;
1492 clk_unlock_restore(c, &flags);
1493 clk_disable(cld->soc_clk);
1498 clk_unlock_restore(c, &flags);
1499 clk_disable(cld->soc_clk);
1502 DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
1504 static int vmin_get(void *data, u64 *val)
1507 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1509 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
1510 clk_enable(cld->soc_clk);
1511 v = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
1512 v = (v & CL_DVFS_OUTPUT_CFG_MIN_MASK) >> CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
1513 clk_disable(cld->soc_clk);
1517 *val = cld->out_map[v]->reg_uV / 1000;
1520 DEFINE_SIMPLE_ATTRIBUTE(vmin_fops, vmin_get, NULL, "%llu\n");
1522 static int tune_high_mv_get(void *data, u64 *val)
1524 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1525 *val = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
1528 static int tune_high_mv_set(void *data, u64 val)
1530 unsigned long flags;
1531 struct clk *c = (struct clk *)data;
1532 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1534 clk_lock_save(c, &flags);
1536 cld->safe_dvfs->dfll_data.tune_high_min_millivolts = val;
1537 cl_dvfs_init_output_thresholds(cld);
1538 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1539 set_cl_config(cld, &cld->last_req);
1540 set_request(cld, &cld->last_req);
1543 clk_unlock_restore(c, &flags);
1546 DEFINE_SIMPLE_ATTRIBUTE(tune_high_mv_fops, tune_high_mv_get, tune_high_mv_set,
1548 static int fmin_get(void *data, u64 *val)
1550 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1551 *val = cld->dvco_rate_min;
1554 DEFINE_SIMPLE_ATTRIBUTE(dvco_rate_min_fops, fmin_get, NULL, "%llu\n");
1556 static int calibr_delay_get(void *data, u64 *val)
1558 struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1559 *val = jiffies_to_msecs(cld->calibration_delay);
1562 static int calibr_delay_set(void *data, u64 val)
1564 unsigned long flags;
1565 struct clk *c = (struct clk *)data;
1566 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1568 clk_lock_save(c, &flags);
1569 cld->calibration_delay = msecs_to_jiffies(val);
1570 clk_unlock_restore(c, &flags);
1573 DEFINE_SIMPLE_ATTRIBUTE(calibr_delay_fops, calibr_delay_get, calibr_delay_set,
1576 static int cl_register_show(struct seq_file *s, void *data)
1579 struct clk *c = s->private;
1580 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1582 clk_enable(cld->soc_clk);
1584 seq_printf(s, "CONTROL REGISTERS:\n");
1585 for (offs = 0; offs <= CL_DVFS_MONITOR_DATA; offs += 4)
1586 seq_printf(s, "[0x%02x] = 0x%08x\n",
1587 offs, cl_dvfs_readl(cld, offs));
1589 seq_printf(s, "\nI2C and INTR REGISTERS:\n");
1590 for (offs = CL_DVFS_I2C_CFG; offs <= CL_DVFS_I2C_STS; offs += 4)
1591 seq_printf(s, "[0x%02x] = 0x%08x\n",
1592 offs, cl_dvfs_readl(cld, offs));
1594 offs = CL_DVFS_INTR_STS;
1595 seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
1596 offs = CL_DVFS_INTR_EN;
1597 seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
1598 offs = CL_DVFS_I2C_CLK_DIVISOR;
1599 seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
1601 seq_printf(s, "\nLUT:\n");
1602 for (offs = CL_DVFS_OUTPUT_LUT;
1603 offs < CL_DVFS_OUTPUT_LUT + 4 * MAX_CL_DVFS_VOLTAGES;
1605 seq_printf(s, "[0x%02x] = 0x%08x\n",
1606 offs, cl_dvfs_readl(cld, offs));
1608 clk_disable(cld->soc_clk);
1612 static int cl_register_open(struct inode *inode, struct file *file)
1614 return single_open(file, cl_register_show, inode->i_private);
1617 static ssize_t cl_register_write(struct file *file,
1618 const char __user *userbuf, size_t count, loff_t *ppos)
1623 struct clk *c = file->f_path.dentry->d_inode->i_private;
1624 struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1626 if (sizeof(buf) <= count)
1629 if (copy_from_user(buf, userbuf, count))
1632 /* terminate buffer and trim - white spaces may be appended
1633 * at the end when invoked from shell command line */
1637 if (sscanf(buf, "[0x%x] = 0x%x", &offs, &val) != 2)
1640 clk_enable(cld->soc_clk);
1641 cl_dvfs_writel(cld, val, offs & (~0x3));
1642 clk_disable(cld->soc_clk);
1646 static const struct file_operations cl_register_fops = {
1647 .open = cl_register_open,
1649 .write = cl_register_write,
1650 .llseek = seq_lseek,
1651 .release = single_release,
1654 int __init tegra_cl_dvfs_debug_init(struct clk *dfll_clk)
1656 struct dentry *cl_dvfs_dentry;
1658 if (!dfll_clk || !dfll_clk->dent || (dfll_clk->state == UNINITIALIZED))
1661 if (!debugfs_create_file("lock", S_IRUGO | S_IWUSR,
1662 dfll_clk->dent, dfll_clk, &lock_fops))
1665 cl_dvfs_dentry = debugfs_create_dir("cl_dvfs", dfll_clk->dent);
1666 if (!cl_dvfs_dentry)
1669 if (!debugfs_create_file("monitor", S_IRUGO,
1670 cl_dvfs_dentry, dfll_clk, &monitor_fops))
1673 if (!debugfs_create_file("vmin_mv", S_IRUGO,
1674 cl_dvfs_dentry, dfll_clk, &vmin_fops))
1677 if (!debugfs_create_file("tune_high_mv", S_IRUGO | S_IWUSR,
1678 cl_dvfs_dentry, dfll_clk, &tune_high_mv_fops))
1681 if (!debugfs_create_file("dvco_min", S_IRUGO,
1682 cl_dvfs_dentry, dfll_clk, &dvco_rate_min_fops))
1685 if (!debugfs_create_file("calibr_delay", S_IRUGO,
1686 cl_dvfs_dentry, dfll_clk, &calibr_delay_fops))
1689 if (!debugfs_create_file("registers", S_IRUGO | S_IWUSR,
1690 cl_dvfs_dentry, dfll_clk, &cl_register_fops))
1696 debugfs_remove_recursive(dfll_clk->dent);