2 * arch/arm/mach-tegra/tegra_cl_dvfs.c
4 * Copyright (c) 2012-2013 NVIDIA CORPORATION. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
19 #include <linux/kernel.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/err.h>
24 #include <linux/clk.h>
25 #include <linux/interrupt.h>
26 #include <linux/suspend.h>
27 #include <linux/debugfs.h>
28 #include <linux/seq_file.h>
29 #include <linux/uaccess.h>
30 #include <linux/module.h>
31 #include <linux/platform_device.h>
33 #include <mach/irqs.h>
34 #include <mach/hardware.h>
36 #include "tegra_cl_dvfs.h"
/*
 * CL_DVFS register map: MMIO offsets plus field shifts/masks.
 * NOTE(review): this SOURCE appears to be a line-numbered dump with blank
 * and brace-only lines omitted; the leading decimal on each line is the
 * original file line number, not code.
 */
43 #define CL_DVFS_CTRL 0x00
44 #define CL_DVFS_CONFIG 0x04
45 #define CL_DVFS_CONFIG_DIV_MASK 0xff
47 #define CL_DVFS_PARAMS 0x08
48 #define CL_DVFS_PARAMS_CG_SCALE (0x1 << 24)
49 #define CL_DVFS_PARAMS_FORCE_MODE_SHIFT 22
50 #define CL_DVFS_PARAMS_FORCE_MODE_MASK (0x3 << CL_DVFS_PARAMS_FORCE_MODE_SHIFT)
51 #define CL_DVFS_PARAMS_CF_PARAM_SHIFT 16
52 #define CL_DVFS_PARAMS_CF_PARAM_MASK (0x3f << CL_DVFS_PARAMS_CF_PARAM_SHIFT)
53 #define CL_DVFS_PARAMS_CI_PARAM_SHIFT 8
54 #define CL_DVFS_PARAMS_CI_PARAM_MASK (0x7 << CL_DVFS_PARAMS_CI_PARAM_SHIFT)
55 #define CL_DVFS_PARAMS_CG_PARAM_SHIFT 0
56 #define CL_DVFS_PARAMS_CG_PARAM_MASK (0xff << CL_DVFS_PARAMS_CG_PARAM_SHIFT)
58 #define CL_DVFS_TUNE0 0x0c
59 #define CL_DVFS_TUNE1 0x10
/* Frequency request register: force value, scale, and 7-bit frequency. */
61 #define CL_DVFS_FREQ_REQ 0x14
62 #define CL_DVFS_FREQ_REQ_FORCE_ENABLE (0x1 << 28)
63 #define CL_DVFS_FREQ_REQ_FORCE_SHIFT 16
64 #define CL_DVFS_FREQ_REQ_FORCE_MASK (0xfff << CL_DVFS_FREQ_REQ_FORCE_SHIFT)
/* 12-bit signed force value range */
65 #define FORCE_MAX 2047
66 #define FORCE_MIN -2048
67 #define CL_DVFS_FREQ_REQ_SCALE_SHIFT 8
68 #define CL_DVFS_FREQ_REQ_SCALE_MASK (0xff << CL_DVFS_FREQ_REQ_SCALE_SHIFT)
70 #define CL_DVFS_FREQ_REQ_FREQ_VALID (0x1 << 7)
71 #define CL_DVFS_FREQ_REQ_FREQ_SHIFT 0
72 #define CL_DVFS_FREQ_REQ_FREQ_MASK (0x7f << CL_DVFS_FREQ_REQ_FREQ_SHIFT)
75 #define CL_DVFS_SCALE_RAMP 0x18
/* Droop (skipper 1) control: min frequency, cut, and restore ramp. */
77 #define CL_DVFS_DROOP_CTRL 0x1c
78 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT 16
79 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK \
80 (0xff << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT)
81 #define CL_DVFS_DROOP_CTRL_CUT_SHIFT 8
82 #define CL_DVFS_DROOP_CTRL_CUT_MASK (0xf << CL_DVFS_DROOP_CTRL_CUT_SHIFT)
83 #define CL_DVFS_DROOP_CTRL_RAMP_SHIFT 0
84 #define CL_DVFS_DROOP_CTRL_RAMP_MASK (0xff << CL_DVFS_DROOP_CTRL_RAMP_SHIFT)
/* Output (voltage) configuration: safe/max/min LUT indexes, I2C enable. */
86 #define CL_DVFS_OUTPUT_CFG 0x20
87 #define CL_DVFS_OUTPUT_CFG_I2C_ENABLE (0x1 << 30)
88 #define CL_DVFS_OUTPUT_CFG_SAFE_SHIFT 24
89 #define CL_DVFS_OUTPUT_CFG_SAFE_MASK \
90 (OUT_MASK << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT)
91 #define CL_DVFS_OUTPUT_CFG_MAX_SHIFT 16
92 #define CL_DVFS_OUTPUT_CFG_MAX_MASK \
93 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MAX_SHIFT)
94 #define CL_DVFS_OUTPUT_CFG_MIN_SHIFT 8
95 #define CL_DVFS_OUTPUT_CFG_MIN_MASK \
96 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MIN_SHIFT)
98 #define CL_DVFS_OUTPUT_FORCE 0x24
99 #define CL_DVFS_MONITOR_CTRL 0x28
100 #define CL_DVFS_MONITOR_CTRL_DISABLE 0
101 #define CL_DVFS_MONITOR_CTRL_FREQ 6
102 #define CL_DVFS_MONITOR_DATA 0x2c
103 #define CL_DVFS_MONITOR_DATA_NEW (0x1 << 16)
104 #define CL_DVFS_MONITOR_DATA_MASK 0xFFFF
/* PMU I2C interface configuration. */
106 #define CL_DVFS_I2C_CFG 0x40
107 #define CL_DVFS_I2C_CFG_ARB_ENABLE (0x1 << 20)
108 #define CL_DVFS_I2C_CFG_HS_CODE_SHIFT 16
109 #define CL_DVFS_I2C_CFG_HS_CODE_MASK (0x7 << CL_DVFS_I2C_CFG_HS_CODE_SHIFT)
110 #define CL_DVFS_I2C_CFG_PACKET_ENABLE (0x1 << 15)
111 #define CL_DVFS_I2C_CFG_SIZE_SHIFT 12
112 #define CL_DVFS_I2C_CFG_SIZE_MASK (0x7 << CL_DVFS_I2C_CFG_SIZE_SHIFT)
113 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_10 (0x1 << 10)
114 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT 0
115 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_MASK \
116 (0x3ff << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT)
118 #define CL_DVFS_I2C_VDD_REG_ADDR 0x44
119 #define CL_DVFS_I2C_STS 0x48
120 #define CL_DVFS_I2C_STS_I2C_LAST_SHIFT 1
121 #define CL_DVFS_I2C_STS_I2C_REQ_PENDING 0x1
123 #define CL_DVFS_INTR_STS 0x5c
124 #define CL_DVFS_INTR_EN 0x60
125 #define CL_DVFS_INTR_MIN_MASK 0x1
126 #define CL_DVFS_INTR_MAX_MASK 0x2
128 #define CL_DVFS_I2C_CLK_DIVISOR 0x16c
129 #define CL_DVFS_I2C_CLK_DIVISOR_MASK 0xffff
130 #define CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT 16
131 #define CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT 0
133 #define CL_DVFS_OUTPUT_LUT 0x200
/* S/w timing constants: usec values converted to jiffies at init. */
135 #define CL_DVFS_CALIBR_TIME 40000
136 #define CL_DVFS_OUTPUT_PENDING_TIMEOUT 1000
137 #define CL_DVFS_OUTPUT_RAMP_DELAY 100
138 #define CL_DVFS_TUNE_HIGH_DELAY 2000
140 #define CL_DVFS_TUNE_HIGH_MARGIN_MV 20
/*
 * Controller mode; set_mode() writes (mode - 1) to the h/w CTRL register,
 * so UNINITIALIZED is a s/w-only state.
 */
142 enum tegra_cl_dvfs_ctrl_mode {
143 TEGRA_CL_DVFS_UNINITIALIZED = 0,
144 TEGRA_CL_DVFS_DISABLED = 1,
145 TEGRA_CL_DVFS_OPEN_LOOP = 2,
146 TEGRA_CL_DVFS_CLOSED_LOOP = 3,
/* DFLL tuning state machine: LOW -> HIGH_REQUEST (timer pending) -> HIGH. */
149 enum tegra_cl_dvfs_tune_state {
150 TEGRA_CL_DVFS_TUNE_LOW = 0,
151 TEGRA_CL_DVFS_TUNE_HIGH_REQUEST,
152 TEGRA_CL_DVFS_TUNE_HIGH,
/* One DFLL rate request (fields not visible in this excerpt: freq, scale,
   output, cap, rate are referenced by the code below). */
155 struct dfll_rate_req {
/* Per-instance cl_dvfs driver state (excerpt; some members elided). */
163 struct tegra_cl_dvfs {
165 struct tegra_cl_dvfs_platform_data *p_data;
167 struct dvfs *safe_dvfs;
168 struct thermal_cooling_device *vmax_cdev;
169 struct thermal_cooling_device *vmin_cdev;
170 struct work_struct init_cdev_work;
175 struct clk *dfll_clk;
176 unsigned long ref_rate;
177 unsigned long i2c_rate;
179 /* output voltage mapping:
180 * legacy dvfs table index -to- cl_dvfs output LUT index
181 * cl_dvfs output LUT index -to- PMU value/voltage pair ptr
183 u8 clk_dvfs_map[MAX_DVFS_FREQS];
184 struct voltage_reg_map *out_map[MAX_CL_DVFS_VOLTAGES];
187 u8 tune_high_out_start;
188 u8 tune_high_out_min;
190 u8 thermal_out_caps[MAX_THERMAL_LIMITS];
191 u8 thermal_out_floors[MAX_THERMAL_LIMITS];
193 int therm_floors_num;
194 unsigned long dvco_rate_min;
201 struct dfll_rate_req last_req;
202 enum tegra_cl_dvfs_tune_state tune_state;
203 enum tegra_cl_dvfs_ctrl_mode mode;
/* Timers: tune_timer finalizes the LOW->HIGH tuning transition;
   calibration_timer periodically re-measures dvco_rate_min. */
205 struct timer_list tune_timer;
206 unsigned long tune_delay;
207 struct timer_list calibration_timer;
208 unsigned long calibration_delay;
209 ktime_t last_calibration;
210 unsigned long calibration_range_min;
211 unsigned long calibration_range_max;
214 /* Conversion macros (different scales for frequency request, and monitored
215 rate is not a typo) */
/* Request unit is ref_rate/2; monitor unit is ref_rate/4. */
216 #define RATE_STEP(cld) ((cld)->ref_rate / 2)
217 #define GET_REQUEST_FREQ(rate, ref_rate) ((rate) / ((ref_rate) / 2))
218 #define GET_REQUEST_RATE(freq, ref_rate) ((freq) * ((ref_rate) / 2))
219 #define GET_MONITORED_RATE(freq, ref_rate) ((freq) * ((ref_rate) / 4))
220 #define GET_DROOP_FREQ(rate, ref_rate) ((rate) / ((ref_rate) / 4))
221 #define ROUND_MIN_RATE(rate, ref_rate) \
222 (DIV_ROUND_UP(rate, (ref_rate) / 2) * ((ref_rate) / 2))
223 #define GET_DIV(ref_rate, out_rate, scale) \
224 DIV_ROUND_UP((ref_rate), (out_rate) * (scale))
/* Human-readable mode names, indexed by enum tegra_cl_dvfs_ctrl_mode. */
226 static const char *mode_name[] = {
227 [TEGRA_CL_DVFS_UNINITIALIZED] = "uninitialized",
228 [TEGRA_CL_DVFS_DISABLED] = "disabled",
229 [TEGRA_CL_DVFS_OPEN_LOOP] = "open_loop",
230 [TEGRA_CL_DVFS_CLOSED_LOOP] = "closed_loop",
/* Raw (no-barrier) read of a 32-bit cl_dvfs register at offset @offs. */
233 static inline u32 cl_dvfs_readl(struct tegra_cl_dvfs *cld, u32 offs)
235 return __raw_readl((void *)cld->cl_base + offs);
/* Raw (no-barrier) write of @val to the cl_dvfs register at offset @offs. */
237 static inline void cl_dvfs_writel(struct tegra_cl_dvfs *cld, u32 val, u32 offs)
239 __raw_writel(val, (void *)cld->cl_base + offs);
/* Fence: read back CTRL so preceding posted writes reach the h/w. */
241 static inline void cl_dvfs_wmb(struct tegra_cl_dvfs *cld)
244 cl_dvfs_readl(cld, CL_DVFS_CTRL);
/* Clear the FREQ_VALID bit so the h/w ignores the current rate request. */
247 static inline void invalidate_request(struct tegra_cl_dvfs *cld)
249 u32 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
250 val &= ~CL_DVFS_FREQ_REQ_FREQ_VALID;
251 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/* Enable the voltage output interface (I2C only; PWM not implemented). */
255 static inline int output_enable(struct tegra_cl_dvfs *cld)
257 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
259 /* FIXME: PWM output control */
260 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
261 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/*
 * Wait for any in-flight I2C transaction to finish, then disable output.
 * Status is double-read while still enabled (and once more after disable)
 * to close the race between the pending bit clearing and a new transfer
 * starting. On timeout the output is disabled anyway and an error is
 * reported (return value lines not visible in this excerpt).
 */
266 static noinline int output_flush_disable(struct tegra_cl_dvfs *cld)
270 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
272 /* Flush transactions in flight, and then disable */
273 for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
274 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
276 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
277 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
278 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
279 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
280 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/* Re-check after disabling: if still quiet, we are done. */
282 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
283 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
284 return 0; /* no pending rqst */
286 /* Re-enable, continue wait */
287 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
288 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
294 /* I2C request is still pending - disable, anyway, but report error */
295 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
296 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/*
 * Disable the output interface immediately, then poll until a possibly
 * in-flight I2C transaction completes (mirror of output_flush_disable()
 * with the disable/flush order swapped).
 */
301 static noinline int output_disable_flush(struct tegra_cl_dvfs *cld)
305 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
307 /* Disable output interface right away */
308 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
309 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
312 /* Flush possible transaction in flight */
313 for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
314 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
/* Double-read to make sure "not pending" is stable, not a sample race. */
316 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
317 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
318 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
323 /* I2C request is still pending - report error */
/*
 * Disable output before switching to open loop, but only when the h/w
 * does NOT require a quiet bus first (no I2C_WAIT_QUIET flag); otherwise
 * defer to output_disable_post_ol() after the mode switch.
 */
327 static inline int output_disable_ol_prepare(struct tegra_cl_dvfs *cld)
329 /* FIXME: PWM output control */
331 * If cl-dvfs h/w does not require output to be quiet before disable,
332 * s/w can stop I2C communications at any time (including operations
333 * in closed loop mode), and I2C bus integrity is guaranteed even in
334 * case of flush timeout.
336 if (!(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET)) {
337 int ret = output_disable_flush(cld);
/* Timeout here is only worth a debug message - bus stays intact. */
339 pr_debug("cl_dvfs: I2C pending timeout ol_prepare\n");
/* Counterpart of the above, used after entering open loop mode. */
345 static inline int output_disable_post_ol(struct tegra_cl_dvfs *cld)
347 /* FIXME: PWM output control */
349 * If cl-dvfs h/w requires output to be quiet before disable, s/w
350 * should stop I2C communications only after the switch to open loop
351 * mode, and I2C bus integrity is not guaranteed in case of flush
354 if (cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) {
355 int ret = output_flush_disable(cld);
/* Timeout here may leave the bus in a bad state - report loudly. */
357 pr_err("cl_dvfs: I2C pending timeout post_ol\n");
/* Program controller mode; h/w encoding is (s/w enum value - 1). */
363 static inline void set_mode(struct tegra_cl_dvfs *cld,
364 enum tegra_cl_dvfs_ctrl_mode mode)
367 cl_dvfs_writel(cld, mode - 1, CL_DVFS_CTRL);
/*
 * Highest allowed output LUT index: thermal cap for the current trip
 * point, further lowered by the request's own cap when @req is given.
 */
371 static inline u8 get_output_cap(struct tegra_cl_dvfs *cld,
372 struct dfll_rate_req *req)
374 u32 thermal_cap = cld->num_voltages - 1;
376 if (cld->therm_cap_idx && (cld->therm_cap_idx <= cld->therm_caps_num))
377 thermal_cap = cld->thermal_out_caps[cld->therm_cap_idx - 1];
378 if (req && (req->cap < thermal_cap))
/*
 * Lowest allowed output LUT index: max of high-tune minimum (when tuned
 * high) and the thermal floor for the current trip point.
 */
383 static inline u8 get_output_min(struct tegra_cl_dvfs *cld)
385 u32 tune_min, thermal_min;
387 tune_min = cld->tune_state == TEGRA_CL_DVFS_TUNE_LOW ?
388 0 : cld->tune_high_out_min;
390 if (cld->therm_floor_idx < cld->therm_floors_num)
391 thermal_min = cld->thermal_out_floors[cld->therm_floor_idx];
393 return max(tune_min, thermal_min);
/*
 * Write the output LUT to h/w, clamping entries below lut_min and above
 * lut_max to the respective boundary register values (this is how output
 * limits are enforced when dynamic OUTPUT_CFG update is not supported).
 */
396 static inline void _load_lut(struct tegra_cl_dvfs *cld)
401 val = cld->out_map[cld->lut_min]->reg_value;
402 for (i = 0; i <= cld->lut_min; i++)
403 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
405 for (; i < cld->lut_max; i++) {
406 val = cld->out_map[i]->reg_value;
407 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
410 val = cld->out_map[cld->lut_max]->reg_value;
411 for (; i < cld->num_voltages; i++)
412 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
/*
 * Reload the LUT, temporarily disabling I2C output when the h/w allows
 * disabling mid-transaction (no I2C_WAIT_QUIET flag) and output is on.
 */
417 static void cl_dvfs_load_lut(struct tegra_cl_dvfs *cld)
419 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
420 bool disable_out_for_load =
421 !(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) &&
422 (val & CL_DVFS_OUTPUT_CFG_I2C_ENABLE);
424 if (disable_out_for_load) {
425 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
426 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
428 udelay(2); /* 2us (big margin) window for disable propafation */
/* (the actual _load_lut() call sits on a line elided from this excerpt) */
433 if (disable_out_for_load) {
434 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
435 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/* Update s/w tune state with a debug trace of the transition. */
440 #define set_tune_state(cld, state) \
442 cld->tune_state = state; \
443 pr_debug("%s: set tune state %d\n", __func__, state); \
/* Switch DFLL tuning to the low (safe) range. */
446 static inline void tune_low(struct tegra_cl_dvfs *cld)
448 /* a must order: 1st tune dfll low, then tune trimmers low */
449 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
451 if (cld->safe_dvfs->dfll_data.tune_trimmers)
452 cld->safe_dvfs->dfll_data.tune_trimmers(false);
/* Switch DFLL tuning to the high range (reverse ordering vs tune_low). */
455 static inline void tune_high(struct tegra_cl_dvfs *cld)
457 /* a must order: 1st tune trimmers high, then tune dfll high */
458 if (cld->safe_dvfs->dfll_data.tune_trimmers)
459 cld->safe_dvfs->dfll_data.tune_trimmers(true);
460 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0_high_mv,
/*
 * Configure the controller for open loop: force low (safe) tuning,
 * raise the output floor accordingly, and set 1:1 output scaling with
 * the force engine disabled.
 */
465 static void set_ol_config(struct tegra_cl_dvfs *cld)
469 /* always tune low (safe) in open loop */
470 if (cld->tune_state != TEGRA_CL_DVFS_TUNE_LOW) {
471 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
/* Propagate the (possibly changed) output floor to h/w or to the LUT. */
474 out_min = get_output_min(cld);
475 if (cld->lut_min != out_min) {
476 cld->lut_min = out_min;
477 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
478 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
479 val &= ~CL_DVFS_OUTPUT_CFG_MIN_MASK;
480 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
481 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
483 cl_dvfs_load_lut(cld);
488 /* 1:1 scaling in open loop */
489 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
490 val |= (SCALE_MAX - 1) << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
491 val &= ~CL_DVFS_FREQ_REQ_FORCE_ENABLE;
492 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/*
 * Configure the controller for closed loop operation on request @req:
 * drive the tune state machine from the request's output cap, choose the
 * request output level between the floor and the cap, and update output
 * limits in h/w (dynamic OUTPUT_CFG) or by LUT reload.
 */
495 static void set_cl_config(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
497 u32 out_max, out_min;
498 u32 out_cap = get_output_cap(cld, req);
500 switch (cld->tune_state) {
501 case TEGRA_CL_DVFS_TUNE_LOW:
/* Cap rose above the high-tune threshold: request high tuning and let
   the tune timer complete the transition once output settles. */
502 if (out_cap > cld->tune_high_out_start) {
503 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST);
504 mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
508 case TEGRA_CL_DVFS_TUNE_HIGH:
509 case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST:
510 if (out_cap <= cld->tune_high_out_start) {
511 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
/* Keep requested output strictly inside (out_min, out_cap) when room
   permits; otherwise pin it just above the floor. */
519 out_min = get_output_min(cld);
520 if (out_cap > (out_min + 1))
521 req->output = out_cap - 1;
523 req->output = out_min + 1;
524 if (req->output == cld->safe_output)
526 out_max = max((u8)(req->output + 1), cld->minimax_output);
527 out_max = max((u8)(out_max), cld->force_out_min);
529 if ((cld->lut_min != out_min) || (cld->lut_max != out_max)) {
530 cld->lut_min = out_min;
531 cld->lut_max = out_max;
532 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
533 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
534 val &= ~(CL_DVFS_OUTPUT_CFG_MAX_MASK |
535 CL_DVFS_OUTPUT_CFG_MIN_MASK);
536 val |= out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT;
537 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
538 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
540 cl_dvfs_load_lut(cld);
/*
 * Tune timer: under the DFLL clock lock, complete the LOW -> HIGH tuning
 * transition once no I2C request is pending and both the last sent output
 * and the current floor have reached tune_high_out_min; otherwise re-arm
 * and try again after tune_delay.
 */
545 static void tune_timer_cb(unsigned long data)
548 u32 val, out_min, out_last;
549 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
551 clk_lock_save(cld->dfll_clk, &flags);
553 /* FIXME: PWM output control */
554 if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST) {
555 out_min = cld->lut_min;
556 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
/* I2C_LAST field = last output value actually sent to the PMU. */
557 out_last = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
559 if (!(val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) &&
560 (out_last >= cld->tune_high_out_min) &&
561 (out_min >= cld->tune_high_out_min)) {
562 udelay(CL_DVFS_OUTPUT_RAMP_DELAY);
563 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
566 mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
569 clk_unlock_restore(cld->dfll_clk, &flags);
/* Re-arm the calibration timer (no-op until calibration_delay is set). */
572 static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
574 if (!cld->calibration_delay)
576 mod_timer(&cld->calibration_timer, jiffies + cld->calibration_delay);
/*
 * Calibrate dvco_rate_min: measure the actual DVCO rate via the monitor
 * and nudge the s/w estimate by one RATE_STEP per pass, clamped to the
 * configured calibration range. Only runs in closed loop, when the last
 * request engaged the clock skipper, and rate-limited to one attempt per
 * CL_DVFS_CALIBR_TIME us.
 */
579 static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
584 u8 out_min = get_output_min(cld);
587 * Enter calibration procedure only if
588 * - closed loop operations
589 * - last request engaged clock skipper
590 * - at least specified time after the last calibration attempt
592 if ((cld->mode != TEGRA_CL_DVFS_CLOSED_LOOP) ||
593 (cld->last_req.rate > cld->dvco_rate_min))
597 if (ktime_us_delta(now, cld->last_calibration) < CL_DVFS_CALIBR_TIME)
599 cld->last_calibration = now;
/* Make sure the monitor is measuring frequency. */
601 if (cl_dvfs_readl(cld, CL_DVFS_MONITOR_CTRL) !=
602 CL_DVFS_MONITOR_CTRL_FREQ)
603 cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ,
604 CL_DVFS_MONITOR_CTRL);
606 /* Synchronize with sample period, and get rate measurements */
607 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
609 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
610 } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
/* Second wait for a NEW sample: discard the first (stale) period. */
612 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
613 } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
615 /* Defer calibration if I2C transaction is pending */
616 /* FIXME: PWM output control */
617 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
618 if (val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) {
619 calibration_timer_update(cld);
623 /* Adjust minimum rate */
624 data &= CL_DVFS_MONITOR_DATA_MASK;
625 data = GET_MONITORED_RATE(data, cld->ref_rate);
/* NOTE(review): 'val' is compared against out_min here; an extraction of
   the I2C_LAST output field from 'val' appears to happen on a line elided
   from this excerpt - confirm against the full source. */
626 if ((val > out_min) || (data < (cld->dvco_rate_min - RATE_STEP(cld))))
627 cld->dvco_rate_min -= RATE_STEP(cld);
628 else if (data > (cld->dvco_rate_min + RATE_STEP(cld)))
629 cld->dvco_rate_min += RATE_STEP(cld);
633 cld->dvco_rate_min = clamp(cld->dvco_rate_min,
634 cld->calibration_range_min, cld->calibration_range_max);
635 calibration_timer_update(cld);
636 pr_debug("%s: calibrated dvco_rate_min %lu\n",
637 __func__, cld->dvco_rate_min);
/* Calibration timer: run one calibration pass under the DFLL clock lock. */
640 static void calibration_timer_cb(unsigned long data)
643 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
645 pr_debug("%s\n", __func__);
647 clk_lock_save(cld->dfll_clk, &flags);
648 cl_dvfs_calibrate(cld);
649 clk_unlock_restore(cld->dfll_clk, &flags);
/*
 * Program the h/w frequency request from @req: compute the (scaled,
 * clamped) force value, write frequency/scale/force with the VALID bit,
 * then - with an explicit read-back fence plus delay - set FORCE_ENABLE
 * if it was not set already.
 */
652 static void set_request(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
655 int force_val = req->output - cld->safe_output;
656 int coef = 128; /* FIXME: cld->p_data->cfg_param->cg_scale? */;
658 /* If going down apply force output floor */
659 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
660 f = (val & CL_DVFS_FREQ_REQ_FREQ_MASK) >> CL_DVFS_FREQ_REQ_FREQ_SHIFT;
661 if ((!(val & CL_DVFS_FREQ_REQ_FREQ_VALID) || (f > req->freq)) &&
662 (cld->force_out_min > req->output))
663 force_val = cld->force_out_min - cld->safe_output;
/* Scale by loop gain and clamp to the signed 12-bit force field. */
665 force_val = force_val * coef / cld->p_data->cfg_param->cg;
666 force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
669 * 1st set new frequency request and force values, then set force enable
670 * bit (if not set already). Use same CL_DVFS_FREQ_REQ register read
671 * (not other cl_dvfs register) plus explicit delay as a fence.
673 val &= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
674 val |= req->freq << CL_DVFS_FREQ_REQ_FREQ_SHIFT;
675 val |= req->scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
676 val |= ((u32)force_val << CL_DVFS_FREQ_REQ_FORCE_SHIFT) &
677 CL_DVFS_FREQ_REQ_FORCE_MASK;
678 val |= CL_DVFS_FREQ_REQ_FREQ_VALID;
679 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/* Read-back acts as the fence described above. */
681 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
683 if (!(val & CL_DVFS_FREQ_REQ_FORCE_ENABLE)) {
684 udelay(1); /* 1us (big margin) window for force value settle */
685 val |= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
686 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
/* Lowest LUT index whose voltage is >= @mv (cap from below);
   returns the top index if @mv is above the entire table. */
691 static u8 find_mv_out_cap(struct tegra_cl_dvfs *cld, int mv)
696 for (cap = 0; cap < cld->num_voltages; cap++) {
697 uv = cld->out_map[cap]->reg_uV;
701 return cap - 1; /* maximum possible output */
/* Highest LUT index whose voltage is <= @mv (floor from above);
   returns 0 if @mv is below the entire table. */
704 static u8 find_mv_out_floor(struct tegra_cl_dvfs *cld, int mv)
709 for (floor = 0; floor < cld->num_voltages; floor++) {
710 uv = cld->out_map[floor]->reg_uV;
711 if (uv > mv * 1000) {
713 return 0; /* minimum possible output */
/* Map @rate to its safe output LUT index via the legacy dvfs table. */
720 static int find_safe_output(
721 struct tegra_cl_dvfs *cld, unsigned long rate, u8 *safe_output)
724 int n = cld->safe_dvfs->num_freqs;
725 unsigned long *freqs = cld->safe_dvfs->freqs;
727 for (i = 0; i < n; i++) {
728 if (freqs[i] >= rate) {
729 *safe_output = cld->clk_dvfs_map[i];
/* Lowest dvfs frequency whose mapped output exceeds @out_min. */
736 static unsigned long find_dvco_rate_min(struct tegra_cl_dvfs *cld, u8 out_min)
740 for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
741 if (cld->clk_dvfs_map[i] > out_min)
745 return cld->safe_dvfs->freqs[i];
/*
 * Set the baseline dvco_rate_min for the current thermal floor and derive
 * the (deliberately up-skewed) calibration clamp range around it.
 */
748 static void cl_dvfs_set_dvco_rate_min(struct tegra_cl_dvfs *cld)
750 unsigned long rate = cld->safe_dvfs->dfll_data.out_rate_min;
751 if (cld->therm_floor_idx < cld->therm_floors_num)
752 rate = find_dvco_rate_min(
753 cld, cld->thermal_out_floors[cld->therm_floor_idx]);
755 /* round minimum rate to request unit (ref_rate/2) boundary */
756 cld->dvco_rate_min = ROUND_MIN_RATE(rate, cld->ref_rate);
758 /* dvco min rate is under-estimated - skewed range up */
759 cld->calibration_range_min = cld->dvco_rate_min - 4 * RATE_STEP(cld);
760 cld->calibration_range_max = cld->dvco_rate_min + 8 * RATE_STEP(cld);
/*
 * Compute force_out_min: thermal floor voltage plus the PMU undershoot
 * guard-band, converted back to a LUT index (0 disables the floor).
 */
763 static void cl_dvfs_set_force_out_min(struct tegra_cl_dvfs *cld)
765 u8 force_out_min = 0;
766 int force_mv_min = cld->p_data->pmu_undershoot_gb;
769 cld->force_out_min = 0;
773 if (cld->therm_floor_idx < cld->therm_floors_num)
774 force_out_min = cld->thermal_out_floors[cld->therm_floor_idx];
775 force_mv_min += cld->out_map[force_out_min]->reg_uV / 1000;
776 force_out_min = find_mv_out_cap(cld, force_mv_min);
777 if (force_out_min == cld->safe_output)
779 cld->force_out_min = force_out_min;
/*
 * Find the platform vdd_map entry for @mV: first entry at/above it when
 * @exact is false, or only a 1mV-rounded exact match when @exact is true.
 * Returns NULL on no match (return path elided from this excerpt).
 */
782 static struct voltage_reg_map *find_vdd_map_entry(
783 struct tegra_cl_dvfs *cld, int mV, bool exact)
787 for (i = 0; i < cld->p_data->vdd_map_size; i++) {
788 /* round down to 1mV */
789 reg_mV = cld->p_data->vdd_map[i].reg_uV / 1000;
794 if (i < cld->p_data->vdd_map_size) {
795 if (!exact || (mV == reg_mV))
796 return &cld->p_data->vdd_map[i];
/*
 * Build the two output-voltage mappings: out_map[] (LUT index -> PMU
 * register value/voltage pair) with deduplicated, roughly evenly spaced
 * steps from min_millivolts to v_max, and clk_dvfs_map[] (legacy dvfs
 * frequency index -> LUT index).
 */
801 static void cl_dvfs_init_maps(struct tegra_cl_dvfs *cld)
803 int i, j, v, v_max, n;
804 const int *millivolts;
805 struct voltage_reg_map *m;
807 BUILD_BUG_ON(MAX_CL_DVFS_VOLTAGES > OUT_MASK + 1);
809 n = cld->safe_dvfs->num_freqs;
810 BUG_ON(n >= MAX_CL_DVFS_VOLTAGES);
812 millivolts = cld->safe_dvfs->dfll_millivolts;
813 v_max = millivolts[n - 1];
815 v = cld->safe_dvfs->dfll_data.min_millivolts;
816 BUG_ON(v > millivolts[0]);
/* LUT entry 0 must match min_millivolts exactly. */
818 cld->out_map[0] = find_vdd_map_entry(cld, v, true);
819 BUG_ON(!cld->out_map[0]);
821 for (i = 0, j = 1; i < n; i++) {
/* Spread remaining LUT slots evenly over the voltage left to cover. */
823 v += max(1, (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j));
824 if (v >= millivolts[i])
827 m = find_vdd_map_entry(cld, v, false);
829 if (m != cld->out_map[j - 1])
830 cld->out_map[j++] = m;
/* Each dvfs-table voltage itself gets an exact-match LUT entry. */
834 m = find_vdd_map_entry(cld, v, true);
836 if (m != cld->out_map[j - 1])
837 cld->out_map[j++] = m;
838 cld->clk_dvfs_map[i] = j - 1;
840 BUG_ON(j > MAX_CL_DVFS_VOLTAGES);
841 cld->num_voltages = j;
/*
 * Derive the high-tuning-range output thresholds (tune_high_out_min and
 * tune_high_out_start) from tune_high_min_millivolts, and bump
 * minimax_output so regulation room exists above the start threshold.
 */
844 static void cl_dvfs_init_tuning_thresholds(struct tegra_cl_dvfs *cld)
849 * Convert high tuning voltage threshold into output LUT index, and
850 * add necessary margin. If voltage threshold is outside operating
851 * range set it at maximum output level to effectively disable tuning
852 * parameters adjustment.
854 cld->tune_high_out_min = cld->num_voltages - 1;
855 cld->tune_high_out_start = cld->num_voltages - 1;
856 mv = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
857 if (mv >= cld->safe_dvfs->dfll_data.min_millivolts) {
858 u8 out_min = find_mv_out_cap(cld, mv);
859 u8 out_start = find_mv_out_cap(
860 cld, mv + CL_DVFS_TUNE_HIGH_MARGIN_MV);
861 out_start = max(out_start, (u8)(out_min + 1));
862 if ((out_start + 1) < cld->num_voltages) {
863 cld->tune_high_out_min = out_min;
864 cld->tune_high_out_start = out_start;
865 if (cld->minimax_output <= out_start)
866 cld->minimax_output = out_start + 1;
/*
 * Convert the rail's high-temperature voltage caps into LUT indexes
 * (thermal_out_caps[]); warns if the matching cooling device is absent.
 */
871 static void cl_dvfs_init_hot_output_cap(struct tegra_cl_dvfs *cld)
874 if (!cld->safe_dvfs->dvfs_rail->therm_mv_caps ||
875 !cld->safe_dvfs->dvfs_rail->therm_mv_caps_num)
878 if (!cld->safe_dvfs->dvfs_rail->vmax_cdev)
879 WARN(1, "%s: missing dfll cap cooling device\n",
880 cld->safe_dvfs->dvfs_rail->reg_id);
882 * Convert monotonically decreasing thermal caps at high temperature
883 * into output LUT indexes; make sure there is a room for regulation
884 * below minimum thermal cap.
886 cld->therm_caps_num = cld->safe_dvfs->dvfs_rail->therm_mv_caps_num;
887 for (i = 0; i < cld->therm_caps_num; i++) {
888 cld->thermal_out_caps[i] = find_mv_out_floor(
889 cld, cld->safe_dvfs->dvfs_rail->therm_mv_caps[i]);
891 BUG_ON(cld->thermal_out_caps[cld->therm_caps_num - 1] <
892 cld->minimax_output);
/*
 * Convert the rail's low-temperature voltage floors into LUT indexes
 * (thermal_out_floors[]); mirror of the hot-cap conversion above.
 */
895 static void cl_dvfs_init_cold_output_floor(struct tegra_cl_dvfs *cld)
898 if (!cld->safe_dvfs->dvfs_rail->therm_mv_floors ||
899 !cld->safe_dvfs->dvfs_rail->therm_mv_floors_num)
902 if (!cld->safe_dvfs->dvfs_rail->vmin_cdev)
903 WARN(1, "%s: missing dfll floor cooling device\n",
904 cld->safe_dvfs->dvfs_rail->reg_id);
906 * Convert monotonically decreasing thermal floors at low temperature
907 * into output LUT indexes; make sure there is a room for regulation
908 * above maximum thermal floor.
910 cld->therm_floors_num = cld->safe_dvfs->dvfs_rail->therm_mv_floors_num;
911 for (i = 0; i < cld->therm_floors_num; i++) {
912 cld->thermal_out_floors[i] = find_mv_out_cap(
913 cld, cld->safe_dvfs->dvfs_rail->therm_mv_floors[i]);
915 BUG_ON(cld->thermal_out_floors[0] + 2 >= cld->num_voltages);
916 if (cld->minimax_output <= cld->thermal_out_floors[0])
917 cld->minimax_output = cld->thermal_out_floors[0] + 1;
/*
 * Initialize all output thresholds in dependency order: tuning and cold
 * floors first (they raise minimax_output), then safe_output, then hot
 * caps, which are validated against the final minimax_output.
 */
920 static void cl_dvfs_init_output_thresholds(struct tegra_cl_dvfs *cld)
922 cld->minimax_output = 0;
923 cl_dvfs_init_tuning_thresholds(cld);
924 cl_dvfs_init_cold_output_floor(cld);
926 /* make sure safe output is safe at any temperature */
927 cld->safe_output = cld->thermal_out_floors[0] ? : 1;
928 if (cld->minimax_output <= cld->safe_output)
929 cld->minimax_output = cld->safe_output + 1;
931 /* init caps after minimax output is determined */
932 cl_dvfs_init_hot_output_cap(cld);
/* PWM PMU interface: intentionally unimplemented (I2C only). */
935 static void cl_dvfs_init_pwm_if(struct tegra_cl_dvfs *cld)
937 /* FIXME: not supported */
/*
 * Program the PMU I2C interface from platform data: slave address,
 * vdd register offset, transfer mode, and FS/HS clock divisors derived
 * from the I2C source clock rate.
 */
940 static void cl_dvfs_init_i2c_if(struct tegra_cl_dvfs *cld)
943 struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
944 bool hs_mode = p_data->u.pmu_i2c.hs_rate;
946 /* PMU slave address, vdd register offset, and transfer mode */
947 val = p_data->u.pmu_i2c.slave_addr << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT;
948 if (p_data->u.pmu_i2c.addr_10)
949 val |= CL_DVFS_I2C_CFG_SLAVE_ADDR_10;
951 val |= p_data->u.pmu_i2c.hs_master_code <<
952 CL_DVFS_I2C_CFG_HS_CODE_SHIFT;
953 val |= CL_DVFS_I2C_CFG_PACKET_ENABLE;
955 val |= CL_DVFS_I2C_CFG_SIZE_MASK;
956 val |= CL_DVFS_I2C_CFG_ARB_ENABLE;
957 cl_dvfs_writel(cld, val, CL_DVFS_I2C_CFG);
958 cl_dvfs_writel(cld, p_data->u.pmu_i2c.reg, CL_DVFS_I2C_VDD_REG_ADDR);
/* FS divisor (x8 scale); divisor fields are programmed as value - 1. */
961 val = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.fs_rate, 8);
962 BUG_ON(!val || (val > CL_DVFS_I2C_CLK_DIVISOR_MASK));
963 val = (val - 1) << CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT;
/* HS divisor (x12 scale); falls back to 2 when HS rate is unset. */
965 div = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.hs_rate, 12);
966 BUG_ON(!div || (div > CL_DVFS_I2C_CLK_DIVISOR_MASK));
968 div = 2; /* default hs divisor just in case */
970 val |= (div - 1) << CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT;
971 cl_dvfs_writel(cld, val, CL_DVFS_I2C_CLK_DIVISOR);
/*
 * Initialize the output (voltage) interface: reset thermal indexes and
 * rate limits, program safe/min/max output levels either via dynamic
 * OUTPUT_CFG limits or via LUT clamping, clear interrupts, load the LUT,
 * and finally configure the PMU transport (I2C or PWM).
 */
975 static void cl_dvfs_init_out_if(struct tegra_cl_dvfs *cld)
977 u32 val, out_min, out_max;
980 * Disable output, and set safe voltage and output limits;
981 * disable and clear limit interrupts.
983 cld->tune_state = TEGRA_CL_DVFS_TUNE_LOW;
984 cld->therm_cap_idx = cld->therm_caps_num;
985 cld->therm_floor_idx = 0;
986 cl_dvfs_set_dvco_rate_min(cld);
987 cl_dvfs_set_force_out_min(cld);
989 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
991 * If h/w supports dynamic chanage of output register, limit
992 * LUT * index range using cl_dvfs h/w controls, and load full
993 * range LUT table once.
995 out_min = get_output_min(cld);
996 out_max = get_output_cap(cld, NULL);
998 cld->lut_max = cld->num_voltages - 1;
1001 * Allow the entire range of LUT indexes, but limit output
1002 * voltage in LUT mapping (this "indirect" application of limits
1003 * is used, because h/w does not support dynamic change of index
1004 * limits, but dynamic reload of LUT is fine).
1007 out_max = cld->num_voltages - 1;
1008 cld->lut_min = get_output_min(cld);
1009 cld->lut_max = get_output_cap(cld, NULL);
1012 val = (cld->safe_output << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT) |
1013 (out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT) |
1014 (out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT);
1015 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
/* No forced output; disable then ack both limit interrupts. */
1018 cl_dvfs_writel(cld, 0, CL_DVFS_OUTPUT_FORCE);
1019 cl_dvfs_writel(cld, 0, CL_DVFS_INTR_EN);
1020 cl_dvfs_writel(cld, CL_DVFS_INTR_MAX_MASK | CL_DVFS_INTR_MIN_MASK,
1023 /* fill in LUT table */
1024 cl_dvfs_load_lut(cld);
1025 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
1026 /* dynamic update of output register allowed - no need to reload
1027 lut - use lut limits as output register setting shadow */
1028 cld->lut_min = out_min;
1029 cld->lut_max = out_max;
1032 /* configure transport */
1033 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
1034 cl_dvfs_init_i2c_if(cld);
1036 cl_dvfs_init_pwm_if(cld);
/*
 * Initialize the control loop in DISABLED mode: sample-rate divider,
 * loop gain parameters, DFLL tuning registers, droop (skipper 1) and
 * scale (skipper 2) configuration, and the frequency monitor.
 */
1039 static void cl_dvfs_init_cntrl_logic(struct tegra_cl_dvfs *cld)
1042 struct tegra_cl_dvfs_cfg_param *param = cld->p_data->cfg_param;
1044 /* configure mode, control loop parameters, DFLL tuning */
1045 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1047 val = GET_DIV(cld->ref_rate, param->sample_rate, 32);
1048 BUG_ON(val > CL_DVFS_CONFIG_DIV_MASK);
1049 cl_dvfs_writel(cld, val, CL_DVFS_CONFIG);
1051 val = (param->force_mode << CL_DVFS_PARAMS_FORCE_MODE_SHIFT) |
1052 (param->cf << CL_DVFS_PARAMS_CF_PARAM_SHIFT) |
1053 (param->ci << CL_DVFS_PARAMS_CI_PARAM_SHIFT) |
1054 ((u8)param->cg << CL_DVFS_PARAMS_CG_PARAM_SHIFT) |
1055 (param->cg_scale ? CL_DVFS_PARAMS_CG_SCALE : 0);
1056 cl_dvfs_writel(cld, val, CL_DVFS_PARAMS);
/* Start in the low tuning range (trimmers low as well). */
1058 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
1059 cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune1, CL_DVFS_TUNE1);
1061 if (cld->safe_dvfs->dfll_data.tune_trimmers)
1062 cld->safe_dvfs->dfll_data.tune_trimmers(false);
1064 /* configure droop (skipper 1) and scale (skipper 2) */
1065 val = GET_DROOP_FREQ(cld->safe_dvfs->dfll_data.droop_rate_min,
1066 cld->ref_rate) << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT;
1067 BUG_ON(val > CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK);
1068 val |= (param->droop_cut_value << CL_DVFS_DROOP_CTRL_CUT_SHIFT);
1069 val |= (param->droop_restore_ramp << CL_DVFS_DROOP_CTRL_RAMP_SHIFT);
1070 cl_dvfs_writel(cld, val, CL_DVFS_DROOP_CTRL);
/* Seed last_req from h/w scale; zero everything else. */
1072 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ) &
1073 CL_DVFS_FREQ_REQ_SCALE_MASK;
1074 cld->last_req.scale = val >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1075 cld->last_req.cap = 0;
1076 cld->last_req.freq = 0;
1077 cld->last_req.output = 0;
1078 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
1079 cl_dvfs_writel(cld, param->scale_out_ramp, CL_DVFS_SCALE_RAMP);
1081 /* select frequency for monitoring */
1082 cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ, CL_DVFS_MONITOR_CTRL);
/* Enable module clocks (plus the I2C clock when using the I2C PMU i/f). */
1086 static int cl_dvfs_enable_clocks(struct tegra_cl_dvfs *cld)
1088 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
1089 clk_enable(cld->i2c_clk);
1091 clk_enable(cld->ref_clk);
1092 clk_enable(cld->soc_clk);
/* Reverse of cl_dvfs_enable_clocks(). */
1096 static void cl_dvfs_disable_clocks(struct tegra_cl_dvfs *cld)
1098 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
1099 clk_disable(cld->i2c_clk);
1101 clk_disable(cld->ref_clk);
1102 clk_disable(cld->soc_clk);
/*
 * One-time driver initialization: enable and probe clocks (I2C PMU
 * interface required), set up the tune and calibration timers, build the
 * voltage maps and thresholds, program the PMU interface, configure the
 * control logic in disabled mode, then gate the clocks again.
 */
1105 static int cl_dvfs_init(struct tegra_cl_dvfs *cld)
1109 /* Enable output inerface clock */
1110 if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C) {
1111 ret = clk_enable(cld->i2c_clk);
1113 pr_err("%s: Failed to enable %s\n",
1114 __func__, cld->i2c_clk->name);
1117 cld->i2c_rate = clk_get_rate(cld->i2c_clk);
1119 pr_err("%s: PMU interface is not I2C\n", __func__);
1123 /* Enable module clocks, release control logic reset */
1124 ret = clk_enable(cld->ref_clk);
1126 pr_err("%s: Failed to enable %s\n",
1127 __func__, cld->ref_clk->name);
/* NOTE(review): the error message below names ref_clk while enabling
   soc_clk - looks like a copy/paste slip; confirm against full source. */
1130 ret = clk_enable(cld->soc_clk);
1132 pr_err("%s: Failed to enable %s\n",
1133 __func__, cld->ref_clk->name);
1136 cld->ref_rate = clk_get_rate(cld->ref_clk);
1137 BUG_ON(!cld->ref_rate);
1139 /* init tuning timer */
1140 init_timer(&cld->tune_timer);
1141 cld->tune_timer.function = tune_timer_cb;
1142 cld->tune_timer.data = (unsigned long)cld;
1143 cld->tune_delay = usecs_to_jiffies(CL_DVFS_TUNE_HIGH_DELAY);
1145 /* init calibration timer */
1146 init_timer_deferrable(&cld->calibration_timer);
1147 cld->calibration_timer.function = calibration_timer_cb;
1148 cld->calibration_timer.data = (unsigned long)cld;
1149 cld->calibration_delay = usecs_to_jiffies(CL_DVFS_CALIBR_TIME);
1151 /* Get ready ouput voltage mapping*/
1152 cl_dvfs_init_maps(cld);
1154 /* Setup output range thresholds */
1155 cl_dvfs_init_output_thresholds(cld);
1157 /* Setup PMU interface */
1158 cl_dvfs_init_out_if(cld);
1160 /* Configure control registers in disabled mode and disable clocks */
1161 cl_dvfs_init_cntrl_logic(cld);
1162 cl_dvfs_disable_clocks(cld);
/*
 * Re-initialize and enable target device clock in open loop mode. Called
 * directly from SoC clock resume syscore operation. Closed loop will be
 * re-entered in platform syscore ops as well.
 */
void tegra_cl_dvfs_resume(struct tegra_cl_dvfs *cld)
{
	/* Snapshot mode and last request: re-init below clobbers them */
	enum tegra_cl_dvfs_ctrl_mode mode = cld->mode;
	struct dfll_rate_req req = cld->last_req;

	cl_dvfs_enable_clocks(cld);

	/* Setup PMU interface, and configure controls in disabled mode */
	cl_dvfs_init_out_if(cld);
	cl_dvfs_init_cntrl_logic(cld);

	cl_dvfs_disable_clocks(cld);

	/* Restore last request and mode */
	cld->last_req = req;
	if (mode != TEGRA_CL_DVFS_DISABLED) {
		/* Closed loop cannot be restored here; syscore ops re-lock */
		set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
		WARN(mode > TEGRA_CL_DVFS_OPEN_LOOP,
		     "DFLL was left locked in suspend\n");
	}
}
1194 #ifdef CONFIG_THERMAL
1195 /* cl_dvfs cap cooling device */
1196 static int tegra_cl_dvfs_get_vmax_cdev_max_state(
1197 struct thermal_cooling_device *cdev, unsigned long *max_state)
1199 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1200 *max_state = cld->therm_caps_num;
1204 static int tegra_cl_dvfs_get_vmax_cdev_cur_state(
1205 struct thermal_cooling_device *cdev, unsigned long *cur_state)
1207 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1208 *cur_state = cld->therm_cap_idx;
1212 static int tegra_cl_dvfs_set_vmax_cdev_state(
1213 struct thermal_cooling_device *cdev, unsigned long cur_state)
1215 unsigned long flags;
1216 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1218 clk_lock_save(cld->dfll_clk, &flags);
1220 if (cld->therm_cap_idx != cur_state) {
1221 cld->therm_cap_idx = cur_state;
1222 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1223 tegra_cl_dvfs_request_rate(cld,
1224 tegra_cl_dvfs_request_get(cld));
1227 clk_unlock_restore(cld->dfll_clk, &flags);
/* Operations for the vmax (voltage cap) cooling device */
static struct thermal_cooling_device_ops tegra_cl_dvfs_vmax_cool_ops = {
	.get_max_state = tegra_cl_dvfs_get_vmax_cdev_max_state,
	.get_cur_state = tegra_cl_dvfs_get_vmax_cdev_cur_state,
	.set_cur_state = tegra_cl_dvfs_set_vmax_cdev_state,
};
1237 /* cl_dvfs vmin cooling device */
1238 static int tegra_cl_dvfs_get_vmin_cdev_max_state(
1239 struct thermal_cooling_device *cdev, unsigned long *max_state)
1241 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1242 *max_state = cld->therm_floors_num;
1246 static int tegra_cl_dvfs_get_vmin_cdev_cur_state(
1247 struct thermal_cooling_device *cdev, unsigned long *cur_state)
1249 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1250 *cur_state = cld->therm_floor_idx;
1254 static int tegra_cl_dvfs_set_vmin_cdev_state(
1255 struct thermal_cooling_device *cdev, unsigned long cur_state)
1257 unsigned long flags;
1258 struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1260 clk_lock_save(cld->dfll_clk, &flags);
1262 if (cld->therm_floor_idx != cur_state) {
1263 cld->therm_floor_idx = cur_state;
1264 cl_dvfs_set_dvco_rate_min(cld);
1265 cl_dvfs_set_force_out_min(cld);
1266 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1267 tegra_cl_dvfs_request_rate(cld,
1268 tegra_cl_dvfs_request_get(cld));
1271 clk_unlock_restore(cld->dfll_clk, &flags);
/* Operations for the vmin (voltage floor) cooling device */
static struct thermal_cooling_device_ops tegra_cl_dvfs_vmin_cool_ops = {
	.get_max_state = tegra_cl_dvfs_get_vmin_cdev_max_state,
	.get_cur_state = tegra_cl_dvfs_get_vmin_cdev_cur_state,
	.set_cur_state = tegra_cl_dvfs_set_vmin_cdev_state,
};
1281 static void tegra_cl_dvfs_init_cdev(struct work_struct *work)
1283 struct tegra_cl_dvfs *cld = container_of(
1284 work, struct tegra_cl_dvfs, init_cdev_work);
1286 /* just report error - initialized at WC temperature, anyway */
1287 if (cld->safe_dvfs->dvfs_rail->vmin_cdev) {
1288 char *type = cld->safe_dvfs->dvfs_rail->vmin_cdev->cdev_type;
1289 cld->vmin_cdev = thermal_cooling_device_register(
1290 type, (void *)cld, &tegra_cl_dvfs_vmin_cool_ops);
1291 if (IS_ERR_OR_NULL(cld->vmin_cdev)) {
1292 cld->vmin_cdev = NULL;
1293 pr_err("tegra cooling device %s failed to register\n",
1297 pr_info("%s cooling device is registered\n", type);
1300 if (cld->safe_dvfs->dvfs_rail->vmax_cdev) {
1301 char *type = cld->safe_dvfs->dvfs_rail->vmax_cdev->cdev_type;
1302 cld->vmax_cdev = thermal_cooling_device_register(
1303 type, (void *)cld, &tegra_cl_dvfs_vmax_cool_ops);
1304 if (IS_ERR_OR_NULL(cld->vmax_cdev)) {
1305 cld->vmax_cdev = NULL;
1306 pr_err("tegra cooling device %s failed to register\n",
1310 pr_info("%s cooling device is registered\n", type);
1315 #ifdef CONFIG_PM_SLEEP
1317 * cl_dvfs controls clock/voltage to other devices, including CPU. Therefore,
1318 * cl_dvfs driver pm suspend callback does not stop cl-dvfs operations. It is
only used to enforce cold/hot voltage limit, since temperature may change in
suspend without waking up. The correct temperature zone after suspend will
1321 * be updated via cl_dvfs cooling device interface during resume of temperature
1324 static int tegra_cl_dvfs_suspend_cl(struct device *dev)
1326 unsigned long flags;
1327 struct tegra_cl_dvfs *cld = dev_get_drvdata(dev);
1329 clk_lock_save(cld->dfll_clk, &flags);
1331 cld->vmax_cdev->updated = false;
1332 cld->therm_cap_idx = cld->therm_caps_num;
1334 cld->vmin_cdev->updated = false;
1335 cld->therm_floor_idx = 0;
1336 cl_dvfs_set_dvco_rate_min(cld);
1337 cl_dvfs_set_force_out_min(cld);
1338 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1339 set_cl_config(cld, &cld->last_req);
1340 set_request(cld, &cld->last_req);
1342 clk_unlock_restore(cld->dfll_clk, &flags);
/* Suspend-only PM ops: no resume hook here (see comment above) */
static const struct dev_pm_ops tegra_cl_dvfs_pm_ops = {
	.suspend = tegra_cl_dvfs_suspend_cl,
};
1352 static int __init tegra_cl_dvfs_probe(struct platform_device *pdev)
1355 struct tegra_cl_dvfs_platform_data *p_data;
1356 struct resource *res;
1357 struct tegra_cl_dvfs *cld;
1358 struct clk *ref_clk, *soc_clk, *i2c_clk, *safe_dvfs_clk, *dfll_clk;
1361 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1363 dev_err(&pdev->dev, "missing register base\n");
1367 p_data = pdev->dev.platform_data;
1368 if (!p_data || !p_data->cfg_param || !p_data->vdd_map) {
1369 dev_err(&pdev->dev, "missing platform data\n");
1373 ref_clk = clk_get(&pdev->dev, "ref");
1374 soc_clk = clk_get(&pdev->dev, "soc");
1375 i2c_clk = clk_get(&pdev->dev, "i2c");
1376 safe_dvfs_clk = clk_get(&pdev->dev, "safe_dvfs");
1377 dfll_clk = clk_get(&pdev->dev, p_data->dfll_clk_name);
1378 if (IS_ERR(ref_clk) || IS_ERR(soc_clk) || IS_ERR(i2c_clk)) {
1379 dev_err(&pdev->dev, "missing control clock\n");
1382 if (IS_ERR(safe_dvfs_clk)) {
1383 dev_err(&pdev->dev, "missing safe dvfs source clock\n");
1384 return PTR_ERR(safe_dvfs_clk);
1386 if (IS_ERR(dfll_clk)) {
1387 dev_err(&pdev->dev, "missing target dfll clock\n");
1388 return PTR_ERR(dfll_clk);
1390 if (!safe_dvfs_clk->dvfs || !safe_dvfs_clk->dvfs->dvfs_rail) {
1391 dev_err(&pdev->dev, "invalid safe dvfs source\n");
1395 /* Allocate cl_dvfs object and populate resource accessors */
1396 cld = kzalloc(sizeof(*cld), GFP_KERNEL);
1398 dev_err(&pdev->dev, "failed to allocate cl_dvfs object\n");
1402 cld->cl_base = IO_ADDRESS(res->start);
1403 cld->p_data = p_data;
1404 cld->ref_clk = ref_clk;
1405 cld->soc_clk = soc_clk;
1406 cld->i2c_clk = i2c_clk;
1407 cld->dfll_clk = dfll_clk;
1408 cld->safe_dvfs = safe_dvfs_clk->dvfs;
1409 #ifdef CONFIG_THERMAL
1410 INIT_WORK(&cld->init_cdev_work, tegra_cl_dvfs_init_cdev);
1412 /* Initialize cl_dvfs */
1413 ret = cl_dvfs_init(cld);
1419 platform_set_drvdata(pdev, cld);
1422 * Schedule cooling device registration as a separate work to address
1423 * the following race: when cl_dvfs is probed the DFLL child clock
1424 * (e.g., CPU) cannot be changed; on the other hand cooling device
1425 * registration will update the entire thermal zone, and may trigger
1426 * rate change of the target clock
1428 if (cld->safe_dvfs->dvfs_rail->vmin_cdev ||
1429 cld->safe_dvfs->dvfs_rail->vmax_cdev)
1430 schedule_work(&cld->init_cdev_work);
/* Platform driver; bound once via platform_driver_probe() below */
static struct platform_driver tegra_cl_dvfs_driver = {
	.driver = {
		.name = "tegra_cl_dvfs",
		.owner = THIS_MODULE,
#ifdef CONFIG_PM_SLEEP
		.pm = &tegra_cl_dvfs_pm_ops,
#endif
	},
};
/* Init entry point: bind the driver to the cl_dvfs platform device */
int __init tegra_init_cl_dvfs(void)
{
	return platform_driver_probe(&tegra_cl_dvfs_driver,
				     tegra_cl_dvfs_probe);
}
1453 * - DISABLED: control logic mode - DISABLED, output interface disabled,
1455 * - OPEN_LOOP: control logic mode - OPEN_LOOP, output interface disabled,
1456 * dfll is running "unlocked"
1457 * - CLOSED_LOOP: control logic mode - CLOSED_LOOP, output interface enabled,
1458 * dfll is running "locked"
/* Switch from any other state to DISABLED state */
void tegra_cl_dvfs_disable(struct tegra_cl_dvfs *cld)
{
	switch (cld->mode) {
	case TEGRA_CL_DVFS_CLOSED_LOOP:
		/* Direct disable from closed loop is not a supported path -
		   callers should unlock first */
		WARN(1, "DFLL is disabled directly from closed loop mode\n");
		output_disable_ol_prepare(cld);
		set_mode(cld, TEGRA_CL_DVFS_DISABLED);
		output_disable_post_ol(cld);
		invalidate_request(cld);
		cl_dvfs_disable_clocks(cld);
		return;

	case TEGRA_CL_DVFS_OPEN_LOOP:
		set_mode(cld, TEGRA_CL_DVFS_DISABLED);
		invalidate_request(cld);
		cl_dvfs_disable_clocks(cld);
		return;

	default:
		/* Already DISABLED or UNINITIALIZED: nothing to do */
		BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
		return;
	}
}
1487 /* Switch from DISABLE state to OPEN_LOOP state */
1488 int tegra_cl_dvfs_enable(struct tegra_cl_dvfs *cld)
1490 if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1491 pr_err("%s: Cannot enable DFLL in %s mode\n",
1492 __func__, mode_name[cld->mode]);
1496 if (cld->mode != TEGRA_CL_DVFS_DISABLED)
1499 cl_dvfs_enable_clocks(cld);
1500 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
/* Switch from OPEN_LOOP state to CLOSED_LOOP state */
int tegra_cl_dvfs_lock(struct tegra_cl_dvfs *cld)
{
	struct dfll_rate_req *req = &cld->last_req;

	switch (cld->mode) {
	case TEGRA_CL_DVFS_CLOSED_LOOP:
		/* Already locked */
		return 0;

	case TEGRA_CL_DVFS_OPEN_LOOP:
		if (req->freq == 0) {
			pr_err("%s: Cannot lock DFLL at rate 0\n", __func__);
			return -EINVAL;
		}

		/*
		 * Update control logic setting with last rate request;
		 * sync output limits with current tuning and thermal state,
		 * enable output and switch to closed loop mode.
		 */
		set_cl_config(cld, req);
		output_enable(cld); /* NOTE(review): restored - this line was
				       elided in the extract; confirm */
		set_mode(cld, TEGRA_CL_DVFS_CLOSED_LOOP);
		set_request(cld, req);
		calibration_timer_update(cld);
		return 0;

	default:
		BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
		pr_err("%s: Cannot lock DFLL in %s mode\n",
		       __func__, mode_name[cld->mode]);
		return -EPERM;
	}
}
/* Switch from CLOSED_LOOP state to OPEN_LOOP state */
int tegra_cl_dvfs_unlock(struct tegra_cl_dvfs *cld)
{
	int ret;

	switch (cld->mode) {
	case TEGRA_CL_DVFS_CLOSED_LOOP:
		/* Prepare output for open loop before switching mode */
		ret = output_disable_ol_prepare(cld);
		set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
		if (!ret)
			ret = output_disable_post_ol(cld);
		return ret;

	case TEGRA_CL_DVFS_OPEN_LOOP:
		/* Already unlocked */
		return 0;

	default:
		BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
		pr_err("%s: Cannot unlock DFLL in %s mode\n",
		       __func__, mode_name[cld->mode]);
		return -EPERM;
	}
}
/*
 * Convert requested rate into the control logic settings. In CLOSED_LOOP mode,
 * update new settings immediately to adjust DFLL output rate accordingly.
 * Otherwise, just save them until next switch to closed loop.
 */
int tegra_cl_dvfs_request_rate(struct tegra_cl_dvfs *cld, unsigned long rate)
{
	u32 val;
	struct dfll_rate_req req;
	req.rate = rate;	/* original rate, reported by request_get() */

	if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
		pr_err("%s: Cannot set DFLL rate in %s mode\n",
		       __func__, mode_name[cld->mode]);
		return -EPERM;
	}

	/* Calibrate dfll minimum rate */
	cl_dvfs_calibrate(cld);

	/* Determine DFLL output scale */
	req.scale = SCALE_MAX - 1;
	if (rate < cld->dvco_rate_min) {
		/* Below DVCO minimum: use the output skipper; kHz units
		   avoid 32-bit overflow in the intermediate product */
		int scale = DIV_ROUND_CLOSEST((rate / 1000 * SCALE_MAX),
			(cld->dvco_rate_min / 1000));
		if (!scale) {
			pr_err("%s: Rate %lu is below scalable range\n",
			       __func__, rate);
			return -EINVAL;
		}
		req.scale = scale - 1;
		rate = cld->dvco_rate_min;
	}

	/* Convert requested rate into frequency request and scale settings */
	val = GET_REQUEST_FREQ(rate, cld->ref_rate);
	if (val > FREQ_MAX) {
		pr_err("%s: Rate %lu is above dfll range\n", __func__, rate);
		return -EINVAL;
	}
	req.freq = val;
	/* Round rate to what the frequency request actually encodes */
	rate = GET_REQUEST_RATE(val, cld->ref_rate);

	/* Find safe voltage for requested rate */
	if (find_safe_output(cld, rate, &req.output)) {
		pr_err("%s: Failed to find safe output for rate %lu\n",
		       __func__, rate);
		return -EINVAL;
	}
	req.cap = req.output;

	/*
	 * Save validated request, and in CLOSED_LOOP mode actually update
	 * control logic settings; use request output to set maximum voltage
	 * limit, but keep one LUT step room above safe voltage
	 */
	cld->last_req = req;

	if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
		set_cl_config(cld, &cld->last_req);
		set_request(cld, &cld->last_req);
	}
	return 0;
}
/* Return the rate currently requested from the DFLL */
unsigned long tegra_cl_dvfs_request_get(struct tegra_cl_dvfs *cld)
{
	struct dfll_rate_req *req = &cld->last_req;

	/*
	 * If running below dvco minimum rate with skipper resolution:
	 * dvco min rate / 256 - return last requested rate rounded to 1kHz.
	 * If running above dvco minimum, with closed loop resolution:
	 * ref rate / 2 - return cl_dvfs target rate.
	 */
	if ((req->scale + 1) < SCALE_MAX)
		return req->rate / 1000 * 1000;

	return GET_REQUEST_RATE(req->freq, cld->ref_rate);
}
1645 #ifdef CONFIG_DEBUG_FS
/* debugfs "lock": read 1 when the DFLL is in closed loop mode */
static int lock_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP;
	return 0;
}
/* debugfs "lock": write non-zero to lock, zero to unlock the DFLL */
static int lock_set(void *data, u64 val)
{
	struct clk *c = (struct clk *)data;
	return tegra_clk_cfg_ex(c, TEGRA_CLK_DFLL_LOCK, val);
}
DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");
/*
 * debugfs "monitor": read the monitored value. When the monitor is set to
 * frequency mode, convert counts to Hz and apply the output skipper scale;
 * otherwise return the raw monitor data.
 */
static int monitor_get(void *data, u64 *val)
{
	u32 v, s;
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;

	clk_enable(cld->soc_clk);

	clk_lock_save(c, &flags);
	v = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
		CL_DVFS_MONITOR_DATA_MASK;

	if (cl_dvfs_readl(cld, CL_DVFS_MONITOR_CTRL) ==
	    CL_DVFS_MONITOR_CTRL_FREQ) {
		v = GET_MONITORED_RATE(v, cld->ref_rate);
		s = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
		s = (s & CL_DVFS_FREQ_REQ_SCALE_MASK) >>
			CL_DVFS_FREQ_REQ_SCALE_SHIFT;
		/* Apply skipper scale: effective rate = v * (s+1)/256 */
		*val = (u64)v * (s + 1) / 256;

		clk_unlock_restore(c, &flags);
		clk_disable(cld->soc_clk);
		return 0;
	}

	/* Non-frequency monitor mode: return raw data */
	*val = v;

	clk_unlock_restore(c, &flags);
	clk_disable(cld->soc_clk);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
/* debugfs "vmax_mv": current output voltage cap in mV */
static int vmax_get(void *data, u64 *val)
{
	u8 v;
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	/* NOTE(review): assignment of v (current cap index) was elided in
	   the extract - presumably the output-cap helper; confirm */
	v = cl_dvfs_get_output_cap(cld, NULL);
	*val = cld->out_map[v]->reg_uV / 1000;	/* uV -> mV */
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vmax_fops, vmax_get, NULL, "%llu\n");
/* debugfs "vmin_mv": current output voltage floor in mV */
static int vmin_get(void *data, u64 *val)
{
	u8 v;
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	/* NOTE(review): assignment of v (current floor index) was elided in
	   the extract - presumably the output-min helper; confirm */
	v = cl_dvfs_get_output_min(cld);
	*val = cld->out_map[v]->reg_uV / 1000;	/* uV -> mV */
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vmin_fops, vmin_get, NULL, "%llu\n");
/* debugfs "tune_high_mv": read high-tuning minimum voltage (mV) */
static int tune_high_mv_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
	return 0;
}
/*
 * debugfs "tune_high_mv": set high-tuning minimum voltage (mV), rebuild
 * output thresholds, and re-apply the current request in closed loop.
 */
static int tune_high_mv_set(void *data, u64 val)
{
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_lock_save(c, &flags);

	cld->safe_dvfs->dfll_data.tune_high_min_millivolts = val;
	cl_dvfs_init_output_thresholds(cld);
	if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
		set_cl_config(cld, &cld->last_req);
		set_request(cld, &cld->last_req);
	}

	clk_unlock_restore(c, &flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(tune_high_mv_fops, tune_high_mv_get, tune_high_mv_set,
			"%llu\n");
/* debugfs "dvco_min": read the calibrated DVCO minimum rate (Hz) */
static int fmin_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->dvco_rate_min;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dvco_rate_min_fops, fmin_get, NULL, "%llu\n");
/* debugfs "calibr_delay": read calibration interval in ms */
static int calibr_delay_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = jiffies_to_msecs(cld->calibration_delay);
	return 0;
}
/* debugfs "calibr_delay": set calibration interval (ms, stored as jiffies) */
static int calibr_delay_set(void *data, u64 val)
{
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_lock_save(c, &flags);
	cld->calibration_delay = msecs_to_jiffies(val);
	clk_unlock_restore(c, &flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(calibr_delay_fops, calibr_delay_get, calibr_delay_set,
			"%llu\n");
/* debugfs "pmu_undershoot_gb": read PMU undershoot guard-band */
static int undershoot_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->p_data->pmu_undershoot_gb;
	return 0;
}
/* debugfs "pmu_undershoot_gb": set guard-band and update forced minimum */
static int undershoot_set(void *data, u64 val)
{
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_lock_save(c, &flags);
	cld->p_data->pmu_undershoot_gb = val;
	cl_dvfs_set_force_out_min(cld);
	clk_unlock_restore(c, &flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(undershoot_fops, undershoot_get, undershoot_set,
			"%llu\n");
/*
 * debugfs "registers" read: dump control, I2C/interrupt, and LUT register
 * banks. soc_clk must be on for register access.
 */
static int cl_register_show(struct seq_file *s, void *data)
{
	u32 offs;
	struct clk *c = s->private;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_enable(cld->soc_clk);

	seq_printf(s, "CONTROL REGISTERS:\n");
	for (offs = 0; offs <= CL_DVFS_MONITOR_DATA; offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	seq_printf(s, "\nI2C and INTR REGISTERS:\n");
	for (offs = CL_DVFS_I2C_CFG; offs <= CL_DVFS_I2C_STS; offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	/* Interrupt registers are not contiguous with the I2C bank */
	offs = CL_DVFS_INTR_STS;
	seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
	offs = CL_DVFS_INTR_EN;
	seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));

	seq_printf(s, "\nLUT:\n");
	for (offs = CL_DVFS_OUTPUT_LUT;
	     offs < CL_DVFS_OUTPUT_LUT + 4 * MAX_CL_DVFS_VOLTAGES;
	     offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	clk_disable(cld->soc_clk);
	return 0;
}
/* debugfs open: route reads through seq_file single_open */
static int cl_register_open(struct inode *inode, struct file *file)
{
	return single_open(file, cl_register_show, inode->i_private);
}
/*
 * debugfs "registers" write: parse "[0xOFFS] = 0xVAL" and write VAL to the
 * word-aligned register at OFFS. Intended for debug only - no range check
 * on the offset beyond word alignment.
 */
static ssize_t cl_register_write(struct file *file,
	const char __user *userbuf, size_t count, loff_t *ppos)
{
	char buf[80];
	u32 offs;
	u32 val;
	struct clk *c = file->f_path.dentry->d_inode->i_private;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	if (sizeof(buf) <= count)
		return -EINVAL;

	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	/* terminate buffer and trim - white spaces may be appended
	 * at the end when invoked from shell command line */
	buf[count] = '\0';
	strim(buf);

	if (sscanf(buf, "[0x%x] = 0x%x", &offs, &val) != 2)
		return -EINVAL;

	clk_enable(cld->soc_clk);
	cl_dvfs_writel(cld, val, offs & (~0x3));	/* force alignment */
	clk_disable(cld->soc_clk);

	return count;
}
/* File ops for the debugfs "registers" node (seq_file based) */
static const struct file_operations cl_register_fops = {
	.open = cl_register_open,
	.read = seq_read, /* NOTE(review): .read line elided in extract -
			     confirm against original */
	.write = cl_register_write,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Create the cl_dvfs debugfs hierarchy under the dfll clock's dentry:
 * "lock" at the top, plus a "cl_dvfs" directory with monitor/voltage/
 * tuning/register nodes. On any failure the whole clock dentry is removed.
 */
int __init tegra_cl_dvfs_debug_init(struct clk *dfll_clk)
{
	struct dentry *cl_dvfs_dentry;

	/* Nothing to expose for a missing or uninitialized dfll clock */
	if (!dfll_clk || !dfll_clk->dent || (dfll_clk->state == UNINITIALIZED))
		return 0;

	if (!debugfs_create_file("lock", S_IRUGO | S_IWUSR,
		dfll_clk->dent, dfll_clk, &lock_fops))
		goto err_out;

	cl_dvfs_dentry = debugfs_create_dir("cl_dvfs", dfll_clk->dent);
	if (!cl_dvfs_dentry)
		goto err_out;

	if (!debugfs_create_file("monitor", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &monitor_fops))
		goto err_out;

	if (!debugfs_create_file("vmax_mv", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &vmax_fops))
		goto err_out;

	if (!debugfs_create_file("vmin_mv", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &vmin_fops))
		goto err_out;

	if (!debugfs_create_file("tune_high_mv", S_IRUGO | S_IWUSR,
		cl_dvfs_dentry, dfll_clk, &tune_high_mv_fops))
		goto err_out;

	if (!debugfs_create_file("dvco_min", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &dvco_rate_min_fops))
		goto err_out;

	if (!debugfs_create_file("calibr_delay", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &calibr_delay_fops))
		goto err_out;

	if (!debugfs_create_file("pmu_undershoot_gb", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &undershoot_fops))
		goto err_out;

	if (!debugfs_create_file("registers", S_IRUGO | S_IWUSR,
		cl_dvfs_dentry, dfll_clk, &cl_register_fops))
		goto err_out;

	return 0;

err_out:
	/* Tear down everything created so far, including the clock dentry */
	debugfs_remove_recursive(dfll_clk->dent);
	return -ENOMEM;
}