1 /*
2  * arch/arm/mach-tegra/tegra_cl_dvfs.c
3  *
4  * Copyright (c) 2012-2013 NVIDIA CORPORATION. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/err.h>
23 #include <linux/io.h>
24 #include <linux/clk.h>
25 #include <linux/interrupt.h>
26 #include <linux/suspend.h>
27 #include <linux/debugfs.h>
28 #include <linux/seq_file.h>
29 #include <linux/uaccess.h>
30 #include <linux/module.h>
31 #include <linux/platform_device.h>
32 #include <linux/gpio.h>
33 #include <linux/regulator/tegra-dfll-bypass-regulator.h>
34 #include <linux/tegra-soc.h>
35
36 #include <mach/irqs.h>
37 #include <mach/pinmux.h>
38
39 #include "tegra_cl_dvfs.h"
40 #include "clock.h"
41 #include "dvfs.h"
42 #include "iomap.h"
43
44 #define OUT_MASK                        0x3f
45
46 #define CL_DVFS_CTRL                    0x00
47 #define CL_DVFS_CONFIG                  0x04
48 #define CL_DVFS_CONFIG_DIV_MASK         0xff
49
50 #define CL_DVFS_PARAMS                  0x08
51 #define CL_DVFS_PARAMS_CG_SCALE         (0x1 << 24)
52 #define CL_DVFS_PARAMS_FORCE_MODE_SHIFT 22
53 #define CL_DVFS_PARAMS_FORCE_MODE_MASK  (0x3 << CL_DVFS_PARAMS_FORCE_MODE_SHIFT)
54 #define CL_DVFS_PARAMS_CF_PARAM_SHIFT   16
55 #define CL_DVFS_PARAMS_CF_PARAM_MASK    (0x3f << CL_DVFS_PARAMS_CF_PARAM_SHIFT)
56 #define CL_DVFS_PARAMS_CI_PARAM_SHIFT   8
57 #define CL_DVFS_PARAMS_CI_PARAM_MASK    (0x7 << CL_DVFS_PARAMS_CI_PARAM_SHIFT)
58 #define CL_DVFS_PARAMS_CG_PARAM_SHIFT   0
59 #define CL_DVFS_PARAMS_CG_PARAM_MASK    (0xff << CL_DVFS_PARAMS_CG_PARAM_SHIFT)
60
61 #define CL_DVFS_TUNE0                   0x0c
62 #define CL_DVFS_TUNE1                   0x10
63
64 #define CL_DVFS_FREQ_REQ                0x14
65 #define CL_DVFS_FREQ_REQ_FORCE_ENABLE   (0x1 << 28)
66 #define CL_DVFS_FREQ_REQ_FORCE_SHIFT    16
67 #define CL_DVFS_FREQ_REQ_FORCE_MASK     (0xfff << CL_DVFS_FREQ_REQ_FORCE_SHIFT)
68 #define FORCE_MAX                       2047
69 #define FORCE_MIN                       -2048
70 #define CL_DVFS_FREQ_REQ_SCALE_SHIFT    8
71 #define CL_DVFS_FREQ_REQ_SCALE_MASK     (0xff << CL_DVFS_FREQ_REQ_SCALE_SHIFT)
72 #define SCALE_MAX                       256
73 #define CL_DVFS_FREQ_REQ_FREQ_VALID     (0x1 << 7)
74 #define CL_DVFS_FREQ_REQ_FREQ_SHIFT     0
75 #define CL_DVFS_FREQ_REQ_FREQ_MASK      (0x7f << CL_DVFS_FREQ_REQ_FREQ_SHIFT)
76 #define FREQ_MAX                        127
77
78 #define CL_DVFS_SCALE_RAMP              0x18
79
80 #define CL_DVFS_DROOP_CTRL              0x1c
81 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT 16
82 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK  \
83                 (0xff << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT)
84 #define CL_DVFS_DROOP_CTRL_CUT_SHIFT    8
85 #define CL_DVFS_DROOP_CTRL_CUT_MASK     (0xf << CL_DVFS_DROOP_CTRL_CUT_SHIFT)
86 #define CL_DVFS_DROOP_CTRL_RAMP_SHIFT   0
87 #define CL_DVFS_DROOP_CTRL_RAMP_MASK    (0xff << CL_DVFS_DROOP_CTRL_RAMP_SHIFT)
88
89 #define CL_DVFS_OUTPUT_CFG              0x20
90 #define CL_DVFS_OUTPUT_CFG_I2C_ENABLE   (0x1 << 30)
91 #define CL_DVFS_OUTPUT_CFG_SAFE_SHIFT   24
92 #define CL_DVFS_OUTPUT_CFG_SAFE_MASK    \
93                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT)
94 #define CL_DVFS_OUTPUT_CFG_MAX_SHIFT    16
95 #define CL_DVFS_OUTPUT_CFG_MAX_MASK     \
96                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MAX_SHIFT)
97 #define CL_DVFS_OUTPUT_CFG_MIN_SHIFT    8
98 #define CL_DVFS_OUTPUT_CFG_MIN_MASK     \
99                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MIN_SHIFT)
100 #define CL_DVFS_OUTPUT_CFG_PWM_DELTA    (0x1 << 7)
101 #define CL_DVFS_OUTPUT_CFG_PWM_ENABLE   (0x1 << 6)
102 #define CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT 0
103 #define CL_DVFS_OUTPUT_CFG_PWM_DIV_MASK  \
104                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT)
105
106 #define CL_DVFS_OUTPUT_FORCE            0x24
107 #define CL_DVFS_OUTPUT_FORCE_ENABLE     (0x1 << 6)
108 #define CL_DVFS_OUTPUT_FORCE_VALUE_SHIFT 0
109 #define CL_DVFS_OUTPUT_FORCE_VALUE_MASK  \
110                 (OUT_MASK << CL_DVFS_OUTPUT_FORCE_VALUE_SHIFT)
111
112 #define CL_DVFS_MONITOR_CTRL            0x28
113 #define CL_DVFS_MONITOR_CTRL_DISABLE    0
114 #define CL_DVFS_MONITOR_CTRL_OUT        5
115 #define CL_DVFS_MONITOR_CTRL_FREQ       6
116 #define CL_DVFS_MONITOR_DATA            0x2c
117 #define CL_DVFS_MONITOR_DATA_NEW        (0x1 << 16)
118 #define CL_DVFS_MONITOR_DATA_MASK       0xFFFF
119
120 #define CL_DVFS_I2C_CFG                 0x40
121 #define CL_DVFS_I2C_CFG_ARB_ENABLE      (0x1 << 20)
122 #define CL_DVFS_I2C_CFG_HS_CODE_SHIFT   16
123 #define CL_DVFS_I2C_CFG_HS_CODE_MASK    (0x7 << CL_DVFS_I2C_CFG_HS_CODE_SHIFT)
124 #define CL_DVFS_I2C_CFG_PACKET_ENABLE   (0x1 << 15)
125 #define CL_DVFS_I2C_CFG_SIZE_SHIFT      12
126 #define CL_DVFS_I2C_CFG_SIZE_MASK       (0x7 << CL_DVFS_I2C_CFG_SIZE_SHIFT)
127 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_10   (0x1 << 10)
128 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT 0
129 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_MASK \
130                 (0x3ff << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT)
131
132 #define CL_DVFS_I2C_VDD_REG_ADDR        0x44
133 #define CL_DVFS_I2C_STS                 0x48
134 #define CL_DVFS_I2C_STS_I2C_LAST_SHIFT  1
135 #define CL_DVFS_I2C_STS_I2C_REQ_PENDING 0x1
136
137 #define CL_DVFS_INTR_STS                0x5c
138 #define CL_DVFS_INTR_EN                 0x60
139 #define CL_DVFS_INTR_MIN_MASK           0x1
140 #define CL_DVFS_INTR_MAX_MASK           0x2
141
142 #define CL_DVFS_I2C_CLK_DIVISOR         0x16c
143 #define CL_DVFS_I2C_CLK_DIVISOR_MASK    0xffff
144 #define CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT 16
145 #define CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT 0
146
147 #define CL_DVFS_OUTPUT_LUT              0x200
148
149 #define CL_DVFS_CALIBR_TIME             40000
150 #define CL_DVFS_OUTPUT_PENDING_TIMEOUT  1000
151 #define CL_DVFS_OUTPUT_RAMP_DELAY       100
152 #define CL_DVFS_TUNE_HIGH_DELAY         2000
153
154 #define CL_DVFS_TUNE_HIGH_MARGIN_MV     20
155
156 enum tegra_cl_dvfs_ctrl_mode {
157         TEGRA_CL_DVFS_UNINITIALIZED = 0,
158         TEGRA_CL_DVFS_DISABLED = 1,
159         TEGRA_CL_DVFS_OPEN_LOOP = 2,
160         TEGRA_CL_DVFS_CLOSED_LOOP = 3,
161 };
162
163 enum tegra_cl_dvfs_tune_state {
164         TEGRA_CL_DVFS_TUNE_LOW = 0,
165         TEGRA_CL_DVFS_TUNE_HIGH_REQUEST,
166         TEGRA_CL_DVFS_TUNE_HIGH,
167 };
168
169 struct dfll_rate_req {
170         u8      freq;
171         u8      scale;
172         u8      output;
173         u8      cap;
174         unsigned long rate;
175 };
176
177 struct tegra_cl_dvfs {
178         void                                    *cl_base;
179         struct tegra_cl_dvfs_platform_data      *p_data;
180
181         struct dvfs                     *safe_dvfs;
182         struct thermal_cooling_device   *vmax_cdev;
183         struct thermal_cooling_device   *vmin_cdev;
184         struct work_struct              init_cdev_work;
185
186         struct clk                      *soc_clk;
187         struct clk                      *ref_clk;
188         struct clk                      *i2c_clk;
189         struct clk                      *dfll_clk;
190         unsigned long                   ref_rate;
191         unsigned long                   i2c_rate;
192
193         /* output voltage mapping:
194          * legacy dvfs table index -to- cl_dvfs output LUT index
195          * cl_dvfs output LUT index -to- PMU value/voltage pair ptr
196          */
197         u8                              clk_dvfs_map[MAX_DVFS_FREQS];
198         struct voltage_reg_map          *out_map[MAX_CL_DVFS_VOLTAGES];
199         u8                              num_voltages;
200         u8                              safe_output;
201         u8                              tune_high_out_start;
202         u8                              tune_high_out_min;
203         u8                              minimax_output;
204         u8                              thermal_out_caps[MAX_THERMAL_LIMITS];
205         u8                              thermal_out_floors[MAX_THERMAL_LIMITS];
206         int                             therm_caps_num;
207         int                             therm_floors_num;
208         unsigned long                   dvco_rate_floors[MAX_THERMAL_LIMITS+1];
209         unsigned long                   dvco_rate_min;
210
211         u8                              lut_min;
212         u8                              lut_max;
213         u8                              force_out_min;
214         u32                             suspended_force_out;
215         int                             therm_cap_idx;
216         int                             therm_floor_idx;
217         struct dfll_rate_req            last_req;
218         enum tegra_cl_dvfs_tune_state   tune_state;
219         enum tegra_cl_dvfs_ctrl_mode    mode;
220
221         struct timer_list               tune_timer;
222         unsigned long                   tune_delay;
223         struct timer_list               calibration_timer;
224         unsigned long                   calibration_delay;
225         ktime_t                         last_calibration;
226         unsigned long                   calibration_range_min;
227         unsigned long                   calibration_range_max;
228 };
229
230 /* Conversion macros (using different scales for the frequency request and the
231    monitored rate is not a typo) */
232 #define RATE_STEP(cld)                          ((cld)->ref_rate / 2)
233 #define GET_REQUEST_FREQ(rate, ref_rate)        ((rate) / ((ref_rate) / 2))
234 #define GET_REQUEST_RATE(freq, ref_rate)        ((freq) * ((ref_rate) / 2))
235 #define GET_MONITORED_RATE(freq, ref_rate)      ((freq) * ((ref_rate) / 4))
236 #define GET_DROOP_FREQ(rate, ref_rate)          ((rate) / ((ref_rate) / 4))
237 #define ROUND_MIN_RATE(rate, ref_rate)          \
238                 (DIV_ROUND_UP(rate, (ref_rate) / 2) * ((ref_rate) / 2))
239 #define GET_DIV(ref_rate, out_rate, scale)      \
240                 DIV_ROUND_UP((ref_rate), (out_rate) * (scale))
241
242 static const char *mode_name[] = {
243         [TEGRA_CL_DVFS_UNINITIALIZED] = "uninitialized",
244         [TEGRA_CL_DVFS_DISABLED] = "disabled",
245         [TEGRA_CL_DVFS_OPEN_LOOP] = "open_loop",
246         [TEGRA_CL_DVFS_CLOSED_LOOP] = "closed_loop",
247 };
248
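/*
 * Register access helpers: raw read/write of the cl_dvfs MMIO block, plus a
 * write barrier that flushes posted writes by reading back CL_DVFS_CTRL.
 */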
249 static inline u32 cl_dvfs_readl(struct tegra_cl_dvfs *cld, u32 offs)
250 {
251         return __raw_readl((void *)cld->cl_base + offs);
252 }
253 static inline void cl_dvfs_writel(struct tegra_cl_dvfs *cld, u32 val, u32 offs)
254 {
255         __raw_writel(val, (void *)cld->cl_base + offs);
256 }
257 static inline void cl_dvfs_wmb(struct tegra_cl_dvfs *cld)
258 {
259         wmb();
260         cl_dvfs_readl(cld, CL_DVFS_CTRL);
261 }
262
263 static inline void switch_monitor(struct tegra_cl_dvfs *cld, u32 selector)
264 {
265         /* delay to make sure selector has switched */
266         cl_dvfs_writel(cld, selector, CL_DVFS_MONITOR_CTRL);
267         cl_dvfs_wmb(cld);
268         udelay(1);
269 }
270
271 static inline void invalidate_request(struct tegra_cl_dvfs *cld)
272 {
273         u32 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
274         val &= ~CL_DVFS_FREQ_REQ_FREQ_VALID;
275         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
276         cl_dvfs_wmb(cld);
277 }
278
279 static inline void disable_forced_output(struct tegra_cl_dvfs *cld)
280 {
281         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
282         val &= ~CL_DVFS_OUTPUT_FORCE_ENABLE;
283         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
284         cl_dvfs_wmb(cld);
285 }
286
287 static inline u32 get_last_output(struct tegra_cl_dvfs *cld)
288 {
289         switch_monitor(cld, CL_DVFS_MONITOR_CTRL_OUT);
290         return cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
291                 CL_DVFS_MONITOR_DATA_MASK;
292 }
293
294 /* output monitored before forced value is applied - return the latter if enabled */
295 static inline u32 cl_dvfs_get_output(struct tegra_cl_dvfs *cld)
296 {
297         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
298         if (val & CL_DVFS_OUTPUT_FORCE_ENABLE)
299                 return val & OUT_MASK;
300         return get_last_output(cld);
301 }
302
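/*
 * Output values are LUT indexes when the PMU is driven over I2C, and raw PMU
 * register values when it is driven by PWM; the helpers below return the
 * bottom and top of the available output range accordingly.
 */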
303 static inline bool is_i2c(struct tegra_cl_dvfs *cld)
304 {
305         return cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C;
306 }
307
308 static inline u8 get_output_bottom(struct tegra_cl_dvfs *cld)
309 {
310         return is_i2c(cld) ? 0 : cld->out_map[0]->reg_value;
311 }
312
313 static inline u8 get_output_top(struct tegra_cl_dvfs *cld)
314 {
315         return is_i2c(cld) ?  cld->num_voltages - 1 :
316                 cld->out_map[cld->num_voltages - 1]->reg_value;
317 }
318
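/*
 * Enable voltage output to the PMU: set the I2C enable bit for the I2C
 * interface; for PWM use the external buffer gpio or the PWM pingroup
 * tristate control when populated, otherwise the native PWM enable bit.
 */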
319 static int output_enable(struct tegra_cl_dvfs *cld)
320 {
321         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
322
323         if (is_i2c(cld)) {
324                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
325         } else {
326                 int pg, gpio = cld->p_data->u.pmu_pwm.out_gpio;
327                 if (gpio) {
328                         int v = cld->p_data->u.pmu_pwm.out_enable_high ? 1 : 0;
329                         __gpio_set_value(gpio, v);
330                         return 0;
331                 }
332
333                 pg = cld->p_data->u.pmu_pwm.pwm_pingroup;
334                 if (pg) {
335                         tegra_pinmux_set_tristate(pg, TEGRA_TRI_NORMAL);
336                         return 0;
337                 }
338
339                 val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
340         }
341
342         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
343         cl_dvfs_wmb(cld);
344         return 0;
345 }
346
347 static int output_disable_pwm(struct tegra_cl_dvfs *cld)
348 {
349         u32 val;
350
351         int pg, gpio = cld->p_data->u.pmu_pwm.out_gpio;
352         if (gpio) {
353                 int v = cld->p_data->u.pmu_pwm.out_enable_high ? 0 : 1;
354                 __gpio_set_value(gpio, v);
355                 return 0;
356         }
357
358         pg = cld->p_data->u.pmu_pwm.pwm_pingroup;
359         if (pg) {
360                 tegra_pinmux_set_tristate(pg, TEGRA_TRI_TRISTATE);
361                 return 0;
362         }
363
364         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
365         val &= ~CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
366         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
367         cl_dvfs_wmb(cld);
368         return 0;
369 }
370
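/*
 * Wait for in-flight I2C transactions to complete, then clear the I2C enable
 * bit; if a request is still pending after the timeout, disable anyway and
 * return -ETIMEDOUT.
 */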
371 static noinline int output_flush_disable(struct tegra_cl_dvfs *cld)
372 {
373         int i;
374         u32 sts;
375         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
376
377         /* Flush transactions in flight, and then disable */
378         for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
379                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
380                 udelay(2);
381                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
382                         sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
383                         if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
384                                 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
385                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
386                                 wmb();
387                                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
388                                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
389                                         return 0; /* no pending rqst */
390
391                                 /* Re-enable, continue wait */
392                                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
393                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
394                                 wmb();
395                         }
396                 }
397         }
398
399         /* I2C request is still pending - disable anyway, but report error */
400         val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
401         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
402         cl_dvfs_wmb(cld);
403         return -ETIMEDOUT;
404 }
405
406 static noinline int output_disable_flush(struct tegra_cl_dvfs *cld)
407 {
408         int i;
409         u32 sts;
410         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
411
412         /* Disable output interface right away */
413         val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
414         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
415         cl_dvfs_wmb(cld);
416
417         /* Flush possible transaction in flight */
418         for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
419                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
420                 udelay(2);
421                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
422                         sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
423                         if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
424                                 return 0;
425                 }
426         }
427
428         /* I2C request is still pending - report error */
429         return -ETIMEDOUT;
430 }
431
432 static inline int output_disable_ol_prepare(struct tegra_cl_dvfs *cld)
433 {
434         /* PWM output control */
435         if (!is_i2c(cld))
436                 return output_disable_pwm(cld);
437
438         /*
439          * If cl-dvfs h/w does not require output to be quiet before disable,
440          * s/w can stop I2C communications at any time (including operations
441          * in closed loop mode), and I2C bus integrity is guaranteed even in
442          * case of flush timeout.
443          */
444         if (!(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET)) {
445                 int ret = output_disable_flush(cld);
446                 if (ret)
447                         pr_debug("cl_dvfs: I2C pending timeout ol_prepare\n");
448                 return ret;
449         }
450         return 0;
451 }
452
453 static inline int output_disable_post_ol(struct tegra_cl_dvfs *cld)
454 {
455         /* PWM output control */
456         if (!is_i2c(cld))
457                 return 0;
458
459         /*
460          * If cl-dvfs h/w requires output to be quiet before disable, s/w
461          * should stop I2C communications only after the switch to open loop
462          * mode, and I2C bus integrity is not guaranteed in case of flush
463          * timeout.
464          */
465         if (cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) {
466                 int ret = output_flush_disable(cld);
467                 if (ret)
468                         pr_err("cl_dvfs: I2C pending timeout post_ol\n");
469                 return ret;
470         }
471         return 0;
472 }
473
474 static inline void set_mode(struct tegra_cl_dvfs *cld,
475                             enum tegra_cl_dvfs_ctrl_mode mode)
476 {
477         cld->mode = mode;
478         cl_dvfs_writel(cld, mode - 1, CL_DVFS_CTRL);
479         cl_dvfs_wmb(cld);
480 }
481
482 static inline u8 get_output_cap(struct tegra_cl_dvfs *cld,
483                                 struct dfll_rate_req *req)
484 {
485         u32 thermal_cap = get_output_top(cld);
486
487         if (cld->therm_cap_idx && (cld->therm_cap_idx <= cld->therm_caps_num))
488                 thermal_cap = cld->thermal_out_caps[cld->therm_cap_idx - 1];
489         if (req && (req->cap < thermal_cap))
490                 return req->cap;
491         return thermal_cap;
492 }
493
494 static inline u8 get_output_min(struct tegra_cl_dvfs *cld)
495 {
496         u32 tune_min = get_output_bottom(cld);
497         u32 thermal_min = tune_min;
498
499         tune_min = cld->tune_state == TEGRA_CL_DVFS_TUNE_LOW ?
500                 tune_min : cld->tune_high_out_min;
501
502         if (cld->therm_floor_idx < cld->therm_floors_num)
503                 thermal_min = cld->thermal_out_floors[cld->therm_floor_idx];
504
505         return max(tune_min, thermal_min);
506 }
507
508 static inline void _load_lut(struct tegra_cl_dvfs *cld)
509 {
510         int i;
511         u32 val;
512
513         val = cld->out_map[cld->lut_min]->reg_value;
514         for (i = 0; i <= cld->lut_min; i++)
515                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
516
517         for (; i < cld->lut_max; i++) {
518                 val = cld->out_map[i]->reg_value;
519                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
520         }
521
522         val = cld->out_map[cld->lut_max]->reg_value;
523         for (; i < cld->num_voltages; i++)
524                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
525
526         cl_dvfs_wmb(cld);
527 }
528
529 static void cl_dvfs_load_lut(struct tegra_cl_dvfs *cld)
530 {
531         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
532         bool disable_out_for_load =
533                 !(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) &&
534                 (val & CL_DVFS_OUTPUT_CFG_I2C_ENABLE);
535
536         if (disable_out_for_load) {
537                 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
538                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
539                 cl_dvfs_wmb(cld);
540                 udelay(2); /* 2us (big margin) window for disable propagation */
541         }
542
543         _load_lut(cld);
544
545         if (disable_out_for_load) {
546                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
547                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
548                 cl_dvfs_wmb(cld);
549         }
550 }
551
552 #define set_tune_state(cld, state) \
553         do {                                                            \
554                 cld->tune_state = state;                                \
555                 pr_debug("%s: set tune state %d\n", __func__, state);   \
556         } while (0)
557
558 static inline void tune_low(struct tegra_cl_dvfs *cld)
559 {
560         /* required order: first tune dfll low, then tune trimmers low */
561         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
562         cl_dvfs_wmb(cld);
563         if (cld->safe_dvfs->dfll_data.tune_trimmers)
564                 cld->safe_dvfs->dfll_data.tune_trimmers(false);
565 }
566
567 static inline void tune_high(struct tegra_cl_dvfs *cld)
568 {
569         /* required order: first tune trimmers high, then tune dfll high */
570         if (cld->safe_dvfs->dfll_data.tune_trimmers)
571                 cld->safe_dvfs->dfll_data.tune_trimmers(true);
572         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0_high_mv,
573                        CL_DVFS_TUNE0);
574         cl_dvfs_wmb(cld);
575 }
576
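/*
 * Prepare open loop configuration: revert to low (safe) tuning values and the
 * matching output floor, then set 1:1 skipper scale with forcing disabled.
 */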
577 static void set_ol_config(struct tegra_cl_dvfs *cld)
578 {
579         u32 val, out_min;
580
581         /* always tune low (safe) in open loop */
582         if (cld->tune_state != TEGRA_CL_DVFS_TUNE_LOW) {
583                 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
584                 tune_low(cld);
585
586                 out_min = get_output_min(cld);
587                 if (cld->lut_min != out_min) {
588                         cld->lut_min = out_min;
589                         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
590                                 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
591                                 val &= ~CL_DVFS_OUTPUT_CFG_MIN_MASK;
592                                 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
593                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
594                         } else {
595                                 cl_dvfs_load_lut(cld);
596                         }
597                 }
598         }
599
600         /* 1:1 scaling in open loop */
601         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
602         val |= (SCALE_MAX - 1) << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
603         val &= ~CL_DVFS_FREQ_REQ_FORCE_ENABLE;
604         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
605 }
606
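/*
 * Prepare closed loop configuration for a new rate request: start or cancel
 * the switch to high tuning values based on the requested output cap, choose
 * the request output between the current floor and cap (skipping the safe
 * level), and update the output limits in OUTPUT_CFG or via LUT reload.
 */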
607 static void set_cl_config(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
608 {
609         u32 out_max, out_min;
610         u32 out_cap = get_output_cap(cld, req);
611
612         switch (cld->tune_state) {
613         case TEGRA_CL_DVFS_TUNE_LOW:
614                 if (out_cap > cld->tune_high_out_start) {
615                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST);
616                         mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
617                 }
618                 break;
619
620         case TEGRA_CL_DVFS_TUNE_HIGH:
621         case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST:
622                 if (out_cap <= cld->tune_high_out_start) {
623                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
624                         tune_low(cld);
625                 }
626                 break;
627         default:
628                 BUG();
629         }
630
631         out_min = get_output_min(cld);
632         if (out_cap > (out_min + 1))
633                 req->output = out_cap - 1;
634         else
635                 req->output = out_min + 1;
636         if (req->output == cld->safe_output)
637                 req->output++;
638         out_max = max((u8)(req->output + 1), cld->minimax_output);
639         out_max = max((u8)(out_max), cld->force_out_min);
640
641         if ((cld->lut_min != out_min) || (cld->lut_max != out_max)) {
642                 cld->lut_min = out_min;
643                 cld->lut_max = out_max;
644                 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
645                         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
646                         val &= ~(CL_DVFS_OUTPUT_CFG_MAX_MASK |
647                                  CL_DVFS_OUTPUT_CFG_MIN_MASK);
648                         val |= out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT;
649                         val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
650                         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
651                 } else {
652                         cl_dvfs_load_lut(cld);
653                 }
654         }
655 }
656
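/*
 * Tune timer: complete a pending switch to high tuning values once the last
 * output sent to the PMU and the LUT minimum are both at or above
 * tune_high_out_min and no I2C request is pending; otherwise re-arm the timer.
 */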
657 static void tune_timer_cb(unsigned long data)
658 {
659         unsigned long flags;
660         u32 val, out_min, out_last;
661         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
662
663         clk_lock_save(cld->dfll_clk, &flags);
664
665         if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST) {
666                 out_min = cld->lut_min;
667                 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
668                 out_last = is_i2c(cld) ?
669                         (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK :
670                         out_min; /* no way to stall PWM: out_last >= out_min */
671
672                 if (!(val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) &&
673                     (out_last >= cld->tune_high_out_min)  &&
674                     (out_min >= cld->tune_high_out_min)) {
675                         udelay(CL_DVFS_OUTPUT_RAMP_DELAY);
676                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
677                         tune_high(cld);
678                 } else {
679                         mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
680                 }
681         }
682         clk_unlock_restore(cld->dfll_clk, &flags);
683 }
684
685 static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
686 {
687         if (!cld->calibration_delay)
688                 return;
689         mod_timer(&cld->calibration_timer, jiffies + cld->calibration_delay);
690 }
691
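/*
 * Calibrate dvco_rate_min: sample the monitored DVCO rate and the last output
 * level, then either lower the estimate by one step (output above the minimum,
 * or rate below the estimate) or raise it toward the measured rate; the result
 * is clamped to the calibration range and the timer re-armed until it settles.
 */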
692 static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
693 {
694         u32 val;
695         ktime_t now;
696         unsigned long data;
697         unsigned long step = RATE_STEP(cld);
698         unsigned long rate_min = cld->dvco_rate_min;
699         u8 out_min = get_output_min(cld);
700
701         /*
702          *  Enter calibration procedure only if
703          *  - closed loop operations
704          *  - last request engaged clock skipper
705          *  - at least specified time after the last calibration attempt
706          */
707         if ((cld->mode != TEGRA_CL_DVFS_CLOSED_LOOP) ||
708             (cld->last_req.rate > rate_min))
709                 return;
710
711         now = ktime_get();
712         if (ktime_us_delta(now, cld->last_calibration) < CL_DVFS_CALIBR_TIME)
713                 return;
714         cld->last_calibration = now;
715
716         /* Synchronize with sample period, and get rate measurements */
717         switch_monitor(cld, CL_DVFS_MONITOR_CTRL_FREQ);
718         data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
719         do {
720                 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
721         } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
722         do {
723                 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
724         } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
725
726         if (is_i2c(cld)) {
727                 /* Defer calibration if I2C transaction is pending */
728                 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
729                 if (val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) {
730                         calibration_timer_update(cld);
731                         return;
732                 }
733         } else {
734                 /* Forced output must be disabled in closed loop mode */
735                 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
736                 if (val & CL_DVFS_OUTPUT_FORCE_ENABLE) {
737                         disable_forced_output(cld);
738                         calibration_timer_update(cld);
739                         return;
740                 }
741                 /* Get last output (there is no such thing as pending PWM) */
742                 val = get_last_output(cld);
743         }
744
745         /* Adjust minimum rate */
746         data &= CL_DVFS_MONITOR_DATA_MASK;
747         data = GET_MONITORED_RATE(data, cld->ref_rate);
748         if ((val > out_min) || (data < (rate_min - step)))
749                 rate_min -= step;
750         else if (data > (cld->dvco_rate_min + step))
751                 rate_min += (data - rate_min) / step * step;
752         else {
753                 cld->dvco_rate_floors[cld->therm_floor_idx] = rate_min;
754                 return;
755         }
756
757         cld->dvco_rate_min = clamp(rate_min,
758                         cld->calibration_range_min, cld->calibration_range_max);
759         calibration_timer_update(cld);
760         pr_debug("%s: calibrated dvco_rate_min %lu\n",
761                  __func__, cld->dvco_rate_min);
762 }
763
764 static void calibration_timer_cb(unsigned long data)
765 {
766         unsigned long flags;
767         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
768
769         pr_debug("%s\n", __func__);
770
771         clk_lock_save(cld->dfll_clk, &flags);
772         cl_dvfs_calibrate(cld);
773         clk_unlock_restore(cld->dfll_clk, &flags);
774 }
775
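/*
 * Program a new frequency request: write the frequency, skipper scale, and
 * output force value (relative to the safe output level, floored at
 * force_out_min when ramping down) into CL_DVFS_FREQ_REQ, then turn on force
 * enable if it was not set already.
 */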
776 static void set_request(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
777 {
778         u32 val, f;
779         int force_val = req->output - cld->safe_output;
780         int coef = 128; /* FIXME: cld->p_data->cfg_param->cg_scale? */
781
782         /* If going down apply force output floor */
783         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
784         f = (val & CL_DVFS_FREQ_REQ_FREQ_MASK) >> CL_DVFS_FREQ_REQ_FREQ_SHIFT;
785         if ((!(val & CL_DVFS_FREQ_REQ_FREQ_VALID) || (f > req->freq)) &&
786             (cld->force_out_min > req->output))
787                 force_val = cld->force_out_min - cld->safe_output;
788
789         force_val = force_val * coef / cld->p_data->cfg_param->cg;
790         force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
791
792         /*
793          * 1st set new frequency request and force values, then set force enable
794          * bit (if not set already). Use same CL_DVFS_FREQ_REQ register read
795          * (not other cl_dvfs register) plus explicit delay as a fence.
796          */
797         val &= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
798         val |= req->freq << CL_DVFS_FREQ_REQ_FREQ_SHIFT;
799         val |= req->scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
800         val |= ((u32)force_val << CL_DVFS_FREQ_REQ_FORCE_SHIFT) &
801                 CL_DVFS_FREQ_REQ_FORCE_MASK;
802         val |= CL_DVFS_FREQ_REQ_FREQ_VALID;
803         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
804         wmb();
805         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
806
807         if (!(val & CL_DVFS_FREQ_REQ_FORCE_ENABLE)) {
808                 udelay(1);  /* 1us (big margin) window for force value settle */
809                 val |= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
810                 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
811                 cl_dvfs_wmb(cld);
812         }
813 }
814
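/*
 * Map a voltage in mV to the lowest output at or above it: returns a LUT
 * index for the I2C interface, or a PMU register value for PWM.
 */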
815 static u8 find_mv_out_cap(struct tegra_cl_dvfs *cld, int mv)
816 {
817         u8 cap;
818         int uv;
819
820         for (cap = 0; cap < cld->num_voltages; cap++) {
821                 uv = cld->out_map[cap]->reg_uV;
822                 if (uv >= mv * 1000)
823                         return is_i2c(cld) ? cap : cld->out_map[cap]->reg_value;
824         }
825         return get_output_top(cld);     /* maximum possible output */
826 }
827
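/*
 * Map a voltage in mV to the highest output that does not exceed it
 * (LUT index for I2C, PMU register value for PWM).
 */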
828 static u8 find_mv_out_floor(struct tegra_cl_dvfs *cld, int mv)
829 {
830         u8 floor;
831         int uv;
832
833         for (floor = 0; floor < cld->num_voltages; floor++) {
834                 uv = cld->out_map[floor]->reg_uV;
835                 if (uv > mv * 1000) {
836                         if (!floor)     /* minimum possible output */
837                                 return get_output_bottom(cld);
838                         break;
839                 }
840         }
841         return is_i2c(cld) ? floor - 1 : cld->out_map[floor - 1]->reg_value;
842 }
843
844 static int find_safe_output(
845         struct tegra_cl_dvfs *cld, unsigned long rate, u8 *safe_output)
846 {
847         int i;
848         int n = cld->safe_dvfs->num_freqs;
849         unsigned long *freqs = cld->safe_dvfs->freqs;
850
851         for (i = 0; i < n; i++) {
852                 if (freqs[i] >= rate) {
853                         *safe_output = cld->clk_dvfs_map[i];
854                         return 0;
855                 }
856         }
857         return -EINVAL;
858 }
859
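/*
 * Return the highest frequency in the safe dvfs table whose required output
 * does not exceed out_min (falling back to the lowest table entry).
 */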
860 static unsigned long find_dvco_rate_min(struct tegra_cl_dvfs *cld, u8 out_min)
861 {
862         int i;
863
864         for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
865                 if (cld->clk_dvfs_map[i] > out_min)
866                         break;
867         }
868         i = i ? i-1 : 0;
869         return cld->safe_dvfs->freqs[i];
870 }
871
872 static void cl_dvfs_set_dvco_rate_min(struct tegra_cl_dvfs *cld)
873 {
874         unsigned long rate = cld->dvco_rate_floors[cld->therm_floor_idx];
875         if (!rate) {
876                 rate = cld->safe_dvfs->dfll_data.out_rate_min;
877                 if (cld->therm_floor_idx < cld->therm_floors_num)
878                         rate = find_dvco_rate_min(cld,
879                                 cld->thermal_out_floors[cld->therm_floor_idx]);
880         }
881
882         /* round minimum rate to request unit (ref_rate/2) boundary */
883         cld->dvco_rate_min = ROUND_MIN_RATE(rate, cld->ref_rate);
884         pr_debug("%s: calibrated dvco_rate_min %lu\n",
885                  __func__, cld->dvco_rate_min);
886
887         /* dvco min rate is under-estimated - skew calibration range up */
888         cld->calibration_range_min = cld->dvco_rate_min - 4 * RATE_STEP(cld);
889         if (cld->calibration_range_min < cld->safe_dvfs->freqs[0])
890                 cld->calibration_range_min = cld->safe_dvfs->freqs[0];
891         cld->calibration_range_max = cld->dvco_rate_min + 24 * RATE_STEP(cld);
892         rate = cld->safe_dvfs->freqs[cld->safe_dvfs->num_freqs - 1];
893         if (cld->calibration_range_max > rate)
894                 cld->calibration_range_max = rate;
895 }
896
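/*
 * Set the lower limit for forced output: the current thermal floor voltage
 * plus the PMU undershoot guard-band, skipping over the safe output level.
 */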
897 static void cl_dvfs_set_force_out_min(struct tegra_cl_dvfs *cld)
898 {
899         u8 force_out_min = 0;
900         int force_mv_min = cld->p_data->pmu_undershoot_gb;
901
902         if (!force_mv_min) {
903                 cld->force_out_min = 0;
904                 return;
905         }
906
907         if (cld->therm_floor_idx < cld->therm_floors_num)
908                 force_out_min = cld->thermal_out_floors[cld->therm_floor_idx];
909         force_mv_min += cld->out_map[force_out_min]->reg_uV / 1000;
910         force_out_min = find_mv_out_cap(cld, force_mv_min);
911         if (force_out_min == cld->safe_output)
912                 force_out_min++;
913         cld->force_out_min = force_out_min;
914 }
915
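/*
 * Find the PMU voltage map entry for the given voltage: the first entry at or
 * above mV, or only an exact match (at 1 mV granularity) when 'exact' is set.
 */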
916 static struct voltage_reg_map *find_vdd_map_entry(
917         struct tegra_cl_dvfs *cld, int mV, bool exact)
918 {
919         int i, reg_mV;
920
921         for (i = 0; i < cld->p_data->vdd_map_size; i++) {
922                 /* round down to 1mV */
923                 reg_mV = cld->p_data->vdd_map[i].reg_uV / 1000;
924                 if (mV <= reg_mV)
925                         break;
926         }
927
928         if (i < cld->p_data->vdd_map_size) {
929                 if (!exact || (mV == reg_mV))
930                         return &cld->p_data->vdd_map[i];
931         }
932         return NULL;
933 }
934
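/*
 * Build the output voltage maps: fill out_map[] with PMU register entries
 * covering the range from min_millivolts up to the maximum dfll voltage, and
 * record in clk_dvfs_map[] the output (LUT index for I2C, register value for
 * PWM) that corresponds to each safe dvfs frequency step.
 */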
935 static void cl_dvfs_init_maps(struct tegra_cl_dvfs *cld)
936 {
937         int i, j, v, v_max, n;
938         const int *millivolts;
939         struct voltage_reg_map *m;
940
941         BUILD_BUG_ON(MAX_CL_DVFS_VOLTAGES > OUT_MASK + 1);
942
943         n = cld->safe_dvfs->num_freqs;
944         BUG_ON(n >= MAX_CL_DVFS_VOLTAGES);
945
946         millivolts = cld->safe_dvfs->dfll_millivolts;
947         v_max = millivolts[n - 1];
948
949         v = cld->safe_dvfs->dfll_data.min_millivolts;
950         BUG_ON(v > millivolts[0]);
951
952         cld->out_map[0] = find_vdd_map_entry(cld, v, true);
953         BUG_ON(!cld->out_map[0]);
954
955         for (i = 0, j = 1; i < n; i++) {
956                 for (;;) {
957                         v += max(1, (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j));
958                         if (v >= millivolts[i])
959                                 break;
960
961                         m = find_vdd_map_entry(cld, v, false);
962                         BUG_ON(!m);
963                         if (m != cld->out_map[j - 1])
964                                 cld->out_map[j++] = m;
965                 }
966
967                 v = (j == MAX_CL_DVFS_VOLTAGES - 1) ? v_max : millivolts[i];
968                 m = find_vdd_map_entry(cld, v, true);
969                 BUG_ON(!m);
970                 if (m != cld->out_map[j - 1])
971                         cld->out_map[j++] = m;
972                 if (is_i2c(cld)) {
973                         cld->clk_dvfs_map[i] = j - 1;
974                 } else {
975                         cld->clk_dvfs_map[i] = cld->out_map[j - 1]->reg_value;
976                         BUG_ON(cld->clk_dvfs_map[i] > OUT_MASK + 1);
977                 }
978
979                 if (v >= v_max)
980                         break;
981         }
982         cld->num_voltages = j;
983 }
984
985 static void cl_dvfs_init_tuning_thresholds(struct tegra_cl_dvfs *cld)
986 {
987         int mv;
988
989         /*
990          * Convert high tuning voltage threshold into output LUT index, and
991          * add necessary margin.  If voltage threshold is outside operating
992          * range set it at maximum output level to effectively disable tuning
993          * parameters adjustment.
994          */
995         cld->tune_high_out_min = get_output_top(cld);
996         cld->tune_high_out_start = cld->tune_high_out_min;
997         mv = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
998         if (mv >= cld->safe_dvfs->dfll_data.min_millivolts) {
999                 int margin = cld->safe_dvfs->dfll_data.tune_high_margin_mv ? :
1000                                 CL_DVFS_TUNE_HIGH_MARGIN_MV;
1001                 u8 out_min = find_mv_out_cap(cld, mv);
1002                 u8 out_start = find_mv_out_cap(cld, mv + margin);
1003                 out_start = max(out_start, (u8)(out_min + 1));
1004                 if (out_start < get_output_top(cld)) {
1005                         cld->tune_high_out_min = out_min;
1006                         cld->tune_high_out_start = out_start;
1007                         if (cld->minimax_output <= out_start)
1008                                 cld->minimax_output = out_start + 1;
1009                 }
1010         }
1011 }
1012
1013 static void cl_dvfs_init_hot_output_cap(struct tegra_cl_dvfs *cld)
1014 {
1015         int i;
1016         if (!cld->safe_dvfs->dvfs_rail->therm_mv_caps ||
1017             !cld->safe_dvfs->dvfs_rail->therm_mv_caps_num)
1018                 return;
1019
1020         if (!cld->safe_dvfs->dvfs_rail->vmax_cdev)
1021                 WARN(1, "%s: missing dfll cap cooling device\n",
1022                      cld->safe_dvfs->dvfs_rail->reg_id);
1023         /*
1024          * Convert monotonically decreasing thermal caps at high temperature
1025          * into output LUT indexes; make sure there is a room for regulation
1026          * below minimum thermal cap.
1027          */
1028         cld->therm_caps_num = cld->safe_dvfs->dvfs_rail->therm_mv_caps_num;
1029         for (i = 0; i < cld->therm_caps_num; i++) {
1030                 cld->thermal_out_caps[i] = find_mv_out_floor(
1031                         cld, cld->safe_dvfs->dvfs_rail->therm_mv_caps[i]);
1032         }
1033         BUG_ON(cld->thermal_out_caps[cld->therm_caps_num - 1] <
1034                cld->minimax_output);
1035 }
1036
1037 static void cl_dvfs_init_cold_output_floor(struct tegra_cl_dvfs *cld)
1038 {
1039         int i;
1040         if (!cld->safe_dvfs->dvfs_rail->therm_mv_floors ||
1041             !cld->safe_dvfs->dvfs_rail->therm_mv_floors_num)
1042                 return;
1043
1044         if (!cld->safe_dvfs->dvfs_rail->vmin_cdev)
1045                 WARN(1, "%s: missing dfll floor cooling device\n",
1046                      cld->safe_dvfs->dvfs_rail->reg_id);
1047         /*
1048          * Convert monotonically decreasing thermal floors at low temperature
1049          * into output LUT indexes; make sure there is room for regulation
1050          * above maximum thermal floor.
1051          */
1052         cld->therm_floors_num = cld->safe_dvfs->dvfs_rail->therm_mv_floors_num;
1053         for (i = 0; i < cld->therm_floors_num; i++) {
1054                 cld->thermal_out_floors[i] = find_mv_out_cap(
1055                         cld, cld->safe_dvfs->dvfs_rail->therm_mv_floors[i]);
1056         }
1057         BUG_ON(cld->thermal_out_floors[0] + 1 >= get_output_top(cld));
1058         if (cld->minimax_output <= cld->thermal_out_floors[0])
1059                 cld->minimax_output = cld->thermal_out_floors[0] + 1;
1060 }
1061
1062 static void cl_dvfs_init_output_thresholds(struct tegra_cl_dvfs *cld)
1063 {
1064         cld->minimax_output = 0;
1065         cl_dvfs_init_tuning_thresholds(cld);
1066         cl_dvfs_init_cold_output_floor(cld);
1067
1068         /* make sure safe output is safe at any temperature */
1069         cld->safe_output = cld->thermal_out_floors[0] ? :
1070                 get_output_bottom(cld) + 1;
1071         if (cld->minimax_output <= cld->safe_output)
1072                 cld->minimax_output = cld->safe_output + 1;
1073
1074         /* init caps after minimax output is determined */
1075         cl_dvfs_init_hot_output_cap(cld);
1076 }
1077
1078 static void cl_dvfs_init_pwm_if(struct tegra_cl_dvfs *cld)
1079 {
1080         u32 val, div;
1081         struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
1082         bool delta_mode = p_data->u.pmu_pwm.delta_mode;
1083
1084         div = GET_DIV(cld->ref_rate, p_data->u.pmu_pwm.pwm_rate, 1);
1085
1086         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
1087         val |= delta_mode ? CL_DVFS_OUTPUT_CFG_PWM_DELTA : 0;
1088         val |= (div << CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT) &
1089                 CL_DVFS_OUTPUT_CFG_PWM_DIV_MASK;
1090
1091         /*
1092          * Different ways to enable/disable PWM depending on board design:
1093          * a) Use native CL-DVFS output configuration PWM_ENABLE control
1094          * b) Use gpio control of external buffer (out_gpio is populated)
1095          * c) Use tristate PWM pingroup control (pwm_pingroup is populated)
1096          * in cases (b) and (c) keep CL-DVFS native control always enabled
1097          */
1098         if (p_data->u.pmu_pwm.out_gpio || p_data->u.pmu_pwm.pwm_pingroup)
1099                 val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
1100
1101         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
1102         cl_dvfs_wmb(cld);
1103 }
1104
1105 static void cl_dvfs_init_i2c_if(struct tegra_cl_dvfs *cld)
1106 {
1107         u32 val, div;
1108         struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
1109         bool hs_mode = p_data->u.pmu_i2c.hs_rate;
1110
1111         /* PMU slave address, vdd register offset, and transfer mode */
1112         val = p_data->u.pmu_i2c.slave_addr << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT;
1113         if (p_data->u.pmu_i2c.addr_10)
1114                 val |= CL_DVFS_I2C_CFG_SLAVE_ADDR_10;
1115         if (hs_mode) {
1116                 val |= p_data->u.pmu_i2c.hs_master_code <<
1117                         CL_DVFS_I2C_CFG_HS_CODE_SHIFT;
1118                 val |= CL_DVFS_I2C_CFG_PACKET_ENABLE;
1119         }
1120         val |= CL_DVFS_I2C_CFG_SIZE_MASK;
1121         val |= CL_DVFS_I2C_CFG_ARB_ENABLE;
1122         cl_dvfs_writel(cld, val, CL_DVFS_I2C_CFG);
1123         cl_dvfs_writel(cld, p_data->u.pmu_i2c.reg, CL_DVFS_I2C_VDD_REG_ADDR);
1124
1125
1126         val = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.fs_rate, 8);
1127         BUG_ON(!val || (val > CL_DVFS_I2C_CLK_DIVISOR_MASK));
1128         val = (val - 1) << CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT;
1129         if (hs_mode) {
1130                 div = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.hs_rate, 12);
1131                 BUG_ON(!div || (div > CL_DVFS_I2C_CLK_DIVISOR_MASK));
1132         } else {
1133                 div = 2;        /* default hs divisor just in case */
1134         }
1135         val |= (div - 1) << CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT;
1136         cl_dvfs_writel(cld, val, CL_DVFS_I2C_CLK_DIVISOR);
1137         cl_dvfs_wmb(cld);
1138 }
1139
1140 static void cl_dvfs_init_out_if(struct tegra_cl_dvfs *cld)
1141 {
1142         u32 val, out_min, out_max;
1143
1144         /*
1145          * Disable output, and set safe voltage and output limits;
1146          * disable and clear limit interrupts.
1147          */
1148         cld->tune_state = TEGRA_CL_DVFS_TUNE_LOW;
1149         cld->therm_cap_idx = cld->therm_caps_num;
1150         cld->therm_floor_idx = 0;
1151         cl_dvfs_set_dvco_rate_min(cld);
1152         cl_dvfs_set_force_out_min(cld);
1153
1154         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
1155                 /*
1156                  * If h/w supports dynamic change of output register, limit
1157                  * LUT index range using cl_dvfs h/w controls, and load full
1158                  * range LUT table once.
1159                  */
1160                 out_min = get_output_min(cld);
1161                 out_max = get_output_cap(cld, NULL);
1162                 cld->lut_min = get_output_bottom(cld);
1163                 cld->lut_max = get_output_top(cld);
1164         } else {
1165                 /* LUT available only for I2C, no dynamic config WAR for PWM */
1166                 BUG_ON(!is_i2c(cld));
1167
1168                 /*
1169                  * Allow the entire range of LUT indexes, but limit output
1170                  * voltage in LUT mapping (this "indirect" application of limits
1171                  * is used, because h/w does not support dynamic change of index
1172                  * limits, but dynamic reload of LUT is fine).
1173                  */
1174                 out_min = get_output_bottom(cld);
1175                 out_max = get_output_top(cld);
1176                 cld->lut_min = get_output_min(cld);
1177                 cld->lut_max = get_output_cap(cld, NULL);
1178         }
1179
1180         val = (cld->safe_output << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT) |
1181                 (out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT) |
1182                 (out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT);
1183         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
1184         cl_dvfs_wmb(cld);
1185
1186         cl_dvfs_writel(cld, 0, CL_DVFS_OUTPUT_FORCE);
1187         cl_dvfs_writel(cld, 0, CL_DVFS_INTR_EN);
1188         cl_dvfs_writel(cld, CL_DVFS_INTR_MAX_MASK | CL_DVFS_INTR_MIN_MASK,
1189                        CL_DVFS_INTR_STS);
1190
1191         /* fill in LUT table */
1192         if (is_i2c(cld))
1193                 cl_dvfs_load_lut(cld);
1194
1195         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
1196                 /* dynamic update of output register allowed - no need to reload
1197                    lut - use lut limits as output register setting shadow */
1198                 cld->lut_min = out_min;
1199                 cld->lut_max = out_max;
1200         }
1201
1202         /* configure transport */
1203         if (is_i2c(cld))
1204                 cl_dvfs_init_i2c_if(cld);
1205         else
1206                 cl_dvfs_init_pwm_if(cld);
1207 }
1208
1209 static void cl_dvfs_init_cntrl_logic(struct tegra_cl_dvfs *cld)
1210 {
1211         u32 val;
1212         struct tegra_cl_dvfs_cfg_param *param = cld->p_data->cfg_param;
1213
1214         /* configure mode, control loop parameters, DFLL tuning */
1215         set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1216
1217         val = GET_DIV(cld->ref_rate, param->sample_rate, 32);
1218         BUG_ON(val > CL_DVFS_CONFIG_DIV_MASK);
1219         cl_dvfs_writel(cld, val, CL_DVFS_CONFIG);
1220
1221         val = (param->force_mode << CL_DVFS_PARAMS_FORCE_MODE_SHIFT) |
1222                 (param->cf << CL_DVFS_PARAMS_CF_PARAM_SHIFT) |
1223                 (param->ci << CL_DVFS_PARAMS_CI_PARAM_SHIFT) |
1224                 ((u8)param->cg << CL_DVFS_PARAMS_CG_PARAM_SHIFT) |
1225                 (param->cg_scale ? CL_DVFS_PARAMS_CG_SCALE : 0);
1226         cl_dvfs_writel(cld, val, CL_DVFS_PARAMS);
1227
1228         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
1229         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune1, CL_DVFS_TUNE1);
1230         cl_dvfs_wmb(cld);
1231         if (cld->safe_dvfs->dfll_data.tune_trimmers)
1232                 cld->safe_dvfs->dfll_data.tune_trimmers(false);
1233
1234         /* configure droop (skipper 1) and scale (skipper 2) */
1235         val = GET_DROOP_FREQ(cld->safe_dvfs->dfll_data.droop_rate_min,
1236                         cld->ref_rate) << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT;
1237         BUG_ON(val > CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK);
1238         val |= (param->droop_cut_value << CL_DVFS_DROOP_CTRL_CUT_SHIFT);
1239         val |= (param->droop_restore_ramp << CL_DVFS_DROOP_CTRL_RAMP_SHIFT);
1240         cl_dvfs_writel(cld, val, CL_DVFS_DROOP_CTRL);
1241
1242         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ) &
1243                 CL_DVFS_FREQ_REQ_SCALE_MASK;
1244         cld->last_req.scale = val >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1245         cld->last_req.cap = 0;
1246         cld->last_req.freq = 0;
1247         cld->last_req.output = 0;
1248         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
1249         cl_dvfs_writel(cld, param->scale_out_ramp, CL_DVFS_SCALE_RAMP);
1250
1251         /* select frequency for monitoring */
1252         cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ, CL_DVFS_MONITOR_CTRL);
1253         cl_dvfs_wmb(cld);
1254 }
1255
1256 static int cl_dvfs_enable_clocks(struct tegra_cl_dvfs *cld)
1257 {
1258         if (is_i2c(cld))
1259                 clk_enable(cld->i2c_clk);
1260
1261         clk_enable(cld->ref_clk);
1262         clk_enable(cld->soc_clk);
1263         return 0;
1264 }
1265
1266 static void cl_dvfs_disable_clocks(struct tegra_cl_dvfs *cld)
1267 {
1268         if (is_i2c(cld))
1269                 clk_disable(cld->i2c_clk);
1270
1271         clk_disable(cld->ref_clk);
1272         clk_disable(cld->soc_clk);
1273 }
1274
1275 static int cl_dvfs_init(struct tegra_cl_dvfs *cld)
1276 {
1277         int ret;
1278
1279         /* Enable output interface clock */
1280         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C) {
1281                 ret = clk_enable(cld->i2c_clk);
1282                 if (ret) {
1283                         pr_err("%s: Failed to enable %s\n",
1284                                __func__, cld->i2c_clk->name);
1285                         return ret;
1286                 }
1287                 cld->i2c_rate = clk_get_rate(cld->i2c_clk);
1288         } else if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM) {
1289                 int gpio = cld->p_data->u.pmu_pwm.out_gpio;
1290                 int flags = cld->p_data->u.pmu_pwm.out_enable_high ?
1291                         GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
1292                 if (gpio && gpio_request_one(gpio, flags, "cl_dvfs_pwm")) {
1293                         pr_err("%s: Failed to request pwm gpio %d\n",
1294                                __func__, gpio);
1295                         return -EPERM;
1296                 }
1297         } else {
1298                 pr_err("%s: unknown PMU interface\n", __func__);
1299                 return -EINVAL;
1300         }
1301
1302         /* Enable module clocks, release control logic reset */
1303         ret = clk_enable(cld->ref_clk);
1304         if (ret) {
1305                 pr_err("%s: Failed to enable %s\n",
1306                        __func__, cld->ref_clk->name);
1307                 return ret;
1308         }
1309         ret = clk_enable(cld->soc_clk);
1310         if (ret) {
1311                 pr_err("%s: Failed to enable %s\n",
1312                        __func__, cld->soc_clk->name);
1313                 return ret;
1314         }
1315         cld->ref_rate = clk_get_rate(cld->ref_clk);
1316         BUG_ON(!cld->ref_rate);
1317
1318         /* init tuning timer */
1319         init_timer(&cld->tune_timer);
1320         cld->tune_timer.function = tune_timer_cb;
1321         cld->tune_timer.data = (unsigned long)cld;
1322         cld->tune_delay = usecs_to_jiffies(CL_DVFS_TUNE_HIGH_DELAY);
1323
1324         /* init calibration timer */
1325         init_timer_deferrable(&cld->calibration_timer);
1326         cld->calibration_timer.function = calibration_timer_cb;
1327         cld->calibration_timer.data = (unsigned long)cld;
1328         cld->calibration_delay = usecs_to_jiffies(CL_DVFS_CALIBR_TIME);
1329
1330         /* Prepare the output voltage mapping */
1331         cl_dvfs_init_maps(cld);
1332
1333         /* Setup output range thresholds */
1334         cl_dvfs_init_output_thresholds(cld);
1335
1336         /* Setup PMU interface */
1337         cl_dvfs_init_out_if(cld);
1338
1339         /* Configure control registers in disabled mode and disable clocks */
1340         cl_dvfs_init_cntrl_logic(cld);
1341         cl_dvfs_disable_clocks(cld);
1342
1343         return 0;
1344 }
1345
1346 /*
1347  * Re-initialize and enable target device clock in open loop mode. Called
1348  * directly from SoC clock resume syscore operation. Closed loop will be
1349  * re-entered in platform syscore ops as well.
1350  */
1351 void tegra_cl_dvfs_resume(struct tegra_cl_dvfs *cld)
1352 {
1353         enum tegra_cl_dvfs_ctrl_mode mode = cld->mode;
1354         struct dfll_rate_req req = cld->last_req;
1355
1356         cl_dvfs_enable_clocks(cld);
1357
1358         /* Setup PMU interface, and configure controls in disabled mode */
1359         cl_dvfs_init_out_if(cld);
1360         cl_dvfs_init_cntrl_logic(cld);
1361
1362         /* Restore force output */
1363         cl_dvfs_writel(cld, cld->suspended_force_out, CL_DVFS_OUTPUT_FORCE);
1364
1365         cl_dvfs_disable_clocks(cld);
1366
1367         /* Restore last request and mode */
1368         cld->last_req = req;
1369         if (mode != TEGRA_CL_DVFS_DISABLED) {
1370                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1371                 WARN(mode > TEGRA_CL_DVFS_OPEN_LOOP,
1372                      "DFLL was left locked in suspend\n");
1373         }
1374 }
1375
1376 #ifdef CONFIG_THERMAL
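/*
 * cl_dvfs exposes two cooling devices to the thermal framework: the "vmax"
 * device steps through the thermal voltage cap table (therm_cap_idx), and the
 * "vmin" device steps through the thermal floor table (therm_floor_idx),
 * refreshing the DVCO minimum rate and the forced output minimum. Both
 * re-issue the last rate request when the DFLL is in closed loop, so the new
 * limits take effect immediately.
 */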
1377 /* cl_dvfs cap cooling device */
1378 static int tegra_cl_dvfs_get_vmax_cdev_max_state(
1379         struct thermal_cooling_device *cdev, unsigned long *max_state)
1380 {
1381         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1382         *max_state = cld->therm_caps_num;
1383         return 0;
1384 }
1385
1386 static int tegra_cl_dvfs_get_vmax_cdev_cur_state(
1387         struct thermal_cooling_device *cdev, unsigned long *cur_state)
1388 {
1389         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1390         *cur_state = cld->therm_cap_idx;
1391         return 0;
1392 }
1393
1394 static int tegra_cl_dvfs_set_vmax_cdev_state(
1395         struct thermal_cooling_device *cdev, unsigned long cur_state)
1396 {
1397         unsigned long flags;
1398         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1399
1400         clk_lock_save(cld->dfll_clk, &flags);
1401
1402         if (cld->therm_cap_idx != cur_state) {
1403                 cld->therm_cap_idx = cur_state;
1404                 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1405                         tegra_cl_dvfs_request_rate(cld,
1406                                 tegra_cl_dvfs_request_get(cld));
1407                 }
1408         }
1409         clk_unlock_restore(cld->dfll_clk, &flags);
1410         return 0;
1411 }
1412
1413 static struct thermal_cooling_device_ops tegra_cl_dvfs_vmax_cool_ops = {
1414         .get_max_state = tegra_cl_dvfs_get_vmax_cdev_max_state,
1415         .get_cur_state = tegra_cl_dvfs_get_vmax_cdev_cur_state,
1416         .set_cur_state = tegra_cl_dvfs_set_vmax_cdev_state,
1417 };
1418
1419 /* cl_dvfs vmin cooling device */
1420 static int tegra_cl_dvfs_get_vmin_cdev_max_state(
1421         struct thermal_cooling_device *cdev, unsigned long *max_state)
1422 {
1423         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1424         *max_state = cld->therm_floors_num;
1425         return 0;
1426 }
1427
1428 static int tegra_cl_dvfs_get_vmin_cdev_cur_state(
1429         struct thermal_cooling_device *cdev, unsigned long *cur_state)
1430 {
1431         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1432         *cur_state = cld->therm_floor_idx;
1433         return 0;
1434 }
1435
1436 static int tegra_cl_dvfs_set_vmin_cdev_state(
1437         struct thermal_cooling_device *cdev, unsigned long cur_state)
1438 {
1439         unsigned long flags;
1440         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1441
1442         clk_lock_save(cld->dfll_clk, &flags);
1443
1444         if (cld->therm_floor_idx != cur_state) {
1445                 cld->therm_floor_idx = cur_state;
1446                 cl_dvfs_set_dvco_rate_min(cld);
1447                 cl_dvfs_set_force_out_min(cld);
1448                 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1449                         tegra_cl_dvfs_request_rate(cld,
1450                                 tegra_cl_dvfs_request_get(cld));
1451                 }
1452         }
1453         clk_unlock_restore(cld->dfll_clk, &flags);
1454         return 0;
1455 }
1456
1457 static struct thermal_cooling_device_ops tegra_cl_dvfs_vmin_cool_ops = {
1458         .get_max_state = tegra_cl_dvfs_get_vmin_cdev_max_state,
1459         .get_cur_state = tegra_cl_dvfs_get_vmin_cdev_cur_state,
1460         .set_cur_state = tegra_cl_dvfs_set_vmin_cdev_state,
1461 };
1462
1463 static void tegra_cl_dvfs_init_cdev(struct work_struct *work)
1464 {
1465         struct tegra_cl_dvfs *cld = container_of(
1466                 work, struct tegra_cl_dvfs, init_cdev_work);
1467
1468         /* just report error - initialized at WC temperature, anyway */
1469         if (cld->safe_dvfs->dvfs_rail->vmin_cdev) {
1470                 char *type = cld->safe_dvfs->dvfs_rail->vmin_cdev->cdev_type;
1471                 cld->vmin_cdev = thermal_cooling_device_register(
1472                         type, (void *)cld, &tegra_cl_dvfs_vmin_cool_ops);
1473                 if (IS_ERR_OR_NULL(cld->vmin_cdev)) {
1474                         cld->vmin_cdev = NULL;
1475                         pr_err("tegra cooling device %s failed to register\n",
1476                                type);
1477                         return;
1478                 }
1479                 pr_info("%s cooling device is registered\n", type);
1480         }
1481
1482         if (cld->safe_dvfs->dvfs_rail->vmax_cdev) {
1483                 char *type = cld->safe_dvfs->dvfs_rail->vmax_cdev->cdev_type;
1484                 cld->vmax_cdev = thermal_cooling_device_register(
1485                         type, (void *)cld, &tegra_cl_dvfs_vmax_cool_ops);
1486                 if (IS_ERR_OR_NULL(cld->vmax_cdev)) {
1487                         cld->vmax_cdev = NULL;
1488                         pr_err("tegra cooling device %s failed to register\n",
1489                                type);
1490                         return;
1491                 }
1492                 pr_info("%s cooling device is registered\n", type);
1493         }
1494 }
1495 #endif
1496
1497 #ifdef CONFIG_PM_SLEEP
1498 /*
1499  * cl_dvfs controls clock/voltage to other devices, including CPU. Therefore,
1500  * cl_dvfs driver pm suspend callback does not stop cl-dvfs operations. It is
1501  * only used to enforce the cold/hot voltage limits, since temperature may
1502  * change in suspend without waking up. The correct temperature zone after
1503  * suspend will be updated via the cl_dvfs cooling device interface when the
1504  * temperature sensor resumes.
1505  */
1506 static int tegra_cl_dvfs_suspend_cl(struct device *dev)
1507 {
1508         unsigned long flags;
1509         struct tegra_cl_dvfs *cld = dev_get_drvdata(dev);
1510
1511         clk_lock_save(cld->dfll_clk, &flags);
1512         if (cld->vmax_cdev)
1513                 cld->vmax_cdev->updated = false;
1514         cld->therm_cap_idx = cld->therm_caps_num;
1515         if (cld->vmin_cdev)
1516                 cld->vmin_cdev->updated = false;
1517         cld->therm_floor_idx = 0;
1518         cl_dvfs_set_dvco_rate_min(cld);
1519         cl_dvfs_set_force_out_min(cld);
1520         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1521                 set_cl_config(cld, &cld->last_req);
1522                 set_request(cld, &cld->last_req);
1523         }
1524         cld->suspended_force_out = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
1525         clk_unlock_restore(cld->dfll_clk, &flags);
1526
1527         return 0;
1528 }
1529
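/*
 * Only a suspend callback is provided here; resume is handled by
 * tegra_cl_dvfs_resume() called from the SoC clock syscore resume path (see
 * the comment above that function).
 */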
1530 static const struct dev_pm_ops tegra_cl_dvfs_pm_ops = {
1531         .suspend = tegra_cl_dvfs_suspend_cl,
1532 };
1533 #endif
1534
1535 /*
1536  * These dfll bypass APIs provide direct access to force output register.
1537  * The set operation always updates the force value, but applies it only in
1538  * open loop or disabled mode. The get operation returns the force value if
1539  * it is applied, and the monitored output otherwise. Hence, the get value
1540  * matches the real output in any mode.
1541  */
1542 static int tegra_cl_dvfs_force_output(void *data, unsigned int out_sel)
1543 {
1544         u32 val;
1545         unsigned long flags;
1546         struct tegra_cl_dvfs *cld = data;
1547
1548         if (out_sel > OUT_MASK)
1549                 return -EINVAL;
1550
1551         clk_lock_save(cld->dfll_clk, &flags);
1552
1553         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
1554         val = (val & CL_DVFS_OUTPUT_FORCE_ENABLE) | out_sel;
1555         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
1556         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
1557
1558         if ((cld->mode < TEGRA_CL_DVFS_CLOSED_LOOP) &&
1559             !(val & CL_DVFS_OUTPUT_FORCE_ENABLE)) {
1560                 val |= CL_DVFS_OUTPUT_FORCE_ENABLE;
1561                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
1562                 cl_dvfs_wmb(cld);
1563                 output_enable(cld);
1564         }
1565
1566         clk_unlock_restore(cld->dfll_clk, &flags);
1567         return 0;
1568 }
1569
1570 static unsigned int tegra_cl_dvfs_get_output(void *data)
1571 {
1572         u32 val;
1573         unsigned long flags;
1574         struct tegra_cl_dvfs *cld = data;
1575
1576         clk_lock_save(cld->dfll_clk, &flags);
1577         val = cl_dvfs_get_output(cld);
1578         clk_unlock_restore(cld->dfll_clk, &flags);
1579         return val;
1580 }
1581
1582 static void tegra_cl_dvfs_bypass_dev_register(struct tegra_cl_dvfs *cld,
1583                                               struct platform_device *byp_dev)
1584 {
1585         struct tegra_dfll_bypass_platform_data *p_data =
1586                 byp_dev->dev.platform_data;
1587         p_data->set_bypass_sel = tegra_cl_dvfs_force_output;
1588         p_data->get_bypass_sel = tegra_cl_dvfs_get_output;
1589         p_data->dfll_data = cld;
1590
1591         platform_device_register(byp_dev);
1592 }
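/*
 * A minimal usage sketch for the bypass device (illustrative only - everything
 * except the two callbacks and dfll_data below is an assumed name, not part of
 * this driver):
 *
 *	static int dfll_bypass_set_voltage_sel(struct regulator_dev *rdev,
 *					       unsigned sel)
 *	{
 *		struct tegra_dfll_bypass_platform_data *pd =
 *						rdev_get_drvdata(rdev);
 *		return pd->set_bypass_sel(pd->dfll_data, sel);
 *	}
 *
 * i.e. the registered device is expected to route its voltage selector
 * accesses through the set_bypass_sel()/get_bypass_sel() hooks installed
 * above.
 */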
1593
1594 static int __init tegra_cl_dvfs_probe(struct platform_device *pdev)
1595 {
1596         int ret;
1597         struct tegra_cl_dvfs_platform_data *p_data;
1598         struct resource *res;
1599         struct tegra_cl_dvfs *cld;
1600         struct clk *ref_clk, *soc_clk, *i2c_clk, *safe_dvfs_clk, *dfll_clk;
1601
1602         /* Get resources */
1603         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1604         if (!res) {
1605                 dev_err(&pdev->dev, "missing register base\n");
1606                 return -ENOMEM;
1607         }
1608
1609         p_data = pdev->dev.platform_data;
1610         if (!p_data || !p_data->cfg_param || !p_data->vdd_map) {
1611                 dev_err(&pdev->dev, "missing platform data\n");
1612                 return -ENODATA;
1613         }
1614
1615         ref_clk = clk_get(&pdev->dev, "ref");
1616         soc_clk = clk_get(&pdev->dev, "soc");
1617         i2c_clk = clk_get(&pdev->dev, "i2c");
1618         safe_dvfs_clk = clk_get(&pdev->dev, "safe_dvfs");
1619         dfll_clk = clk_get(&pdev->dev, p_data->dfll_clk_name);
1620         if (IS_ERR(ref_clk) || IS_ERR(soc_clk) || IS_ERR(i2c_clk)) {
1621                 dev_err(&pdev->dev, "missing control clock\n");
1622                 return -ENODEV;
1623         }
1624         if (IS_ERR(safe_dvfs_clk)) {
1625                 dev_err(&pdev->dev, "missing safe dvfs source clock\n");
1626                 return PTR_ERR(safe_dvfs_clk);
1627         }
1628         if (IS_ERR(dfll_clk)) {
1629                 dev_err(&pdev->dev, "missing target dfll clock\n");
1630                 return PTR_ERR(dfll_clk);
1631         }
1632         if (!safe_dvfs_clk->dvfs || !safe_dvfs_clk->dvfs->dvfs_rail) {
1633                 dev_err(&pdev->dev, "invalid safe dvfs source\n");
1634                 return -EINVAL;
1635         }
1636
1637         /* Allocate cl_dvfs object and populate resource accessors */
1638         cld = kzalloc(sizeof(*cld), GFP_KERNEL);
1639         if (!cld) {
1640                 dev_err(&pdev->dev, "failed to allocate cl_dvfs object\n");
1641                 return -ENOMEM;
1642         }
1643
1644         cld->cl_base = IO_ADDRESS(res->start);
1645         cld->p_data = p_data;
1646         cld->ref_clk = ref_clk;
1647         cld->soc_clk = soc_clk;
1648         cld->i2c_clk = i2c_clk;
1649         cld->dfll_clk = dfll_clk;
1650         cld->safe_dvfs = safe_dvfs_clk->dvfs;
1651 #ifdef CONFIG_THERMAL
1652         INIT_WORK(&cld->init_cdev_work, tegra_cl_dvfs_init_cdev);
1653 #endif
1654         /* Initialize cl_dvfs */
1655         ret = cl_dvfs_init(cld);
1656         if (ret) {
1657                 kfree(cld);
1658                 return ret;
1659         }
1660
1661         platform_set_drvdata(pdev, cld);
1662
1663         /*
1664          *  The I2C interface mux is embedded in the cl_dvfs h/w, so the
1665          *  attached regulator can be accessed by s/w independently. The PWM
1666          *  interface, on the other hand, is accessible solely through cl_dvfs
1667          *  registers. Hence, the bypass device is supported in PWM mode only.
1668          */
1669         if ((p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM) &&
1670             p_data->u.pmu_pwm.dfll_bypass_dev) {
1671                 clk_enable(cld->soc_clk);
1672                 tegra_cl_dvfs_bypass_dev_register(
1673                         cld, p_data->u.pmu_pwm.dfll_bypass_dev);
1674         }
1675
1676         /*
1677          * Schedule cooling device registration as a separate work item to
1678          * address the following race: when cl_dvfs is probed, the DFLL child
1679          * clock (e.g., CPU) cannot be changed; on the other hand, cooling
1680          * device registration updates the entire thermal zone, and may
1681          * trigger a rate change of the target clock.
1682          */
1683         if (cld->safe_dvfs->dvfs_rail->vmin_cdev ||
1684             cld->safe_dvfs->dvfs_rail->vmax_cdev)
1685                 schedule_work(&cld->init_cdev_work);
1686         return 0;
1687 }
1688
1689 static struct platform_driver tegra_cl_dvfs_driver = {
1690         .driver         = {
1691                 .name   = "tegra_cl_dvfs",
1692                 .owner  = THIS_MODULE,
1693 #ifdef CONFIG_PM_SLEEP
1694                 .pm = &tegra_cl_dvfs_pm_ops,
1695 #endif
1696         },
1697 };
1698
1699 int __init tegra_init_cl_dvfs(void)
1700 {
1701         return platform_driver_probe(&tegra_cl_dvfs_driver,
1702                                      tegra_cl_dvfs_probe);
1703 }
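/*
 * platform_driver_probe() is used instead of platform_driver_register() so
 * that the probe routine can live in __init memory: the driver only binds to
 * devices already registered at this point and is never probed again later.
 */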
1704
1705 /*
1706  * CL_DVFS states:
1707  *
1708  * - DISABLED: control logic mode - DISABLED, output interface disabled,
1709  *   dfll in reset
1710  * - OPEN_LOOP: control logic mode - OPEN_LOOP, output interface disabled,
1711  *   dfll is running "unlocked"
1712  * - CLOSED_LOOP: control logic mode - CLOSED_LOOP, output interface enabled,
1713  *   dfll is running "locked"
1714  */
1715
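/*
 * Mode transitions implemented by the functions below:
 *
 *	DISABLED     --tegra_cl_dvfs_enable()-->   OPEN_LOOP
 *	OPEN_LOOP    --tegra_cl_dvfs_lock()---->   CLOSED_LOOP
 *	CLOSED_LOOP  --tegra_cl_dvfs_unlock()-->   OPEN_LOOP
 *	OPEN_LOOP or CLOSED_LOOP --tegra_cl_dvfs_disable()--> DISABLED
 *
 * Disabling directly from closed loop triggers a warning, and requests made
 * in UNINITIALIZED mode are rejected with -EPERM.
 */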
1716 /* Switch from any other state to DISABLED state */
1717 void tegra_cl_dvfs_disable(struct tegra_cl_dvfs *cld)
1718 {
1719         switch (cld->mode) {
1720         case TEGRA_CL_DVFS_CLOSED_LOOP:
1721                 WARN(1, "DFLL is disabled directly from closed loop mode\n");
1722                 set_ol_config(cld);
1723                 output_disable_ol_prepare(cld);
1724                 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1725                 output_disable_post_ol(cld);
1726                 invalidate_request(cld);
1727                 cl_dvfs_disable_clocks(cld);
1728                 return;
1729
1730         case TEGRA_CL_DVFS_OPEN_LOOP:
1731                 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1732                 invalidate_request(cld);
1733                 cl_dvfs_disable_clocks(cld);
1734                 return;
1735
1736         default:
1737                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1738                 return;
1739         }
1740 }
1741
1742 /* Switch from DISABLE state to OPEN_LOOP state */
1743 int tegra_cl_dvfs_enable(struct tegra_cl_dvfs *cld)
1744 {
1745         if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1746                 pr_err("%s: Cannot enable DFLL in %s mode\n",
1747                        __func__, mode_name[cld->mode]);
1748                 return -EPERM;
1749         }
1750
1751         if (cld->mode != TEGRA_CL_DVFS_DISABLED)
1752                 return 0;
1753
1754         cl_dvfs_enable_clocks(cld);
1755         set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1756         return 0;
1757 }
1758
1759 /* Switch from OPEN_LOOP state to CLOSED_LOOP state */
1760 int tegra_cl_dvfs_lock(struct tegra_cl_dvfs *cld)
1761 {
1762         struct dfll_rate_req *req = &cld->last_req;
1763
1764         switch (cld->mode) {
1765         case TEGRA_CL_DVFS_CLOSED_LOOP:
1766                 return 0;
1767
1768         case TEGRA_CL_DVFS_OPEN_LOOP:
1769                 if (req->freq == 0) {
1770                         pr_err("%s: Cannot lock DFLL at rate 0\n", __func__);
1771                         return -EINVAL;
1772                 }
1773
1774                 /*
1775                  * Update control logic setting with last rate request;
1776                  * sync output limits with current tuning and thermal state,
1777                  * enable output and switch to closed loop mode. Make sure
1778                  * forced output does not interfere with closed loop.
1779                  */
1780                 set_cl_config(cld, req);
1781                 output_enable(cld);
1782                 set_mode(cld, TEGRA_CL_DVFS_CLOSED_LOOP);
1783                 set_request(cld, req);
1784                 disable_forced_output(cld);
1785                 calibration_timer_update(cld);
1786                 return 0;
1787
1788         default:
1789                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1790                 pr_err("%s: Cannot lock DFLL in %s mode\n",
1791                        __func__, mode_name[cld->mode]);
1792                 return -EPERM;
1793         }
1794 }
1795
1796 /* Switch from CLOSED_LOOP state to OPEN_LOOP state */
1797 int tegra_cl_dvfs_unlock(struct tegra_cl_dvfs *cld)
1798 {
1799         int ret;
1800
1801         switch (cld->mode) {
1802         case TEGRA_CL_DVFS_CLOSED_LOOP:
1803                 set_ol_config(cld);
1804                 ret = output_disable_ol_prepare(cld);
1805                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1806                 if (!ret)
1807                         ret = output_disable_post_ol(cld);
1808                 return ret;
1809
1810         case TEGRA_CL_DVFS_OPEN_LOOP:
1811                 return 0;
1812
1813         default:
1814                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1815                 pr_err("%s: Cannot unlock DFLL in %s mode\n",
1816                        __func__, mode_name[cld->mode]);
1817                 return -EPERM;
1818         }
1819 }
1820
1821 /*
1822  * Convert requested rate into the control logic settings. In CLOSED_LOOP mode,
1823  * update new settings immediately to adjust DFLL output rate accordingly.
1824  * Otherwise, just save them until next switch to closed loop.
1825  */
1826 int tegra_cl_dvfs_request_rate(struct tegra_cl_dvfs *cld, unsigned long rate)
1827 {
1828         u32 val;
1829         struct dfll_rate_req req;
1830         req.rate = rate;
1831
1832         if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1833                 pr_err("%s: Cannot set DFLL rate in %s mode\n",
1834                        __func__, mode_name[cld->mode]);
1835                 return -EPERM;
1836         }
1837
1838         /* Calibrate dfll minimum rate */
1839         cl_dvfs_calibrate(cld);
1840
1841         /* Determine DFLL output scale */
1842         req.scale = SCALE_MAX - 1;
1843         if (rate < cld->dvco_rate_min) {
1844                 int scale = DIV_ROUND_CLOSEST((rate / 1000 * SCALE_MAX),
1845                         (cld->dvco_rate_min / 1000));
1846                 if (!scale) {
1847                         pr_err("%s: Rate %lu is below scalable range\n",
1848                                __func__, rate);
1849                         return -EINVAL;
1850                 }
1851                 req.scale = scale - 1;
1852                 rate = cld->dvco_rate_min;
1853         }
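        /*
         * Worked example with illustrative numbers: for dvco_rate_min of
         * 800 MHz and a 400 MHz request, scale = DIV_ROUND_CLOSEST(400000 *
         * 256, 800000) = 128, so req.scale = 127 and the DVCO output is
         * skipped down by (127 + 1) / 256 from the minimum rate, i.e. back
         * to ~400 MHz.
         */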
1854
1855         /* Convert requested rate into frequency request and scale settings */
1856         val = GET_REQUEST_FREQ(rate, cld->ref_rate);
1857         if (val > FREQ_MAX) {
1858                 pr_err("%s: Rate %lu is above dfll range\n", __func__, rate);
1859                 return -EINVAL;
1860         }
1861         req.freq = val;
1862         rate = GET_REQUEST_RATE(val, cld->ref_rate);
1863
1864         /* Find safe voltage for requested rate */
1865         if (find_safe_output(cld, rate, &req.output)) {
1866                 pr_err("%s: Failed to find safe output for rate %lu\n",
1867                        __func__, rate);
1868                 return -EINVAL;
1869         }
1870         req.cap = req.output;
1871
1872         /*
1873          * Save validated request, and in CLOSED_LOOP mode actually update
1874          * control logic settings; use request output to set maximum voltage
1875          * limit, but keep one LUT step room above safe voltage
1876          */
1877         cld->last_req = req;
1878
1879         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1880                 set_cl_config(cld, &cld->last_req);
1881                 set_request(cld, &cld->last_req);
1882         }
1883         return 0;
1884 }
1885
1886 unsigned long tegra_cl_dvfs_request_get(struct tegra_cl_dvfs *cld)
1887 {
1888         struct dfll_rate_req *req = &cld->last_req;
1889
1890         /*
1891          * If running below the dvco minimum rate, resolution is the skipper
1892          * step (dvco min rate / 256) - return the last requested rate rounded
1893          * to 1 kHz. If running above the dvco minimum, resolution is the
1894          * closed loop step (ref rate / 2) - return the cl_dvfs target rate.
1895          */
1896         if ((req->scale + 1) < SCALE_MAX)
1897                 return req->rate / 1000 * 1000;
1898
1899         return GET_REQUEST_RATE(req->freq, cld->ref_rate);
1900 }
1901
1902 #ifdef CONFIG_DEBUG_FS
1903
1904 static int lock_get(void *data, u64 *val)
1905 {
1906         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1907         *val = cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP;
1908         return 0;
1909 }
1910 static int lock_set(void *data, u64 val)
1911 {
1912         struct clk *c = (struct clk *)data;
1913         return tegra_clk_cfg_ex(c, TEGRA_CLK_DFLL_LOCK, val);
1914 }
1915 DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");
1916
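/*
 * Reports the effective DFLL output rate: the monitored DVCO rate scaled by
 * the output skipper setting, i.e. monitored_rate * (scale + 1) / 256.
 */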
1917 static int monitor_get(void *data, u64 *val)
1918 {
1919         u32 v, s;
1920         unsigned long flags;
1921         struct clk *c = (struct clk *)data;
1922         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1923
1924         clk_enable(cld->soc_clk);
1925         clk_lock_save(c, &flags);
1926
1927         switch_monitor(cld, CL_DVFS_MONITOR_CTRL_FREQ);
1928
1929         v = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
1930                 CL_DVFS_MONITOR_DATA_MASK;
1931         v = GET_MONITORED_RATE(v, cld->ref_rate);
1932         s = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
1933         s = (s & CL_DVFS_FREQ_REQ_SCALE_MASK) >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1934         *val = (u64)v * (s + 1) / 256;
1935
1936         clk_unlock_restore(c, &flags);
1937         clk_disable(cld->soc_clk);
1938         return 0;
1939 }
1940 DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
1941
1942 static int output_get(void *data, u64 *val)
1943 {
1944         u32 v;
1945         unsigned long flags;
1946         struct clk *c = (struct clk *)data;
1947         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1948
1949         clk_enable(cld->soc_clk);
1950         clk_lock_save(c, &flags);
1951
1952         v = cl_dvfs_get_output(cld);
1953         *val = is_i2c(cld) ? cld->out_map[v]->reg_uV / 1000 :
1954                 cld->p_data->vdd_map[v].reg_uV / 1000;
1955
1956         clk_unlock_restore(c, &flags);
1957         clk_disable(cld->soc_clk);
1958         return 0;
1959 }
1960 DEFINE_SIMPLE_ATTRIBUTE(output_fops, output_get, NULL, "%llu\n");
1961
1962 static int vmax_get(void *data, u64 *val)
1963 {
1964         u32 v;
1965         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1966         v = cld->lut_max;
1967         *val = is_i2c(cld) ? cld->out_map[v]->reg_uV / 1000 :
1968                 cld->p_data->vdd_map[v].reg_uV / 1000;
1969         return 0;
1970 }
1971 DEFINE_SIMPLE_ATTRIBUTE(vmax_fops, vmax_get, NULL, "%llu\n");
1972
1973 static int vmin_get(void *data, u64 *val)
1974 {
1975         u32 v;
1976         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1977         v = cld->lut_min;
1978         *val = is_i2c(cld) ? cld->out_map[v]->reg_uV / 1000 :
1979                 cld->p_data->vdd_map[v].reg_uV / 1000;
1980         return 0;
1981 }
1982 DEFINE_SIMPLE_ATTRIBUTE(vmin_fops, vmin_get, NULL, "%llu\n");
1983
1984 static int tune_high_mv_get(void *data, u64 *val)
1985 {
1986         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1987         *val = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
1988         return 0;
1989 }
1990 static int tune_high_mv_set(void *data, u64 val)
1991 {
1992         unsigned long flags;
1993         struct clk *c = (struct clk *)data;
1994         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1995
1996         clk_lock_save(c, &flags);
1997
1998         cld->safe_dvfs->dfll_data.tune_high_min_millivolts = val;
1999         cl_dvfs_init_output_thresholds(cld);
2000         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2001                 set_cl_config(cld, &cld->last_req);
2002                 set_request(cld, &cld->last_req);
2003         }
2004
2005         clk_unlock_restore(c, &flags);
2006         return 0;
2007 }
2008 DEFINE_SIMPLE_ATTRIBUTE(tune_high_mv_fops, tune_high_mv_get, tune_high_mv_set,
2009                         "%llu\n");
2010
2011 static int fout_mv_get(void *data, u64 *val)
2012 {
2013         u32 v;
2014         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2015         v = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE) & OUT_MASK;
2016         *val = cld->p_data->vdd_map[v].reg_uV / 1000;
2017         return 0;
2018 }
2019 static int fout_mv_set(void *data, u64 val)
2020 {
2021         u32 v;
2022         unsigned long flags;
2023         struct clk *c = (struct clk *)data;
2024         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2025
2026         /* FIXME: do we need it in i2c mode ? */
2027         if (is_i2c(cld))
2028                 return -ENOSYS;
2029
2030         clk_lock_save(c, &flags);
2031         clk_enable(cld->soc_clk);
2032
2033         v = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
2034         if (val) {
2035                 val = find_mv_out_cap(cld, (int)val);
2036                 v = (v & CL_DVFS_OUTPUT_FORCE_ENABLE) | (u32)val;
2037                 cl_dvfs_writel(cld, v, CL_DVFS_OUTPUT_FORCE);
2038                 cl_dvfs_wmb(cld);
2039
2040                 if (!(v & CL_DVFS_OUTPUT_FORCE_ENABLE)) {
2041                         v |= CL_DVFS_OUTPUT_FORCE_ENABLE;
2042                         cl_dvfs_writel(cld, v, CL_DVFS_OUTPUT_FORCE);
2043                         cl_dvfs_wmb(cld);
2044                 }
2045         } else if (v & CL_DVFS_OUTPUT_FORCE_ENABLE) {
2046                 v &= ~CL_DVFS_OUTPUT_FORCE_ENABLE;
2047                 cl_dvfs_writel(cld, v, CL_DVFS_OUTPUT_FORCE);
2048                 cl_dvfs_wmb(cld);
2049         }
2050
2051         clk_disable(cld->soc_clk);
2052         clk_unlock_restore(c, &flags);
2053         return 0;
2054 }
2055 DEFINE_SIMPLE_ATTRIBUTE(fout_mv_fops, fout_mv_get, fout_mv_set, "%llu\n");
2056
2057 static int fmin_get(void *data, u64 *val)
2058 {
2059         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2060         *val = cld->dvco_rate_min;
2061         return 0;
2062 }
2063 DEFINE_SIMPLE_ATTRIBUTE(dvco_rate_min_fops, fmin_get, NULL, "%llu\n");
2064
2065 static int calibr_delay_get(void *data, u64 *val)
2066 {
2067         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2068         *val = jiffies_to_msecs(cld->calibration_delay);
2069         return 0;
2070 }
2071 static int calibr_delay_set(void *data, u64 val)
2072 {
2073         unsigned long flags;
2074         struct clk *c = (struct clk *)data;
2075         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2076
2077         clk_lock_save(c, &flags);
2078         cld->calibration_delay = msecs_to_jiffies(val);
2079         clk_unlock_restore(c, &flags);
2080         return 0;
2081 }
2082 DEFINE_SIMPLE_ATTRIBUTE(calibr_delay_fops, calibr_delay_get, calibr_delay_set,
2083                         "%llu\n");
2084
2085 static int undershoot_get(void *data, u64 *val)
2086 {
2087         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2088         *val = cld->p_data->pmu_undershoot_gb;
2089         return 0;
2090 }
2091 static int undershoot_set(void *data, u64 val)
2092 {
2093         unsigned long flags;
2094         struct clk *c = (struct clk *)data;
2095         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2096
2097         clk_lock_save(c, &flags);
2098         cld->p_data->pmu_undershoot_gb = val;
2099         cl_dvfs_set_force_out_min(cld);
2100         clk_unlock_restore(c, &flags);
2101         return 0;
2102 }
2103 DEFINE_SIMPLE_ATTRIBUTE(undershoot_fops, undershoot_get, undershoot_set,
2104                         "%llu\n");
2105
2106 static int cl_register_show(struct seq_file *s, void *data)
2107 {
2108         u32 offs;
2109         struct clk *c = s->private;
2110         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2111
2112         clk_enable(cld->soc_clk);
2113
2114         seq_printf(s, "CONTROL REGISTERS:\n");
2115         for (offs = 0; offs <= CL_DVFS_MONITOR_DATA; offs += 4)
2116                 seq_printf(s, "[0x%02x] = 0x%08x\n",
2117                            offs, cl_dvfs_readl(cld, offs));
2118
2119         seq_printf(s, "\nI2C and INTR REGISTERS:\n");
2120         for (offs = CL_DVFS_I2C_CFG; offs <= CL_DVFS_I2C_STS; offs += 4)
2121                 seq_printf(s, "[0x%02x] = 0x%08x\n",
2122                            offs, cl_dvfs_readl(cld, offs));
2123
2124         offs = CL_DVFS_INTR_STS;
2125         seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
2126         offs = CL_DVFS_INTR_EN;
2127         seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
2128
2129         seq_printf(s, "\nLUT:\n");
2130         for (offs = CL_DVFS_OUTPUT_LUT;
2131              offs < CL_DVFS_OUTPUT_LUT + 4 * MAX_CL_DVFS_VOLTAGES;
2132              offs += 4)
2133                 seq_printf(s, "[0x%02x] = 0x%08x\n",
2134                            offs, cl_dvfs_readl(cld, offs));
2135
2136         clk_disable(cld->soc_clk);
2137         return 0;
2138 }
2139
2140 static int cl_register_open(struct inode *inode, struct file *file)
2141 {
2142         return single_open(file, cl_register_show, inode->i_private);
2143 }
2144
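/*
 * Accepts writes in the same "[0x<offset>] = 0x<value>" format that the read
 * side prints, e.g. (the debugfs path prefix is illustrative):
 *
 *	echo "[0x<offs>] = 0x<val>" > .../cl_dvfs/registers
 *
 * The offset is rounded down to a 32-bit register boundary before writing.
 */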
2145 static ssize_t cl_register_write(struct file *file,
2146         const char __user *userbuf, size_t count, loff_t *ppos)
2147 {
2148         char buf[80];
2149         u32 offs;
2150         u32 val;
2151         struct clk *c = file->f_path.dentry->d_inode->i_private;
2152         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2153
2154         if (sizeof(buf) <= count)
2155                 return -EINVAL;
2156
2157         if (copy_from_user(buf, userbuf, count))
2158                 return -EFAULT;
2159
2160         /* terminate buffer and trim - whitespace may be appended at the end
2161          * when invoked from the shell command line */
2162         buf[count] = '\0';
2163         strim(buf);
2164
2165         if (sscanf(buf, "[0x%x] = 0x%x", &offs, &val) != 2)
2166                 return -EINVAL;
2167
2168         clk_enable(cld->soc_clk);
2169         cl_dvfs_writel(cld, val, offs & (~0x3));
2170         clk_disable(cld->soc_clk);
2171         return count;
2172 }
2173
2174 static const struct file_operations cl_register_fops = {
2175         .open           = cl_register_open,
2176         .read           = seq_read,
2177         .write          = cl_register_write,
2178         .llseek         = seq_lseek,
2179         .release        = single_release,
2180 };
2181
2182 int __init tegra_cl_dvfs_debug_init(struct clk *dfll_clk)
2183 {
2184         struct dentry *cl_dvfs_dentry;
2185
2186         if (!dfll_clk || !dfll_clk->dent || (dfll_clk->state == UNINITIALIZED))
2187                 return 0;
2188
2189         if (!debugfs_create_file("lock", S_IRUGO | S_IWUSR,
2190                 dfll_clk->dent, dfll_clk, &lock_fops))
2191                 goto err_out;
2192
2193         cl_dvfs_dentry = debugfs_create_dir("cl_dvfs", dfll_clk->dent);
2194         if (!cl_dvfs_dentry)
2195                 goto err_out;
2196
2197         if (!debugfs_create_file("monitor", S_IRUGO,
2198                 cl_dvfs_dentry, dfll_clk, &monitor_fops))
2199                 goto err_out;
2200
2201         if (!debugfs_create_file("output_mv", S_IRUGO,
2202                 cl_dvfs_dentry, dfll_clk, &output_fops))
2203                 goto err_out;
2204
2205         if (!debugfs_create_file("vmax_mv", S_IRUGO,
2206                 cl_dvfs_dentry, dfll_clk, &vmax_fops))
2207                 goto err_out;
2208
2209         if (!debugfs_create_file("vmin_mv", S_IRUGO,
2210                 cl_dvfs_dentry, dfll_clk, &vmin_fops))
2211                 goto err_out;
2212
2213         if (!debugfs_create_file("tune_high_mv", S_IRUGO | S_IWUSR,
2214                 cl_dvfs_dentry, dfll_clk, &tune_high_mv_fops))
2215                 goto err_out;
2216
2217         if (!debugfs_create_file("force_out_mv", S_IRUGO,
2218                 cl_dvfs_dentry, dfll_clk, &fout_mv_fops))
2219                 goto err_out;
2220
2221         if (!debugfs_create_file("dvco_min", S_IRUGO,
2222                 cl_dvfs_dentry, dfll_clk, &dvco_rate_min_fops))
2223                 goto err_out;
2224
2225         if (!debugfs_create_file("calibr_delay", S_IRUGO,
2226                 cl_dvfs_dentry, dfll_clk, &calibr_delay_fops))
2227                 goto err_out;
2228
2229         if (!debugfs_create_file("pmu_undershoot_gb", S_IRUGO,
2230                 cl_dvfs_dentry, dfll_clk, &undershoot_fops))
2231                 goto err_out;
2232
2233         if (!debugfs_create_file("registers", S_IRUGO | S_IWUSR,
2234                 cl_dvfs_dentry, dfll_clk, &cl_register_fops))
2235                 goto err_out;
2236
2237         return 0;
2238
2239 err_out:
2240         debugfs_remove_recursive(dfll_clk->dent);
2241         return -ENOMEM;
2242 }
2243 #endif