ARM: tegra12: set CPU rate to 2.2GHz for sku 0x87
[linux-3.10.git] / arch / arm / mach-tegra / tegra_cl_dvfs.c
1 /*
2  * arch/arm/mach-tegra/tegra_cl_dvfs.c
3  *
4  * Copyright (c) 2012-2013 NVIDIA CORPORATION. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/err.h>
23 #include <linux/io.h>
24 #include <linux/clk.h>
25 #include <linux/interrupt.h>
26 #include <linux/suspend.h>
27 #include <linux/debugfs.h>
28 #include <linux/seq_file.h>
29 #include <linux/uaccess.h>
30 #include <linux/module.h>
31 #include <linux/platform_device.h>
32 #include <linux/gpio.h>
33 #include <linux/regulator/tegra-dfll-bypass-regulator.h>
34 #include <linux/tegra-soc.h>
35
36 #include <mach/irqs.h>
37 #include <mach/pinmux.h>
38
39 #include "tegra_cl_dvfs.h"
40 #include "clock.h"
41 #include "dvfs.h"
42 #include "iomap.h"
43 #include "tegra_simon.h"
44
45 #define OUT_MASK                        0x3f
46
47 #define CL_DVFS_CTRL                    0x00
48 #define CL_DVFS_CONFIG                  0x04
49 #define CL_DVFS_CONFIG_DIV_MASK         0xff
50
51 #define CL_DVFS_PARAMS                  0x08
52 #define CL_DVFS_PARAMS_CG_SCALE         (0x1 << 24)
53 #define CL_DVFS_PARAMS_FORCE_MODE_SHIFT 22
54 #define CL_DVFS_PARAMS_FORCE_MODE_MASK  (0x3 << CL_DVFS_PARAMS_FORCE_MODE_SHIFT)
55 #define CL_DVFS_PARAMS_CF_PARAM_SHIFT   16
56 #define CL_DVFS_PARAMS_CF_PARAM_MASK    (0x3f << CL_DVFS_PARAMS_CF_PARAM_SHIFT)
57 #define CL_DVFS_PARAMS_CI_PARAM_SHIFT   8
58 #define CL_DVFS_PARAMS_CI_PARAM_MASK    (0x7 << CL_DVFS_PARAMS_CI_PARAM_SHIFT)
59 #define CL_DVFS_PARAMS_CG_PARAM_SHIFT   0
60 #define CL_DVFS_PARAMS_CG_PARAM_MASK    (0xff << CL_DVFS_PARAMS_CG_PARAM_SHIFT)
61
62 #define CL_DVFS_TUNE0                   0x0c
63 #define CL_DVFS_TUNE1                   0x10
64
65 #define CL_DVFS_FREQ_REQ                0x14
66 #define CL_DVFS_FREQ_REQ_FORCE_ENABLE   (0x1 << 28)
67 #define CL_DVFS_FREQ_REQ_FORCE_SHIFT    16
68 #define CL_DVFS_FREQ_REQ_FORCE_MASK     (0xfff << CL_DVFS_FREQ_REQ_FORCE_SHIFT)
69 #define FORCE_MAX                       2047
70 #define FORCE_MIN                       -2048
71 #define CL_DVFS_FREQ_REQ_SCALE_SHIFT    8
72 #define CL_DVFS_FREQ_REQ_SCALE_MASK     (0xff << CL_DVFS_FREQ_REQ_SCALE_SHIFT)
73 #define SCALE_MAX                       256
74 #define CL_DVFS_FREQ_REQ_FREQ_VALID     (0x1 << 7)
75 #define CL_DVFS_FREQ_REQ_FREQ_SHIFT     0
76 #define CL_DVFS_FREQ_REQ_FREQ_MASK      (0x7f << CL_DVFS_FREQ_REQ_FREQ_SHIFT)
77 #define FREQ_MAX                        127
78
79 #define CL_DVFS_SCALE_RAMP              0x18
80
81 #define CL_DVFS_DROOP_CTRL              0x1c
82 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT 16
83 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK  \
84                 (0xff << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT)
85 #define CL_DVFS_DROOP_CTRL_CUT_SHIFT    8
86 #define CL_DVFS_DROOP_CTRL_CUT_MASK     (0xf << CL_DVFS_DROOP_CTRL_CUT_SHIFT)
87 #define CL_DVFS_DROOP_CTRL_RAMP_SHIFT   0
88 #define CL_DVFS_DROOP_CTRL_RAMP_MASK    (0xff << CL_DVFS_DROOP_CTRL_RAMP_SHIFT)
89
90 #define CL_DVFS_OUTPUT_CFG              0x20
91 #define CL_DVFS_OUTPUT_CFG_I2C_ENABLE   (0x1 << 30)
92 #define CL_DVFS_OUTPUT_CFG_SAFE_SHIFT   24
93 #define CL_DVFS_OUTPUT_CFG_SAFE_MASK    \
94                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT)
95 #define CL_DVFS_OUTPUT_CFG_MAX_SHIFT    16
96 #define CL_DVFS_OUTPUT_CFG_MAX_MASK     \
97                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MAX_SHIFT)
98 #define CL_DVFS_OUTPUT_CFG_MIN_SHIFT    8
99 #define CL_DVFS_OUTPUT_CFG_MIN_MASK     \
100                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MIN_SHIFT)
101 #define CL_DVFS_OUTPUT_CFG_PWM_DELTA    (0x1 << 7)
102 #define CL_DVFS_OUTPUT_CFG_PWM_ENABLE   (0x1 << 6)
103 #define CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT 0
104 #define CL_DVFS_OUTPUT_CFG_PWM_DIV_MASK  \
105                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT)
106
107 #define CL_DVFS_OUTPUT_FORCE            0x24
108 #define CL_DVFS_OUTPUT_FORCE_ENABLE     (0x1 << 6)
109 #define CL_DVFS_OUTPUT_FORCE_VALUE_SHIFT 0
110 #define CL_DVFS_OUTPUT_FORCE_VALUE_MASK  \
111                 (OUT_MASK << CL_DVFS_OUTPUT_FORCE_VALUE_SHIFT)
112
113 #define CL_DVFS_MONITOR_CTRL            0x28
114 #define CL_DVFS_MONITOR_CTRL_DISABLE    0
115 #define CL_DVFS_MONITOR_CTRL_OUT        5
116 #define CL_DVFS_MONITOR_CTRL_FREQ       6
117 #define CL_DVFS_MONITOR_DATA            0x2c
118 #define CL_DVFS_MONITOR_DATA_NEW        (0x1 << 16)
119 #define CL_DVFS_MONITOR_DATA_MASK       0xFFFF
120
121 #define CL_DVFS_I2C_CFG                 0x40
122 #define CL_DVFS_I2C_CFG_ARB_ENABLE      (0x1 << 20)
123 #define CL_DVFS_I2C_CFG_HS_CODE_SHIFT   16
124 #define CL_DVFS_I2C_CFG_HS_CODE_MASK    (0x7 << CL_DVFS_I2C_CFG_HS_CODE_SHIFT)
125 #define CL_DVFS_I2C_CFG_PACKET_ENABLE   (0x1 << 15)
126 #define CL_DVFS_I2C_CFG_SIZE_SHIFT      12
127 #define CL_DVFS_I2C_CFG_SIZE_MASK       (0x7 << CL_DVFS_I2C_CFG_SIZE_SHIFT)
128 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_10   (0x1 << 10)
129 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT 0
130 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_MASK \
131                 (0x3ff << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT)
132
133 #define CL_DVFS_I2C_VDD_REG_ADDR        0x44
134 #define CL_DVFS_I2C_STS                 0x48
135 #define CL_DVFS_I2C_STS_I2C_LAST_SHIFT  1
136 #define CL_DVFS_I2C_STS_I2C_REQ_PENDING 0x1
137
138 #define CL_DVFS_INTR_STS                0x5c
139 #define CL_DVFS_INTR_EN                 0x60
140 #define CL_DVFS_INTR_MIN_MASK           0x1
141 #define CL_DVFS_INTR_MAX_MASK           0x2
142
143 #define CL_DVFS_I2C_CLK_DIVISOR         0x16c
144 #define CL_DVFS_I2C_CLK_DIVISOR_MASK    0xffff
145 #define CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT 16
146 #define CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT 0
147
148 #define CL_DVFS_OUTPUT_LUT              0x200
149
150 #define CL_DVFS_CALIBR_TIME             40000
151 #define CL_DVFS_OUTPUT_PENDING_TIMEOUT  1000
152 #define CL_DVFS_OUTPUT_RAMP_DELAY       100
153 #define CL_DVFS_TUNE_HIGH_DELAY         2000
154
155 #define CL_DVFS_TUNE_HIGH_MARGIN_MV     20
156
157 enum tegra_cl_dvfs_ctrl_mode {
158         TEGRA_CL_DVFS_UNINITIALIZED = 0,
159         TEGRA_CL_DVFS_DISABLED = 1,
160         TEGRA_CL_DVFS_OPEN_LOOP = 2,
161         TEGRA_CL_DVFS_CLOSED_LOOP = 3,
162 };
163
164 enum tegra_cl_dvfs_tune_state {
165         TEGRA_CL_DVFS_TUNE_LOW = 0,
166         TEGRA_CL_DVFS_TUNE_HIGH_REQUEST,
167         TEGRA_CL_DVFS_TUNE_HIGH,
168 };
169
170 struct dfll_rate_req {
171         u8      freq;
172         u8      scale;
173         u8      output;
174         u8      cap;
175         unsigned long rate;
176 };
177
178 struct tegra_cl_dvfs {
179         void                                    *cl_base;
180         void                                    *cl_i2c_base;
181         struct tegra_cl_dvfs_platform_data      *p_data;
182
183         struct dvfs                     *safe_dvfs;
184         struct thermal_cooling_device   *vmax_cdev;
185         struct thermal_cooling_device   *vmin_cdev;
186         struct work_struct              init_cdev_work;
187
188         struct clk                      *soc_clk;
189         struct clk                      *ref_clk;
190         struct clk                      *i2c_clk;
191         struct clk                      *dfll_clk;
192         unsigned long                   ref_rate;
193         unsigned long                   i2c_rate;
194
195         /* output voltage mapping:
196          * legacy dvfs table index -to- cl_dvfs output LUT index
197          * cl_dvfs output LUT index -to- PMU value/voltage pair ptr
198          */
199         u8                              clk_dvfs_map[MAX_DVFS_FREQS];
200         struct voltage_reg_map          *out_map[MAX_CL_DVFS_VOLTAGES];
201         u8                              num_voltages;
202         u8                              safe_output;
203         u8                              tune_high_out_start;
204         u8                              tune_high_out_min;
205         u8                              minimax_output;
206         u8                              thermal_out_caps[MAX_THERMAL_LIMITS];
207         u8                              thermal_out_floors[MAX_THERMAL_LIMITS];
208         int                             therm_caps_num;
209         int                             therm_floors_num;
210         unsigned long                   dvco_rate_floors[MAX_THERMAL_LIMITS+1];
211         unsigned long                   dvco_rate_min;
212
213         u8                              lut_min;
214         u8                              lut_max;
215         u8                              force_out_min;
216         u32                             suspended_force_out;
217         int                             therm_cap_idx;
218         int                             therm_floor_idx;
219         struct dfll_rate_req            last_req;
220         enum tegra_cl_dvfs_tune_state   tune_state;
221         enum tegra_cl_dvfs_ctrl_mode    mode;
222
223         struct timer_list               tune_timer;
224         unsigned long                   tune_delay;
225         struct timer_list               calibration_timer;
226         unsigned long                   calibration_delay;
227         ktime_t                         last_calibration;
228         unsigned long                   calibration_range_min;
229         unsigned long                   calibration_range_max;
230
231         struct notifier_block           simon_grade_nb;
232 };
233
234 /* Conversion macros (different scales for frequency request, and monitored
235    rate is not a typo) */
236 #define RATE_STEP(cld)                          ((cld)->ref_rate / 2)
237 #define GET_REQUEST_FREQ(rate, ref_rate)        ((rate) / ((ref_rate) / 2))
238 #define GET_REQUEST_RATE(freq, ref_rate)        ((freq) * ((ref_rate) / 2))
239 #define GET_MONITORED_RATE(freq, ref_rate)      ((freq) * ((ref_rate) / 4))
240 #define GET_DROOP_FREQ(rate, ref_rate)          ((rate) / ((ref_rate) / 4))
241 #define ROUND_MIN_RATE(rate, ref_rate)          \
242                 (DIV_ROUND_UP(rate, (ref_rate) / 2) * ((ref_rate) / 2))
243 #define GET_DIV(ref_rate, out_rate, scale)      \
244                 DIV_ROUND_UP((ref_rate), (out_rate) * (scale))
245
246 static const char *mode_name[] = {
247         [TEGRA_CL_DVFS_UNINITIALIZED] = "uninitialized",
248         [TEGRA_CL_DVFS_DISABLED] = "disabled",
249         [TEGRA_CL_DVFS_OPEN_LOOP] = "open_loop",
250         [TEGRA_CL_DVFS_CLOSED_LOOP] = "closed_loop",
251 };
252
253 /*
254  * In some h/w configurations CL-DVFS module registers have two different
255  * address bases: one for I2C control/status registers, and one for all other
256  * registers. Registers accessors are separated below accordingly just by
257  * comparing register offset with start of I2C section - CL_DVFS_I2C_CFG. One
258  * special case is CL_DVFS_OUTPUT_CFG register: when I2C controls are separated
259  * I2C_ENABLE bit of this register is accessed from I2C base, and all other bits
260  * are accessed from the main base.
261  */
262 static inline u32 cl_dvfs_i2c_readl(struct tegra_cl_dvfs *cld, u32 offs)
263 {
264         return __raw_readl(cld->cl_i2c_base + offs);
265 }
266 static inline void cl_dvfs_i2c_writel(struct tegra_cl_dvfs *cld,
267                                       u32 val, u32 offs)
268 {
269         __raw_writel(val, cld->cl_i2c_base + offs);
270 }
271 static inline void cl_dvfs_i2c_wmb(struct tegra_cl_dvfs *cld)
272 {
273         wmb();
274         cl_dvfs_i2c_readl(cld, CL_DVFS_I2C_CFG);
275 }
276
277 static inline u32 cl_dvfs_readl(struct tegra_cl_dvfs *cld, u32 offs)
278 {
279         if (offs >= CL_DVFS_I2C_CFG)
280                 return cl_dvfs_i2c_readl(cld, offs);
281         return __raw_readl((void *)cld->cl_base + offs);
282 }
283 static inline void cl_dvfs_writel(struct tegra_cl_dvfs *cld, u32 val, u32 offs)
284 {
285         if (offs >= CL_DVFS_I2C_CFG) {
286                 cl_dvfs_i2c_writel(cld, val, offs);
287                 return;
288         }
289         __raw_writel(val, (void *)cld->cl_base + offs);
290 }
291 static inline void cl_dvfs_wmb(struct tegra_cl_dvfs *cld)
292 {
293         wmb();
294         cl_dvfs_readl(cld, CL_DVFS_CTRL);
295 }
296
297 static inline void switch_monitor(struct tegra_cl_dvfs *cld, u32 selector)
298 {
299         /* delay to make sure selector has switched */
300         cl_dvfs_writel(cld, selector, CL_DVFS_MONITOR_CTRL);
301         cl_dvfs_wmb(cld);
302         udelay(1);
303 }
304
305 static inline void invalidate_request(struct tegra_cl_dvfs *cld)
306 {
307         u32 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
308         val &= ~CL_DVFS_FREQ_REQ_FREQ_VALID;
309         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
310         cl_dvfs_wmb(cld);
311 }
312
313 static inline void disable_forced_output(struct tegra_cl_dvfs *cld)
314 {
315         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
316         val &= ~CL_DVFS_OUTPUT_FORCE_ENABLE;
317         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
318         cl_dvfs_wmb(cld);
319 }
320
321 /*
322  * Reading monitor data concurrently with the update may render intermediate
323  * (neither "old" nor "new") values. Synchronization with the "rising edge"
324  * of DATA_NEW makes it very unlikely, but still possible. Use simple filter:
325  * compare 2 consecutive readings for data consistency within 2 LSb range.
326  * Return error otherwise. On the platform that does not allow to use DATA_NEW
327  * at all check for consistency of consecutive reads is the only protection.
328  */
329 static int filter_monitor_data(struct tegra_cl_dvfs *cld, u32 *data)
330 {
331         u32 val = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
332                 CL_DVFS_MONITOR_DATA_MASK;
333         *data &= CL_DVFS_MONITOR_DATA_MASK;
334         if (abs(*data - val) <= 2)
335                 return 0;
336
337         *data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
338                 CL_DVFS_MONITOR_DATA_MASK;
339         if (abs(*data - val) <= 2)
340                 return 0;
341
342         return -EINVAL;
343 }
344
345 static inline void wait_data_new(struct tegra_cl_dvfs *cld, u32 *data)
346 {
347         cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA); /* clear data new */
348         if (!(cld->p_data->flags & TEGRA_CL_DVFS_DATA_NEW_NO_USE)) {
349                 do {
350                         *data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
351                 } while (!(*data & CL_DVFS_MONITOR_DATA_NEW) &&
352                          (cld->mode > TEGRA_CL_DVFS_DISABLED));
353         }
354 }
355
356 static inline u32 get_last_output(struct tegra_cl_dvfs *cld)
357 {
358         switch_monitor(cld, CL_DVFS_MONITOR_CTRL_OUT);
359         return cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
360                 CL_DVFS_MONITOR_DATA_MASK;
361 }
362
363 /* out monitored before forced value applied - return the latter if enabled */
364 static inline u32 cl_dvfs_get_output(struct tegra_cl_dvfs *cld)
365 {
366         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
367         if (val & CL_DVFS_OUTPUT_FORCE_ENABLE)
368                 return val & OUT_MASK;
369
370         switch_monitor(cld, CL_DVFS_MONITOR_CTRL_OUT);
371         wait_data_new(cld, &val);
372         return filter_monitor_data(cld, &val) ? : val;
373 }
374
375 static inline bool is_i2c(struct tegra_cl_dvfs *cld)
376 {
377         return cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C;
378 }
379
380 static inline u8 get_output_bottom(struct tegra_cl_dvfs *cld)
381 {
382         return is_i2c(cld) ? 0 : cld->out_map[0]->reg_value;
383 }
384
385 static inline u8 get_output_top(struct tegra_cl_dvfs *cld)
386 {
387         return is_i2c(cld) ?  cld->num_voltages - 1 :
388                 cld->out_map[cld->num_voltages - 1]->reg_value;
389 }
390
391 static inline int get_mv(struct tegra_cl_dvfs *cld, u32 out_val)
392 {
393         return is_i2c(cld) ? cld->out_map[out_val]->reg_uV / 1000 :
394                 cld->p_data->vdd_map[out_val].reg_uV / 1000;
395 }
396
397 static int output_enable(struct tegra_cl_dvfs *cld)
398 {
399         if (is_i2c(cld)) {
400                 u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
401                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
402                 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
403                 cl_dvfs_i2c_wmb(cld);
404         } else {
405                 u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
406                 struct tegra_cl_dvfs_platform_data *d = cld->p_data;
407                 if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
408                         int gpio = d->u.pmu_pwm.out_gpio;
409                         int v = d->u.pmu_pwm.out_enable_high ? 1 : 0;
410                         __gpio_set_value(gpio, v);
411                         return 0;
412                 }
413
414                 if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
415                         int pg = d->u.pmu_pwm.pwm_pingroup;
416                         tegra_pinmux_set_tristate(pg, TEGRA_TRI_NORMAL);
417                         return 0;
418                 }
419
420                 val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
421                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
422                 cl_dvfs_wmb(cld);
423         }
424
425         return  0;
426 }
427
428 static int output_disable_pwm(struct tegra_cl_dvfs *cld)
429 {
430         u32 val;
431         struct tegra_cl_dvfs_platform_data *d = cld->p_data;
432
433         if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
434                 int gpio = d->u.pmu_pwm.out_gpio;
435                 int v = d->u.pmu_pwm.out_enable_high ? 0 : 1;
436                 __gpio_set_value(gpio, v);
437                 return 0;
438         }
439
440         if (d->u.pmu_pwm.pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
441                 int pg = d->u.pmu_pwm.pwm_pingroup;
442                 tegra_pinmux_set_tristate(pg, TEGRA_TRI_TRISTATE);
443                 return 0;
444         }
445
446         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
447         val &= ~CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
448         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
449         cl_dvfs_wmb(cld);
450         return  0;
451 }
452
453 static noinline int output_flush_disable(struct tegra_cl_dvfs *cld)
454 {
455         int i;
456         u32 sts;
457         u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
458
459         /* Flush transactions in flight, and then disable */
460         for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
461                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
462                 udelay(2);
463                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
464                         sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
465                         if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
466                                 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
467                                 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
468                                 wmb();
469                                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
470                                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
471                                         return 0; /* no pending rqst */
472
473                                 /* Re-enable, continue wait */
474                                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
475                                 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
476                                 wmb();
477                         }
478                 }
479         }
480
481         /* I2C request is still pending - disable, anyway, but report error */
482         val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
483         cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
484         cl_dvfs_i2c_wmb(cld);
485         return -ETIMEDOUT;
486 }
487
488 static noinline int output_disable_flush(struct tegra_cl_dvfs *cld)
489 {
490         int i;
491         u32 sts;
492         u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
493
494         /* Disable output interface right away */
495         val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
496         cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
497         cl_dvfs_i2c_wmb(cld);
498
499         /* Flush possible transaction in flight */
500         for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
501                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
502                 udelay(2);
503                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
504                         sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
505                         if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
506                                 return 0;
507                 }
508         }
509
510         /* I2C request is still pending - report error */
511         return -ETIMEDOUT;
512 }
513
514 static inline int output_disable_ol_prepare(struct tegra_cl_dvfs *cld)
515 {
516         /* PWM output control */
517         if (!is_i2c(cld))
518                 return output_disable_pwm(cld);
519
520         /*
521          * If cl-dvfs h/w does not require output to be quiet before disable,
522          * s/w can stop I2C communications at any time (including operations
523          * in closed loop mode), and I2C bus integrity is guaranteed even in
524          * case of flush timeout.
525          */
526         if (!(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET)) {
527                 int ret = output_disable_flush(cld);
528                 if (ret)
529                         pr_debug("cl_dvfs: I2C pending timeout ol_prepare\n");
530                 return ret;
531         }
532         return 0;
533 }
534
535 static inline int output_disable_post_ol(struct tegra_cl_dvfs *cld)
536 {
537         /* PWM output control */
538         if (!is_i2c(cld))
539                 return 0;
540
541         /*
542          * If cl-dvfs h/w requires output to be quiet before disable, s/w
543          * should stop I2C communications only after the switch to open loop
544          * mode, and I2C bus integrity is not guaranteed in case of flush
545          * timeout
546         */
547         if (cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) {
548                 int ret = output_flush_disable(cld);
549                 if (ret)
550                         pr_err("cl_dvfs: I2C pending timeout post_ol\n");
551                 return ret;
552         }
553         return 0;
554 }
555
556 static inline void set_mode(struct tegra_cl_dvfs *cld,
557                             enum tegra_cl_dvfs_ctrl_mode mode)
558 {
559         cld->mode = mode;
560         cl_dvfs_writel(cld, mode - 1, CL_DVFS_CTRL);
561         cl_dvfs_wmb(cld);
562 }
563
564 static inline u8 get_output_cap(struct tegra_cl_dvfs *cld,
565                                 struct dfll_rate_req *req)
566 {
567         u32 thermal_cap = get_output_top(cld);
568
569         if (cld->therm_cap_idx && (cld->therm_cap_idx <= cld->therm_caps_num))
570                 thermal_cap = cld->thermal_out_caps[cld->therm_cap_idx - 1];
571         if (req && (req->cap < thermal_cap))
572                 return req->cap;
573         return thermal_cap;
574 }
575
576 static inline u8 get_output_min(struct tegra_cl_dvfs *cld)
577 {
578         u32 tune_min = get_output_bottom(cld);
579         u32 thermal_min = tune_min;
580
581         tune_min = cld->tune_state == TEGRA_CL_DVFS_TUNE_LOW ?
582                 tune_min : cld->tune_high_out_min;
583
584         if (cld->therm_floor_idx < cld->therm_floors_num)
585                 thermal_min = cld->thermal_out_floors[cld->therm_floor_idx];
586
587         return max(tune_min, thermal_min);
588 }
589
590 static inline void _load_lut(struct tegra_cl_dvfs *cld)
591 {
592         int i;
593         u32 val;
594
595         val = cld->out_map[cld->lut_min]->reg_value;
596         for (i = 0; i <= cld->lut_min; i++)
597                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
598
599         for (; i < cld->lut_max; i++) {
600                 val = cld->out_map[i]->reg_value;
601                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
602         }
603
604         val = cld->out_map[cld->lut_max]->reg_value;
605         for (; i < cld->num_voltages; i++)
606                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
607
608         cl_dvfs_i2c_wmb(cld);
609 }
610
611 static void cl_dvfs_load_lut(struct tegra_cl_dvfs *cld)
612 {
613         u32 val = cl_dvfs_i2c_readl(cld, CL_DVFS_OUTPUT_CFG);
614         bool disable_out_for_load =
615                 !(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) &&
616                 (val & CL_DVFS_OUTPUT_CFG_I2C_ENABLE);
617
618         if (disable_out_for_load) {
619                 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
620                 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
621                 cl_dvfs_i2c_wmb(cld);
622                 udelay(2); /* 2us (big margin) window for disable propafation */
623         }
624
625         _load_lut(cld);
626
627         if (disable_out_for_load) {
628                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
629                 cl_dvfs_i2c_writel(cld, val, CL_DVFS_OUTPUT_CFG);
630                 cl_dvfs_i2c_wmb(cld);
631         }
632 }
633
634 #define set_tune_state(cld, state) \
635         do {                                                            \
636                 cld->tune_state = state;                                \
637                 pr_debug("%s: set tune state %d\n", __func__, state);   \
638         } while (0)
639
640 static inline void tune_low(struct tegra_cl_dvfs *cld)
641 {
642         /* a must order: 1st tune dfll low, then tune trimmers low */
643         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
644         cl_dvfs_wmb(cld);
645         if (cld->safe_dvfs->dfll_data.tune_trimmers)
646                 cld->safe_dvfs->dfll_data.tune_trimmers(false);
647 }
648
649 static inline void tune_high(struct tegra_cl_dvfs *cld)
650 {
651         /* a must order: 1st tune trimmers high, then tune dfll high */
652         if (cld->safe_dvfs->dfll_data.tune_trimmers)
653                 cld->safe_dvfs->dfll_data.tune_trimmers(true);
654         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0_high_mv,
655                        CL_DVFS_TUNE0);
656         cl_dvfs_wmb(cld);
657 }
658
659 static void set_ol_config(struct tegra_cl_dvfs *cld)
660 {
661         u32 val, out_min;
662
663         /* always tune low (safe) in open loop */
664         if (cld->tune_state != TEGRA_CL_DVFS_TUNE_LOW) {
665                 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
666                 tune_low(cld);
667
668                 out_min = get_output_min(cld);
669                 if (cld->lut_min != out_min) {
670                         cld->lut_min = out_min;
671                         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
672                                 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
673                                 val &= ~CL_DVFS_OUTPUT_CFG_MIN_MASK;
674                                 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
675                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
676                         } else {
677                                 cl_dvfs_load_lut(cld);
678                         }
679                 }
680         }
681
682         /* 1:1 scaling in open loop */
683         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
684         val |= (SCALE_MAX - 1) << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
685         val &= ~CL_DVFS_FREQ_REQ_FORCE_ENABLE;
686         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
687 }
688
689 static void set_cl_config(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
690 {
691         u32 out_max, out_min;
692         u32 out_cap = get_output_cap(cld, req);
693
694         switch (cld->tune_state) {
695         case TEGRA_CL_DVFS_TUNE_LOW:
696                 if (out_cap > cld->tune_high_out_start) {
697                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST);
698                         mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
699                 }
700                 break;
701
702         case TEGRA_CL_DVFS_TUNE_HIGH:
703         case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST:
704                 if (out_cap <= cld->tune_high_out_start) {
705                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
706                         tune_low(cld);
707                 }
708                 break;
709         default:
710                 BUG();
711         }
712
713         out_min = get_output_min(cld);
714         if (out_cap > (out_min + 1))
715                 req->output = out_cap - 1;
716         else
717                 req->output = out_min + 1;
718         if (req->output == cld->safe_output)
719                 req->output++;
720         out_max = max((u8)(req->output + 1), cld->minimax_output);
721         out_max = max((u8)(out_max), cld->force_out_min);
722
723         if ((cld->lut_min != out_min) || (cld->lut_max != out_max)) {
724                 cld->lut_min = out_min;
725                 cld->lut_max = out_max;
726                 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
727                         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
728                         val &= ~(CL_DVFS_OUTPUT_CFG_MAX_MASK |
729                                  CL_DVFS_OUTPUT_CFG_MIN_MASK);
730                         val |= out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT;
731                         val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
732                         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
733                 } else {
734                         cl_dvfs_load_lut(cld);
735                 }
736         }
737 }
738
739 static void tune_timer_cb(unsigned long data)
740 {
741         unsigned long flags;
742         u32 val, out_min, out_last;
743         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
744
745         clk_lock_save(cld->dfll_clk, &flags);
746
747         if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST) {
748                 out_min = cld->lut_min;
749                 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
750                 out_last = is_i2c(cld) ?
751                         (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK :
752                         out_min; /* no way to stall PWM: out_last >= out_min */
753
754                 if (!(val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) &&
755                     (out_last >= cld->tune_high_out_min)  &&
756                     (out_min >= cld->tune_high_out_min)) {
757                         udelay(CL_DVFS_OUTPUT_RAMP_DELAY);
758                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
759                         tune_high(cld);
760                 } else {
761                         mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
762                 }
763         }
764         clk_unlock_restore(cld->dfll_clk, &flags);
765 }
766
767 static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
768 {
769         if (!cld->calibration_delay)
770                 return;
771         mod_timer(&cld->calibration_timer, jiffies + cld->calibration_delay);
772 }
773
774 static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
775 {
776         u32 val, data;
777         ktime_t now;
778         unsigned long rate;
779         unsigned long step = RATE_STEP(cld);
780         unsigned long rate_min = cld->dvco_rate_min;
781         u8 out_min = get_output_min(cld);
782
783         /*
784          *  Enter calibration procedure only if
785          *  - closed loop operations
786          *  - last request engaged clock skipper
787          *  - at least specified time after the last calibration attempt
788          */
789         if ((cld->mode != TEGRA_CL_DVFS_CLOSED_LOOP) ||
790             (cld->last_req.rate > rate_min))
791                 return;
792
793         now = ktime_get();
794         if (ktime_us_delta(now, cld->last_calibration) < CL_DVFS_CALIBR_TIME)
795                 return;
796         cld->last_calibration = now;
797
798         /* Synchronize with sample period, and get rate measurements */
799         switch_monitor(cld, CL_DVFS_MONITOR_CTRL_FREQ);
800
801         if (cld->p_data->flags & TEGRA_CL_DVFS_DATA_NEW_NO_USE) {
802                 /* Cannot use DATA_NEW synch - get data after one full sample
803                    period (with 10us margin) */
804                 int delay = 1000000 / cld->p_data->cfg_param->sample_rate + 10;
805                 udelay(delay);
806         }
807         wait_data_new(cld, &data);
808         wait_data_new(cld, &data);
809
810         /* Defer calibration if data reading is not consistent */
811         if (filter_monitor_data(cld, &data)) {
812                 calibration_timer_update(cld);
813                 return;
814         }
815
816         if (is_i2c(cld)) {
817                 /* Defer calibration if I2C transaction is pending */
818                 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
819                 if (val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) {
820                         calibration_timer_update(cld);
821                         return;
822                 }
823                 val = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
824         } else {
825                 /* Forced output must be disabled in closed loop mode */
826                 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
827                 if (val & CL_DVFS_OUTPUT_FORCE_ENABLE) {
828                         disable_forced_output(cld);
829                         calibration_timer_update(cld);
830                         return;
831                 }
832                 /* Get last output (there is no such thing as pending PWM) */
833                 val = get_last_output(cld);
834
835                 /* Defer calibration if data reading is not consistent */
836                 if (filter_monitor_data(cld, &val)) {
837                         calibration_timer_update(cld);
838                         return;
839                 }
840         }
841
842         /* Adjust minimum rate */
843         rate = GET_MONITORED_RATE(data, cld->ref_rate);
844         if ((val > out_min) || (rate < (rate_min - step)))
845                 rate_min -= step;
846         else if (rate > (cld->dvco_rate_min + step))
847                 rate_min += step;
848         else {
849                 cld->dvco_rate_floors[cld->therm_floor_idx] = rate_min;
850                 return;
851         }
852
853         cld->dvco_rate_min = clamp(rate_min,
854                         cld->calibration_range_min, cld->calibration_range_max);
855         calibration_timer_update(cld);
856         pr_debug("%s: calibrated dvco_rate_min %lu\n",
857                  __func__, cld->dvco_rate_min);
858 }
859
860 static void calibration_timer_cb(unsigned long data)
861 {
862         unsigned long flags;
863         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
864
865         pr_debug("%s\n", __func__);
866
867         clk_lock_save(cld->dfll_clk, &flags);
868         cl_dvfs_calibrate(cld);
869         clk_unlock_restore(cld->dfll_clk, &flags);
870 }
871
872 static void set_request(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
873 {
874         u32 val, f;
875         int force_val = req->output - cld->safe_output;
876         int coef = 128; /* FIXME: cld->p_data->cfg_param->cg_scale? */;
877
878         /* If going down apply force output floor */
879         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
880         f = (val & CL_DVFS_FREQ_REQ_FREQ_MASK) >> CL_DVFS_FREQ_REQ_FREQ_SHIFT;
881         if ((!(val & CL_DVFS_FREQ_REQ_FREQ_VALID) || (f > req->freq)) &&
882             (cld->force_out_min > req->output))
883                 force_val = cld->force_out_min - cld->safe_output;
884
885         force_val = force_val * coef / cld->p_data->cfg_param->cg;
886         force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
887
888         /*
889          * 1st set new frequency request and force values, then set force enable
890          * bit (if not set already). Use same CL_DVFS_FREQ_REQ register read
891          * (not other cl_dvfs register) plus explicit delay as a fence.
892          */
893         val &= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
894         val |= req->freq << CL_DVFS_FREQ_REQ_FREQ_SHIFT;
895         val |= req->scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
896         val |= ((u32)force_val << CL_DVFS_FREQ_REQ_FORCE_SHIFT) &
897                 CL_DVFS_FREQ_REQ_FORCE_MASK;
898         val |= CL_DVFS_FREQ_REQ_FREQ_VALID;
899         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
900         wmb();
901         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
902
903         if (!(val & CL_DVFS_FREQ_REQ_FORCE_ENABLE)) {
904                 udelay(1);  /* 1us (big margin) window for force value settle */
905                 val |= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
906                 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
907                 cl_dvfs_wmb(cld);
908         }
909 }
910
911 static u8 find_mv_out_cap(struct tegra_cl_dvfs *cld, int mv)
912 {
913         u8 cap;
914         int uv;
915
916         for (cap = 0; cap < cld->num_voltages; cap++) {
917                 uv = cld->out_map[cap]->reg_uV;
918                 if (uv >= mv * 1000)
919                         return is_i2c(cld) ? cap : cld->out_map[cap]->reg_value;
920         }
921         return get_output_top(cld);     /* maximum possible output */
922 }
923
924 static u8 find_mv_out_floor(struct tegra_cl_dvfs *cld, int mv)
925 {
926         u8 floor;
927         int uv;
928
929         for (floor = 0; floor < cld->num_voltages; floor++) {
930                 uv = cld->out_map[floor]->reg_uV;
931                 if (uv > mv * 1000) {
932                         if (!floor)     /* minimum possible output */
933                                 return get_output_bottom(cld);
934                         break;
935                 }
936         }
937         return is_i2c(cld) ? floor - 1 : cld->out_map[floor - 1]->reg_value;
938 }
939
940 static int find_safe_output(
941         struct tegra_cl_dvfs *cld, unsigned long rate, u8 *safe_output)
942 {
943         int i;
944         int n = cld->safe_dvfs->num_freqs;
945         unsigned long *freqs = cld->safe_dvfs->freqs;
946
947         for (i = 0; i < n; i++) {
948                 if (freqs[i] >= rate) {
949                         *safe_output = cld->clk_dvfs_map[i];
950                         return 0;
951                 }
952         }
953         return -EINVAL;
954 }
955
956 static unsigned long find_dvco_rate_min(struct tegra_cl_dvfs *cld, u8 out_min)
957 {
958         int i;
959
960         for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
961                 if (cld->clk_dvfs_map[i] > out_min)
962                         break;
963         }
964         i = i ? i-1 : 0;
965         return cld->safe_dvfs->freqs[i];
966 }
967
968 static void cl_dvfs_set_dvco_rate_min(struct tegra_cl_dvfs *cld)
969 {
970         unsigned long rate = cld->dvco_rate_floors[cld->therm_floor_idx];
971         if (!rate) {
972                 rate = cld->safe_dvfs->dfll_data.out_rate_min;
973                 if (cld->therm_floor_idx < cld->therm_floors_num)
974                         rate = find_dvco_rate_min(cld,
975                                 cld->thermal_out_floors[cld->therm_floor_idx]);
976         }
977
978         /* round minimum rate to request unit (ref_rate/2) boundary */
979         cld->dvco_rate_min = ROUND_MIN_RATE(rate, cld->ref_rate);
980         pr_debug("%s: calibrated dvco_rate_min %lu\n",
981                  __func__, cld->dvco_rate_min);
982
983         /* dvco min rate is under-estimated - skewed range up */
984         cld->calibration_range_min = cld->dvco_rate_min - 4 * RATE_STEP(cld);
985         if (cld->calibration_range_min < cld->safe_dvfs->freqs[0])
986                 cld->calibration_range_min = cld->safe_dvfs->freqs[0];
987         cld->calibration_range_max = cld->dvco_rate_min + 24 * RATE_STEP(cld);
988         rate = cld->safe_dvfs->freqs[cld->safe_dvfs->num_freqs - 1];
989         if (cld->calibration_range_max > rate)
990                 cld->calibration_range_max = rate;
991 }
992
993 static void cl_dvfs_set_force_out_min(struct tegra_cl_dvfs *cld)
994 {
995         u8 force_out_min = get_output_bottom(cld);
996         int force_mv_min = cld->p_data->pmu_undershoot_gb;
997
998         if (!force_mv_min) {
999                 cld->force_out_min = force_out_min;
1000                 return;
1001         }
1002
1003         if (cld->therm_floor_idx < cld->therm_floors_num)
1004                 force_out_min = cld->thermal_out_floors[cld->therm_floor_idx];
1005         force_mv_min += get_mv(cld, force_out_min);
1006         force_out_min = find_mv_out_cap(cld, force_mv_min);
1007         if (force_out_min == cld->safe_output)
1008                 force_out_min++;
1009         cld->force_out_min = force_out_min;
1010 }
1011
1012 static struct voltage_reg_map *find_vdd_map_entry(
1013         struct tegra_cl_dvfs *cld, int mV, bool exact)
1014 {
1015         int i, reg_mV;
1016
1017         for (i = 0; i < cld->p_data->vdd_map_size; i++) {
1018                 /* round down to 1mV */
1019                 reg_mV = cld->p_data->vdd_map[i].reg_uV / 1000;
1020                 if (mV <= reg_mV)
1021                         break;
1022         }
1023
1024         if (i < cld->p_data->vdd_map_size) {
1025                 if (!exact || (mV == reg_mV))
1026                         return &cld->p_data->vdd_map[i];
1027         }
1028         return NULL;
1029 }
1030
1031 static void cl_dvfs_init_maps(struct tegra_cl_dvfs *cld)
1032 {
1033         int i, j, v, v_max, n;
1034         const int *millivolts;
1035         struct voltage_reg_map *m;
1036
1037         BUILD_BUG_ON(MAX_CL_DVFS_VOLTAGES > OUT_MASK + 1);
1038
1039         n = cld->safe_dvfs->num_freqs;
1040         BUG_ON(n >= MAX_CL_DVFS_VOLTAGES);
1041
1042         millivolts = cld->safe_dvfs->dfll_millivolts;
1043         v_max = millivolts[n - 1];
1044
1045         v = cld->safe_dvfs->dfll_data.min_millivolts;
1046         BUG_ON(v > millivolts[0]);
1047
1048         cld->out_map[0] = find_vdd_map_entry(cld, v, true);
1049         BUG_ON(!cld->out_map[0]);
1050
1051         for (i = 0, j = 1; i < n; i++) {
1052                 for (;;) {
1053                         v += max(1, (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j));
1054                         if (v >= millivolts[i])
1055                                 break;
1056
1057                         m = find_vdd_map_entry(cld, v, false);
1058                         BUG_ON(!m);
1059                         if (m != cld->out_map[j - 1])
1060                                 cld->out_map[j++] = m;
1061                 }
1062
1063                 v = (j == MAX_CL_DVFS_VOLTAGES - 1) ? v_max : millivolts[i];
1064                 m = find_vdd_map_entry(cld, v, true);
1065                 BUG_ON(!m);
1066                 if (m != cld->out_map[j - 1])
1067                         cld->out_map[j++] = m;
1068                 if (is_i2c(cld)) {
1069                         cld->clk_dvfs_map[i] = j - 1;
1070                 } else {
1071                         cld->clk_dvfs_map[i] = cld->out_map[j - 1]->reg_value;
1072                         BUG_ON(cld->clk_dvfs_map[i] > OUT_MASK + 1);
1073                 }
1074
1075                 if (v >= v_max)
1076                         break;
1077         }
1078         cld->num_voltages = j;
1079
1080         /* hit Vmax before last freq was mapped: map the rest to max output */
1081         for (j = i++; i < n; i++)
1082                 cld->clk_dvfs_map[i] = cld->clk_dvfs_map[j];
1083 }
1084
1085 static void cl_dvfs_init_tuning_thresholds(struct tegra_cl_dvfs *cld)
1086 {
1087         int mv;
1088
1089         /*
1090          * Convert high tuning voltage threshold into output LUT index, and
1091          * add necessary margin.  If voltage threshold is outside operating
1092          * range set it at maximum output level to effectively disable tuning
1093          * parameters adjustment.
1094          */
1095         cld->tune_high_out_min = get_output_top(cld);
1096         cld->tune_high_out_start = cld->tune_high_out_min;
1097         mv = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
1098         if (mv >= cld->safe_dvfs->dfll_data.min_millivolts) {
1099                 int margin = cld->safe_dvfs->dfll_data.tune_high_margin_mv ? :
1100                                 CL_DVFS_TUNE_HIGH_MARGIN_MV;
1101                 u8 out_min = find_mv_out_cap(cld, mv);
1102                 u8 out_start = find_mv_out_cap(cld, mv + margin);
1103                 out_start = max(out_start, (u8)(out_min + 1));
1104                 if (out_start < get_output_top(cld)) {
1105                         cld->tune_high_out_min = out_min;
1106                         cld->tune_high_out_start = out_start;
1107                         if (cld->minimax_output <= out_start)
1108                                 cld->minimax_output = out_start + 1;
1109                 }
1110         }
1111 }
1112
1113 static void cl_dvfs_init_hot_output_cap(struct tegra_cl_dvfs *cld)
1114 {
1115         int i;
1116         if (!cld->safe_dvfs->dvfs_rail->therm_mv_caps ||
1117             !cld->safe_dvfs->dvfs_rail->therm_mv_caps_num)
1118                 return;
1119
1120         if (!cld->safe_dvfs->dvfs_rail->vmax_cdev)
1121                 WARN(1, "%s: missing dfll cap cooling device\n",
1122                      cld->safe_dvfs->dvfs_rail->reg_id);
1123         /*
1124          * Convert monotonically decreasing thermal caps at high temperature
1125          * into output LUT indexes; make sure there is a room for regulation
1126          * below minimum thermal cap.
1127          */
1128         cld->therm_caps_num = cld->safe_dvfs->dvfs_rail->therm_mv_caps_num;
1129         for (i = 0; i < cld->therm_caps_num; i++) {
1130                 cld->thermal_out_caps[i] = find_mv_out_floor(
1131                         cld, cld->safe_dvfs->dvfs_rail->therm_mv_caps[i]);
1132         }
1133         BUG_ON(cld->thermal_out_caps[cld->therm_caps_num - 1] <
1134                cld->minimax_output);
1135 }
1136
1137 static void cl_dvfs_convert_cold_output_floor(struct tegra_cl_dvfs *cld,
1138                                               int offset)
1139 {
1140         int i;
1141
1142         /*
1143          * Convert monotonically decreasing thermal floors at low temperature
1144          * into output LUT indexes; make sure there is a room for regulation
1145          * above maximum thermal floor. The latter is also exempt from offset
1146          * application.
1147          */
1148         cld->therm_floors_num = cld->safe_dvfs->dvfs_rail->therm_mv_floors_num;
1149         for (i = 0; i < cld->therm_floors_num; i++) {
1150                 int mv = cld->safe_dvfs->dvfs_rail->therm_mv_floors[i] +
1151                         (i ? offset : 0);
1152                 cld->thermal_out_floors[i] = find_mv_out_cap(cld, mv);
1153         }
1154         BUG_ON(cld->thermal_out_floors[0] + 1 >= get_output_top(cld));
1155 }
1156
1157 static void cl_dvfs_init_cold_output_floor(struct tegra_cl_dvfs *cld)
1158 {
1159         if (!cld->safe_dvfs->dvfs_rail->therm_mv_floors ||
1160             !cld->safe_dvfs->dvfs_rail->therm_mv_floors_num)
1161                 return;
1162
1163         if (!cld->safe_dvfs->dvfs_rail->vmin_cdev)
1164                 WARN(1, "%s: missing dfll floor cooling device\n",
1165                      cld->safe_dvfs->dvfs_rail->reg_id);
1166
1167         /* Most conservative offset 0 always safe */
1168         cl_dvfs_convert_cold_output_floor(cld, 0);
1169
1170         if (cld->minimax_output <= cld->thermal_out_floors[0])
1171                 cld->minimax_output = cld->thermal_out_floors[0] + 1;
1172 }
1173
1174 static void cl_dvfs_init_output_thresholds(struct tegra_cl_dvfs *cld)
1175 {
1176         cld->minimax_output = 0;
1177         cl_dvfs_init_tuning_thresholds(cld);
1178         cl_dvfs_init_cold_output_floor(cld);
1179
1180         /* make sure safe output is safe at any temperature */
1181         cld->safe_output = cld->thermal_out_floors[0] ? :
1182                 get_output_bottom(cld) + 1;
1183         if (cld->minimax_output <= cld->safe_output)
1184                 cld->minimax_output = cld->safe_output + 1;
1185
1186         /* init caps after minimax output is determined */
1187         cl_dvfs_init_hot_output_cap(cld);
1188 }
1189
1190 static void cl_dvfs_init_pwm_if(struct tegra_cl_dvfs *cld)
1191 {
1192         u32 val, div;
1193         struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
1194         bool delta_mode = p_data->u.pmu_pwm.delta_mode;
1195         int pg = p_data->u.pmu_pwm.pwm_pingroup;
1196         int pcg = p_data->u.pmu_pwm.pwm_clk_pingroup;
1197
1198         div = GET_DIV(cld->ref_rate, p_data->u.pmu_pwm.pwm_rate, 1);
1199
1200         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
1201         val |= delta_mode ? CL_DVFS_OUTPUT_CFG_PWM_DELTA : 0;
1202         val |= (div << CL_DVFS_OUTPUT_CFG_PWM_DIV_SHIFT) &
1203                 CL_DVFS_OUTPUT_CFG_PWM_DIV_MASK;
1204
1205         /*
1206          * Different ways to enable/disable PWM depending on board design:
1207          * a) Use native CL-DVFS output PWM_ENABLE control (2WIRE bus)
1208          * b) Use gpio control of external buffer (1WIRE bus with buffer)
1209          * c) Use tristate PWM pingroup control (1WIRE bus with direct connect)
1210          * in cases (b) and (c) keep CL-DVFS native control always enabled
1211          */
1212
1213         switch (p_data->u.pmu_pwm.pwm_bus) {
1214         case TEGRA_CL_DVFS_PWM_1WIRE_BUFFER:
1215                 tegra_pinmux_set_tristate(pg, TEGRA_TRI_NORMAL);
1216                 val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
1217                 break;
1218
1219         case TEGRA_CL_DVFS_PWM_1WIRE_DIRECT:
1220                 tegra_pinmux_set_tristate(pg, TEGRA_TRI_TRISTATE);
1221                 val |= CL_DVFS_OUTPUT_CFG_PWM_ENABLE;
1222                 break;
1223
1224         case TEGRA_CL_DVFS_PWM_2WIRE:
1225                 tegra_pinmux_set_tristate(pg, TEGRA_TRI_NORMAL);
1226                 tegra_pinmux_set_tristate(pcg, TEGRA_TRI_NORMAL);
1227                 break;
1228
1229         default:
1230                 BUG();
1231         }
1232
1233         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
1234         cl_dvfs_wmb(cld);
1235 }
1236
1237 static void cl_dvfs_init_i2c_if(struct tegra_cl_dvfs *cld)
1238 {
1239         u32 val, div;
1240         struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
1241         bool hs_mode = p_data->u.pmu_i2c.hs_rate;
1242
1243         /* PMU slave address, vdd register offset, and transfer mode */
1244         val = p_data->u.pmu_i2c.slave_addr << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT;
1245         if (p_data->u.pmu_i2c.addr_10)
1246                 val |= CL_DVFS_I2C_CFG_SLAVE_ADDR_10;
1247         if (hs_mode) {
1248                 val |= p_data->u.pmu_i2c.hs_master_code <<
1249                         CL_DVFS_I2C_CFG_HS_CODE_SHIFT;
1250                 val |= CL_DVFS_I2C_CFG_PACKET_ENABLE;
1251         }
1252         val |= CL_DVFS_I2C_CFG_SIZE_MASK;
1253         val |= CL_DVFS_I2C_CFG_ARB_ENABLE;
1254         cl_dvfs_writel(cld, val, CL_DVFS_I2C_CFG);
1255         cl_dvfs_writel(cld, p_data->u.pmu_i2c.reg, CL_DVFS_I2C_VDD_REG_ADDR);
1256
1257
1258         val = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.fs_rate, 8);
1259         BUG_ON(!val || (val > CL_DVFS_I2C_CLK_DIVISOR_MASK));
1260         val = (val - 1) << CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT;
1261         if (hs_mode) {
1262                 div = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.hs_rate, 12);
1263                 BUG_ON(!div || (div > CL_DVFS_I2C_CLK_DIVISOR_MASK));
1264         } else {
1265                 div = 2;        /* default hs divisor just in case */
1266         }
1267         val |= (div - 1) << CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT;
1268         cl_dvfs_writel(cld, val, CL_DVFS_I2C_CLK_DIVISOR);
1269         cl_dvfs_i2c_wmb(cld);
1270 }
1271
1272 static void cl_dvfs_init_out_if(struct tegra_cl_dvfs *cld)
1273 {
1274         u32 val, out_min, out_max;
1275
1276         /*
1277          * Disable output, and set safe voltage and output limits;
1278          * disable and clear limit interrupts.
1279          */
1280         cld->tune_state = TEGRA_CL_DVFS_TUNE_LOW;
1281         cld->therm_cap_idx = cld->therm_caps_num;
1282         cld->therm_floor_idx = 0;
1283         cl_dvfs_set_dvco_rate_min(cld);
1284         cl_dvfs_set_force_out_min(cld);
1285
1286         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
1287                 /*
1288                  * If h/w supports dynamic chanage of output register, limit
1289                  * LUT * index range using cl_dvfs h/w controls, and load full
1290                  * range LUT table once.
1291                  */
1292                 out_min = get_output_min(cld);
1293                 out_max = get_output_cap(cld, NULL);
1294                 cld->lut_min = get_output_bottom(cld);
1295                 cld->lut_max = get_output_top(cld);
1296         } else {
1297                 /* LUT available only for I2C, no dynamic config WAR for PWM */
1298                 BUG_ON(!is_i2c(cld));
1299
1300                 /*
1301                  * Allow the entire range of LUT indexes, but limit output
1302                  * voltage in LUT mapping (this "indirect" application of limits
1303                  * is used, because h/w does not support dynamic change of index
1304                  * limits, but dynamic reload of LUT is fine).
1305                  */
1306                 out_min = get_output_bottom(cld);
1307                 out_max = get_output_top(cld);
1308                 cld->lut_min = get_output_min(cld);
1309                 cld->lut_max = get_output_cap(cld, NULL);
1310         }
1311
1312         cl_dvfs_i2c_writel(cld, 0, CL_DVFS_OUTPUT_CFG);
1313         val = (cld->safe_output << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT) |
1314                 (out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT) |
1315                 (out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT);
1316         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
1317         cl_dvfs_wmb(cld);
1318
1319         cl_dvfs_writel(cld, 0, CL_DVFS_OUTPUT_FORCE);
1320         cl_dvfs_writel(cld, 0, CL_DVFS_INTR_EN);
1321         cl_dvfs_writel(cld, CL_DVFS_INTR_MAX_MASK | CL_DVFS_INTR_MIN_MASK,
1322                        CL_DVFS_INTR_STS);
1323
1324         /* fill in LUT table */
1325         if (is_i2c(cld))
1326                 cl_dvfs_load_lut(cld);
1327
1328         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
1329                 /* dynamic update of output register allowed - no need to reload
1330                    lut - use lut limits as output register setting shadow */
1331                 cld->lut_min = out_min;
1332                 cld->lut_max = out_max;
1333         }
1334
1335         /* configure transport */
1336         if (is_i2c(cld))
1337                 cl_dvfs_init_i2c_if(cld);
1338         else
1339                 cl_dvfs_init_pwm_if(cld);
1340 }
1341
1342 static void cl_dvfs_init_cntrl_logic(struct tegra_cl_dvfs *cld)
1343 {
1344         u32 val;
1345         struct tegra_cl_dvfs_cfg_param *param = cld->p_data->cfg_param;
1346
1347         /* configure mode, control loop parameters, DFLL tuning */
1348         set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1349
1350         val = GET_DIV(cld->ref_rate, param->sample_rate, 32);
1351         BUG_ON(val > CL_DVFS_CONFIG_DIV_MASK);
1352         cl_dvfs_writel(cld, val, CL_DVFS_CONFIG);
1353
1354         val = (param->force_mode << CL_DVFS_PARAMS_FORCE_MODE_SHIFT) |
1355                 (param->cf << CL_DVFS_PARAMS_CF_PARAM_SHIFT) |
1356                 (param->ci << CL_DVFS_PARAMS_CI_PARAM_SHIFT) |
1357                 ((u8)param->cg << CL_DVFS_PARAMS_CG_PARAM_SHIFT) |
1358                 (param->cg_scale ? CL_DVFS_PARAMS_CG_SCALE : 0);
1359         cl_dvfs_writel(cld, val, CL_DVFS_PARAMS);
1360
1361         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
1362         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune1, CL_DVFS_TUNE1);
1363         cl_dvfs_wmb(cld);
1364         if (cld->safe_dvfs->dfll_data.tune_trimmers)
1365                 cld->safe_dvfs->dfll_data.tune_trimmers(false);
1366
1367         /* configure droop (skipper 1) and scale (skipper 2) */
1368         val = GET_DROOP_FREQ(cld->safe_dvfs->dfll_data.droop_rate_min,
1369                         cld->ref_rate) << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT;
1370         BUG_ON(val > CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK);
1371         val |= (param->droop_cut_value << CL_DVFS_DROOP_CTRL_CUT_SHIFT);
1372         val |= (param->droop_restore_ramp << CL_DVFS_DROOP_CTRL_RAMP_SHIFT);
1373         cl_dvfs_writel(cld, val, CL_DVFS_DROOP_CTRL);
1374
1375         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ) &
1376                 CL_DVFS_FREQ_REQ_SCALE_MASK;
1377         cld->last_req.scale = val >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1378         cld->last_req.cap = 0;
1379         cld->last_req.freq = 0;
1380         cld->last_req.output = 0;
1381         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
1382         cl_dvfs_writel(cld, param->scale_out_ramp, CL_DVFS_SCALE_RAMP);
1383
1384         /* select frequency for monitoring */
1385         cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ, CL_DVFS_MONITOR_CTRL);
1386         cl_dvfs_wmb(cld);
1387 }
1388
1389 static int cl_dvfs_enable_clocks(struct tegra_cl_dvfs *cld)
1390 {
1391         if (is_i2c(cld))
1392                 clk_enable(cld->i2c_clk);
1393
1394         clk_enable(cld->ref_clk);
1395         clk_enable(cld->soc_clk);
1396         return 0;
1397 }
1398
1399 static void cl_dvfs_disable_clocks(struct tegra_cl_dvfs *cld)
1400 {
1401         if (is_i2c(cld))
1402                 clk_disable(cld->i2c_clk);
1403
1404         clk_disable(cld->ref_clk);
1405         clk_disable(cld->soc_clk);
1406 }
1407
1408 static int cl_dvfs_init(struct tegra_cl_dvfs *cld)
1409 {
1410         int ret, gpio, flags;
1411
1412         /* Enable output interface clock */
1413         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C) {
1414                 ret = clk_enable(cld->i2c_clk);
1415                 if (ret) {
1416                         pr_err("%s: Failed to enable %s\n",
1417                                __func__, cld->i2c_clk->name);
1418                         return ret;
1419                 }
1420                 cld->i2c_rate = clk_get_rate(cld->i2c_clk);
1421         } else if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM) {
1422                 int pwm_bus = cld->p_data->u.pmu_pwm.pwm_bus;
1423                 if (pwm_bus > TEGRA_CL_DVFS_PWM_1WIRE_DIRECT) {
1424                         /* FIXME: PWM 2-wire support */
1425                         pr_err("%s: PWM 2-wire bus is not supported\n", __func__);
1426                         return -ENOSYS;
1427                 } else if (pwm_bus == TEGRA_CL_DVFS_PWM_1WIRE_BUFFER) {
1428                         gpio = cld->p_data->u.pmu_pwm.out_gpio;
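                        /*
                         * Inference, not stated in the original code: the
                         * request below parks the buffer-enable GPIO at its
                         * inactive level, keeping the PWM output disabled
                         * until it is explicitly enabled.
                         */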
1429                         flags = cld->p_data->u.pmu_pwm.out_enable_high ?
1430                                 GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH;
1431                         if (gpio_request_one(gpio, flags, "cl_dvfs_pwm")) {
1432                                 pr_err("%s: Failed to request pwm gpio %d\n",
1433                                        __func__, gpio);
1434                                 return -EPERM;
1435                         }
1436                 }
1437         } else {
1438                 pr_err("%s: unknown PMU interface\n", __func__);
1439                 return -EINVAL;
1440         }
1441
1442         /* Enable module clocks, release control logic reset */
1443         ret = clk_enable(cld->ref_clk);
1444         if (ret) {
1445                 pr_err("%s: Failed to enable %s\n",
1446                        __func__, cld->ref_clk->name);
1447                 return ret;
1448         }
1449         ret = clk_enable(cld->soc_clk);
1450         if (ret) {
1451                 pr_err("%s: Failed to enable %s\n",
1452                        __func__, cld->soc_clk->name);
1453                 return ret;
1454         }
1455         cld->ref_rate = clk_get_rate(cld->ref_clk);
1456         BUG_ON(!cld->ref_rate);
1457
1458         /* init tuning timer */
1459         init_timer(&cld->tune_timer);
1460         cld->tune_timer.function = tune_timer_cb;
1461         cld->tune_timer.data = (unsigned long)cld;
1462         cld->tune_delay = usecs_to_jiffies(CL_DVFS_TUNE_HIGH_DELAY);
1463
1464         /* init calibration timer */
1465         init_timer_deferrable(&cld->calibration_timer);
1466         cld->calibration_timer.function = calibration_timer_cb;
1467         cld->calibration_timer.data = (unsigned long)cld;
1468         cld->calibration_delay = usecs_to_jiffies(CL_DVFS_CALIBR_TIME);
1469
1470         /* Get output voltage mapping ready */
1471         cl_dvfs_init_maps(cld);
1472
1473         /* Setup output range thresholds */
1474         cl_dvfs_init_output_thresholds(cld);
1475
1476         /* Setup PMU interface */
1477         cl_dvfs_init_out_if(cld);
1478
1479         /* Configure control registers in disabled mode and disable clocks */
1480         cl_dvfs_init_cntrl_logic(cld);
1481         cl_dvfs_disable_clocks(cld);
1482
1483         return 0;
1484 }
1485
1486 /*
1487  * Re-initialize and enable target device clock in open loop mode. Called
1488  * directly from SoC clock resume syscore operation. Closed loop will be
1489  * re-entered in platform syscore ops as well.
1490  */
1491 void tegra_cl_dvfs_resume(struct tegra_cl_dvfs *cld)
1492 {
1493         enum tegra_cl_dvfs_ctrl_mode mode = cld->mode;
1494         struct dfll_rate_req req = cld->last_req;
1495
1496         cl_dvfs_enable_clocks(cld);
1497
1498         /* Setup PMU interface, and configure controls in disabled mode */
1499         cl_dvfs_init_out_if(cld);
1500         cl_dvfs_init_cntrl_logic(cld);
1501
1502         /* Restore force output */
1503         cl_dvfs_writel(cld, cld->suspended_force_out, CL_DVFS_OUTPUT_FORCE);
1504
1505         cl_dvfs_disable_clocks(cld);
1506
1507         /* Restore last request and mode */
1508         cld->last_req = req;
1509         if (mode != TEGRA_CL_DVFS_DISABLED) {
1510                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1511                 WARN(mode > TEGRA_CL_DVFS_OPEN_LOOP,
1512                      "DFLL was left locked in suspend\n");
1513         }
1514 }
1515
1516 #ifdef CONFIG_THERMAL
1517 /* cl_dvfs cap cooling device */
1518 static int tegra_cl_dvfs_get_vmax_cdev_max_state(
1519         struct thermal_cooling_device *cdev, unsigned long *max_state)
1520 {
1521         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1522         *max_state = cld->therm_caps_num;
1523         return 0;
1524 }
1525
1526 static int tegra_cl_dvfs_get_vmax_cdev_cur_state(
1527         struct thermal_cooling_device *cdev, unsigned long *cur_state)
1528 {
1529         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1530         *cur_state = cld->therm_cap_idx;
1531         return 0;
1532 }
1533
1534 static int tegra_cl_dvfs_set_vmax_cdev_state(
1535         struct thermal_cooling_device *cdev, unsigned long cur_state)
1536 {
1537         unsigned long flags;
1538         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1539
1540         clk_lock_save(cld->dfll_clk, &flags);
1541
1542         if (cld->therm_cap_idx != cur_state) {
1543                 cld->therm_cap_idx = cur_state;
1544                 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1545                         tegra_cl_dvfs_request_rate(cld,
1546                                 tegra_cl_dvfs_request_get(cld));
1547                 }
1548         }
1549         clk_unlock_restore(cld->dfll_clk, &flags);
1550         return 0;
1551 }
1552
1553 static struct thermal_cooling_device_ops tegra_cl_dvfs_vmax_cool_ops = {
1554         .get_max_state = tegra_cl_dvfs_get_vmax_cdev_max_state,
1555         .get_cur_state = tegra_cl_dvfs_get_vmax_cdev_cur_state,
1556         .set_cur_state = tegra_cl_dvfs_set_vmax_cdev_state,
1557 };
1558
1559 /* cl_dvfs vmin cooling device */
1560 static int tegra_cl_dvfs_get_vmin_cdev_max_state(
1561         struct thermal_cooling_device *cdev, unsigned long *max_state)
1562 {
1563         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1564         *max_state = cld->therm_floors_num;
1565         return 0;
1566 }
1567
1568 static int tegra_cl_dvfs_get_vmin_cdev_cur_state(
1569         struct thermal_cooling_device *cdev, unsigned long *cur_state)
1570 {
1571         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1572         *cur_state = cld->therm_floor_idx;
1573         return 0;
1574 }
1575
1576 static int tegra_cl_dvfs_set_vmin_cdev_state(
1577         struct thermal_cooling_device *cdev, unsigned long cur_state)
1578 {
1579         unsigned long flags;
1580         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1581
1582         clk_lock_save(cld->dfll_clk, &flags);
1583
1584         if (cld->therm_floor_idx != cur_state) {
1585                 cld->therm_floor_idx = cur_state;
1586                 cl_dvfs_set_dvco_rate_min(cld);
1587                 cl_dvfs_set_force_out_min(cld);
1588                 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1589                         tegra_cl_dvfs_request_rate(cld,
1590                                 tegra_cl_dvfs_request_get(cld));
1591                 }
1592         }
1593         clk_unlock_restore(cld->dfll_clk, &flags);
1594         return 0;
1595 }
1596
1597 static struct thermal_cooling_device_ops tegra_cl_dvfs_vmin_cool_ops = {
1598         .get_max_state = tegra_cl_dvfs_get_vmin_cdev_max_state,
1599         .get_cur_state = tegra_cl_dvfs_get_vmin_cdev_cur_state,
1600         .set_cur_state = tegra_cl_dvfs_set_vmin_cdev_state,
1601 };
1602
1603 static void tegra_cl_dvfs_init_cdev(struct work_struct *work)
1604 {
1605         struct tegra_cl_dvfs *cld = container_of(
1606                 work, struct tegra_cl_dvfs, init_cdev_work);
1607
1608         /* just report error - initialized at worst-case temperature anyway */
1609         if (cld->safe_dvfs->dvfs_rail->vmin_cdev) {
1610                 char *type = cld->safe_dvfs->dvfs_rail->vmin_cdev->cdev_type;
1611                 cld->vmin_cdev = thermal_cooling_device_register(
1612                         type, (void *)cld, &tegra_cl_dvfs_vmin_cool_ops);
1613                 if (IS_ERR_OR_NULL(cld->vmin_cdev)) {
1614                         cld->vmin_cdev = NULL;
1615                         pr_err("tegra cooling device %s failed to register\n",
1616                                type);
1617                         return;
1618                 }
1619                 pr_info("%s cooling device is registered\n", type);
1620         }
1621
1622         if (cld->safe_dvfs->dvfs_rail->vmax_cdev) {
1623                 char *type = cld->safe_dvfs->dvfs_rail->vmax_cdev->cdev_type;
1624                 cld->vmax_cdev = thermal_cooling_device_register(
1625                         type, (void *)cld, &tegra_cl_dvfs_vmax_cool_ops);
1626                 if (IS_ERR_OR_NULL(cld->vmax_cdev)) {
1627                         cld->vmax_cdev = NULL;
1628                         pr_err("tegra cooling device %s failed to register\n",
1629                                type);
1630                         return;
1631                 }
1632                 pr_info("%s cooling device is registered\n", type);
1633         }
1634 }
1635 #endif
1636
1637 #ifdef CONFIG_PM_SLEEP
1638 /*
1639  * cl_dvfs controls clock/voltage to other devices, including CPU. Therefore,
1640  * cl_dvfs driver pm suspend callback does not stop cl-dvfs operations. It is
1641  * only used to enforce cold/hot voltage limits, since temperature may change
1642  * in suspend without waking up. The correct temperature zone after suspend
1643  * will be updated via the cl_dvfs cooling device interface when the
1644  * temperature sensor resumes.
1645  */
1646 static int tegra_cl_dvfs_suspend_cl(struct device *dev)
1647 {
1648         unsigned long flags;
1649         struct tegra_cl_dvfs *cld = dev_get_drvdata(dev);
1650
1651         clk_lock_save(cld->dfll_clk, &flags);
1652         if (cld->vmax_cdev)
1653                 cld->vmax_cdev->updated = false;
1654         cld->therm_cap_idx = cld->therm_caps_num;
1655         if (cld->vmin_cdev)
1656                 cld->vmin_cdev->updated = false;
1657         cld->therm_floor_idx = 0;
1658         cl_dvfs_set_dvco_rate_min(cld);
1659         cl_dvfs_set_force_out_min(cld);
1660         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1661                 set_cl_config(cld, &cld->last_req);
1662                 set_request(cld, &cld->last_req);
1663         }
1664         cld->suspended_force_out = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
1665         clk_unlock_restore(cld->dfll_clk, &flags);
1666
1667         return 0;
1668 }
1669
1670 static const struct dev_pm_ops tegra_cl_dvfs_pm_ops = {
1671         .suspend = tegra_cl_dvfs_suspend_cl,
1672 };
1673 #endif
1674
1675 /*
1676  * These dfll bypass APIs provide direct access to the force output register.
1677  * Set operation always updates the force value, but applies it only in open
1678  * loop or disabled mode. Get operation returns the force value if it is
1679  * applied, and the monitored output otherwise. Hence, the get value matches
1680  * the real output in any mode.
1681  */
1682 static int tegra_cl_dvfs_force_output(void *data, unsigned int out_sel)
1683 {
1684         u32 val;
1685         unsigned long flags;
1686         struct tegra_cl_dvfs *cld = data;
1687
1688         if (out_sel > OUT_MASK)
1689                 return -EINVAL;
1690
1691         clk_lock_save(cld->dfll_clk, &flags);
1692
1693         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
1694         val = (val & CL_DVFS_OUTPUT_FORCE_ENABLE) | out_sel;
1695         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
1696         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
1697
1698         if ((cld->mode < TEGRA_CL_DVFS_CLOSED_LOOP) &&
1699             !(val & CL_DVFS_OUTPUT_FORCE_ENABLE)) {
1700                 val |= CL_DVFS_OUTPUT_FORCE_ENABLE;
1701                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_FORCE);
1702                 cl_dvfs_wmb(cld);
1703                 /* enable output only if bypass h/w is alive */
1704                 if (!cld->safe_dvfs->dfll_data.is_bypass_down ||
1705                     !cld->safe_dvfs->dfll_data.is_bypass_down())
1706                         output_enable(cld);
1707         }
1708
1709         clk_unlock_restore(cld->dfll_clk, &flags);
1710         return 0;
1711 }
1712
1713 static int tegra_cl_dvfs_get_output(void *data)
1714 {
1715         u32 val;
1716         unsigned long flags;
1717         struct tegra_cl_dvfs *cld = data;
1718
1719         clk_lock_save(cld->dfll_clk, &flags);
1720         val = cl_dvfs_get_output(cld);
1721         clk_unlock_restore(cld->dfll_clk, &flags);
1722         return val;
1723 }
1724
1725 static void tegra_cl_dvfs_bypass_dev_register(struct tegra_cl_dvfs *cld,
1726                                               struct platform_device *byp_dev)
1727 {
1728         struct tegra_dfll_bypass_platform_data *p_data =
1729                 byp_dev->dev.platform_data;
1730         p_data->set_bypass_sel = tegra_cl_dvfs_force_output;
1731         p_data->get_bypass_sel = tegra_cl_dvfs_get_output;
1732         p_data->dfll_data = cld;
1733
1734         platform_device_register(byp_dev);
1735 }
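
/*
 * Hedged usage sketch (the bypass regulator driver lives outside this file):
 * after the registration above, that driver is expected to reach cl_dvfs only
 * through the platform data hooks, roughly as:
 *
 *      struct tegra_dfll_bypass_platform_data *pd = dev->platform_data;
 *      int sel = pd->get_bypass_sel(pd->dfll_data);
 *      ...
 *      pd->set_bypass_sel(pd->dfll_data, new_sel);
 */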
1736
1737 /*
1738  * The Silicon Monitor (SiMon) notification provides grade information on
1739  * the DFLL controlled rail. The respective minimum voltage offset is applied
1740  * to the thermal floors profile. SiMon offsets are negative; the higher the
1741  * grade, the lower the floor.
1742  */
1743 static int cl_dvfs_simon_grade_notify_cb(struct notifier_block *nb,
1744                                          unsigned long grade, void *v)
1745 {
1746         unsigned long flags;
1747         int i, simon_offset;
1748         int curr_domain = (int)v;
1749         struct tegra_cl_dvfs *cld = container_of(
1750                 nb, struct tegra_cl_dvfs, simon_grade_nb);
1751         struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;
1752
1753         if (!cld->therm_floors_num || (curr_domain != rail->simon_domain))
1754                 return NOTIFY_DONE;
1755
1756         if (grade >= rail->simon_vmin_offs_num)
1757                 grade = rail->simon_vmin_offs_num - 1;
1758         simon_offset = rail->simon_vmin_offsets[grade];
1759         BUG_ON(simon_offset > 0);
1760
1761         clk_lock_save(cld->dfll_clk, &flags);
1762
1763         /* Convert new floors and invalidate minimum rates */
1764         cl_dvfs_convert_cold_output_floor(cld, simon_offset);
1765         for (i = 0; i < cld->therm_floors_num; i++)
1766                 cld->dvco_rate_floors[i] = 0;
1767
1768         cl_dvfs_set_dvco_rate_min(cld);
1769         cl_dvfs_set_force_out_min(cld);
1770         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1771                 tegra_cl_dvfs_request_rate(cld,
1772                         tegra_cl_dvfs_request_get(cld));
1773         }
1774
1775         clk_unlock_restore(cld->dfll_clk, &flags);
1776
1777         pr_info("tegra_dvfs: set %s simon grade %lu\n", rail->reg_id, grade);
1778
1779         return NOTIFY_OK;
1780 }
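
/*
 * Worked illustration with hypothetical offsets: for simon_vmin_offsets[] =
 * { 0, -20 }, grade 0 leaves the thermal floor profile unchanged, while
 * grade 1 (or any higher grade, clamped to the table size) lowers it by 20
 * before the cached DVCO rate floors are invalidated and re-evaluated.
 */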
1781
1782 static void tegra_cl_dvfs_register_simon_notifier(struct tegra_cl_dvfs *cld)
1783 {
1784         struct dvfs_rail *rail = cld->safe_dvfs->dvfs_rail;
1785
1786         /* Stay at default if no simon offsets */
1787         if (!rail->simon_vmin_offsets)
1788                 return;
1789
1790         cld->simon_grade_nb.notifier_call = cl_dvfs_simon_grade_notify_cb;
1791
1792         if (tegra_register_simon_notifier(&cld->simon_grade_nb)) {
1793                 pr_err("tegra_dvfs: failed to register %s simon notifier\n",
1794                        rail->reg_id);
1795                 return;
1796         }
1797
1798         pr_info("tegra_dvfs: registered %s simon notifier\n", rail->reg_id);
1799         return;
1800 }
1801
1802 static int __init tegra_cl_dvfs_probe(struct platform_device *pdev)
1803 {
1804         int ret;
1805         struct tegra_cl_dvfs_platform_data *p_data;
1806         struct resource *res, *res_i2c = NULL;
1807         struct tegra_cl_dvfs *cld;
1808         struct clk *ref_clk, *soc_clk, *i2c_clk, *safe_dvfs_clk, *dfll_clk;
1809
1810         /* Get resources */
1811         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1812         if (!res) {
1813                 dev_err(&pdev->dev, "missing register base\n");
1814                 return -ENOMEM;
1815         }
1816
1817         if (pdev->num_resources > 1) {
1818                 res_i2c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1819                 if (!res_i2c) {
1820                         dev_err(&pdev->dev, "missing i2c register base\n");
1821                         return -ENOMEM;
1822                 }
1823         }
1824
1825         p_data = pdev->dev.platform_data;
1826         if (!p_data || !p_data->cfg_param || !p_data->vdd_map) {
1827                 dev_err(&pdev->dev, "missing platform data\n");
1828                 return -ENODATA;
1829         }
1830
1831         ref_clk = clk_get(&pdev->dev, "ref");
1832         soc_clk = clk_get(&pdev->dev, "soc");
1833         i2c_clk = clk_get(&pdev->dev, "i2c");
1834         safe_dvfs_clk = clk_get(&pdev->dev, "safe_dvfs");
1835         dfll_clk = clk_get(&pdev->dev, p_data->dfll_clk_name);
1836         if (IS_ERR(ref_clk) || IS_ERR(soc_clk) || IS_ERR(i2c_clk)) {
1837                 dev_err(&pdev->dev, "missing control clock\n");
1838                 return -ENODEV;
1839         }
1840         if (IS_ERR(safe_dvfs_clk)) {
1841                 dev_err(&pdev->dev, "missing safe dvfs source clock\n");
1842                 return PTR_ERR(safe_dvfs_clk);
1843         }
1844         if (IS_ERR(dfll_clk)) {
1845                 dev_err(&pdev->dev, "missing target dfll clock\n");
1846                 return PTR_ERR(dfll_clk);
1847         }
1848         if (!safe_dvfs_clk->dvfs || !safe_dvfs_clk->dvfs->dvfs_rail) {
1849                 dev_err(&pdev->dev, "invalid safe dvfs source\n");
1850                 return -EINVAL;
1851         }
1852
1853         /* Allocate cl_dvfs object and populate resource accessors */
1854         cld = kzalloc(sizeof(*cld), GFP_KERNEL);
1855         if (!cld) {
1856                 dev_err(&pdev->dev, "failed to allocate cl_dvfs object\n");
1857                 return -ENOMEM;
1858         }
1859
1860         cld->cl_base = IO_ADDRESS(res->start);
1861         cld->cl_i2c_base = res_i2c ? IO_ADDRESS(res_i2c->start) : cld->cl_base;
1862         cld->p_data = p_data;
1863         cld->ref_clk = ref_clk;
1864         cld->soc_clk = soc_clk;
1865         cld->i2c_clk = i2c_clk;
1866         cld->dfll_clk = dfll_clk;
1867         cld->safe_dvfs = safe_dvfs_clk->dvfs;
1868 #ifdef CONFIG_THERMAL
1869         INIT_WORK(&cld->init_cdev_work, tegra_cl_dvfs_init_cdev);
1870 #endif
1871         /* Initialize cl_dvfs */
1872         ret = cl_dvfs_init(cld);
1873         if (ret) {
1874                 kfree(cld);
1875                 return ret;
1876         }
1877
1878         platform_set_drvdata(pdev, cld);
1879
1880         /*
1881          *  I2C interface mux is embedded into cl_dvfs h/w, so the attached
1882          *  regulator can be accessed by s/w independently. PWM interface,
1883          *  on the other hand, is accessible solely through cl_dvfs registers.
1884          *  Hence, bypass device is supported in PWM mode only.
1885          */
1886         if ((p_data->pmu_if == TEGRA_CL_DVFS_PMU_PWM) &&
1887             p_data->u.pmu_pwm.dfll_bypass_dev) {
1888                 clk_enable(cld->soc_clk);
1889                 tegra_cl_dvfs_bypass_dev_register(
1890                         cld, p_data->u.pmu_pwm.dfll_bypass_dev);
1891         }
1892
1893         /* Register SiMon notifier */
1894         tegra_cl_dvfs_register_simon_notifier(cld);
1895
1896         /*
1897          * Schedule cooling device registration as a separate work to address
1898          * the following race: when cl_dvfs is probed the DFLL child clock
1899          * (e.g., CPU) cannot be changed; on the other hand cooling device
1900          * registration will update the entire thermal zone, and may trigger
1901          * rate change of the target clock.
1902          */
1903         if (cld->safe_dvfs->dvfs_rail->vmin_cdev ||
1904             cld->safe_dvfs->dvfs_rail->vmax_cdev)
1905                 schedule_work(&cld->init_cdev_work);
1906         return 0;
1907 }
1908
1909 static struct platform_driver tegra_cl_dvfs_driver = {
1910         .driver         = {
1911                 .name   = "tegra_cl_dvfs",
1912                 .owner  = THIS_MODULE,
1913 #ifdef CONFIG_PM_SLEEP
1914                 .pm = &tegra_cl_dvfs_pm_ops,
1915 #endif
1916         },
1917 };
1918
1919 int __init tegra_init_cl_dvfs(void)
1920 {
1921         return platform_driver_probe(&tegra_cl_dvfs_driver,
1922                                      tegra_cl_dvfs_probe);
1923 }
1924
1925 /*
1926  * CL_DVFS states:
1927  *
1928  * - DISABLED: control logic mode - DISABLED, output interface disabled,
1929  *   dfll in reset
1930  * - OPEN_LOOP: control logic mode - OPEN_LOOP, output interface disabled,
1931  *   dfll is running "unlocked"
1932  * - CLOSED_LOOP: control logic mode - CLOSED_LOOP, output interface enabled,
1933  *   dfll is running "locked"
1934  */
1935
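/*
 * Minimal usage sketch of these transitions (illustrative; assumes a non-zero
 * rate was already requested via tegra_cl_dvfs_request_rate()):
 *
 *      tegra_cl_dvfs_enable(cld);      DISABLED    -> OPEN_LOOP
 *      tegra_cl_dvfs_lock(cld);        OPEN_LOOP   -> CLOSED_LOOP
 *      tegra_cl_dvfs_unlock(cld);      CLOSED_LOOP -> OPEN_LOOP
 *      tegra_cl_dvfs_disable(cld);     OPEN_LOOP   -> DISABLED
 */
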
1936 /* Switch from any other state to DISABLED state */
1937 void tegra_cl_dvfs_disable(struct tegra_cl_dvfs *cld)
1938 {
1939         switch (cld->mode) {
1940         case TEGRA_CL_DVFS_CLOSED_LOOP:
1941                 WARN(1, "DFLL is disabled directly from closed loop mode\n");
1942                 set_ol_config(cld);
1943                 output_disable_ol_prepare(cld);
1944                 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1945                 output_disable_post_ol(cld);
1946                 invalidate_request(cld);
1947                 cl_dvfs_disable_clocks(cld);
1948                 return;
1949
1950         case TEGRA_CL_DVFS_OPEN_LOOP:
1951                 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1952                 invalidate_request(cld);
1953                 cl_dvfs_disable_clocks(cld);
1954                 return;
1955
1956         default:
1957                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1958                 return;
1959         }
1960 }
1961
1962 /* Switch from DISABLE state to OPEN_LOOP state */
1963 int tegra_cl_dvfs_enable(struct tegra_cl_dvfs *cld)
1964 {
1965         if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1966                 pr_err("%s: Cannot enable DFLL in %s mode\n",
1967                        __func__, mode_name[cld->mode]);
1968                 return -EPERM;
1969         }
1970
1971         if (cld->mode != TEGRA_CL_DVFS_DISABLED)
1972                 return 0;
1973
1974         cl_dvfs_enable_clocks(cld);
1975         set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1976         return 0;
1977 }
1978
1979 /* Switch from OPEN_LOOP state to CLOSED_LOOP state */
1980 int tegra_cl_dvfs_lock(struct tegra_cl_dvfs *cld)
1981 {
1982         struct dfll_rate_req *req = &cld->last_req;
1983
1984         switch (cld->mode) {
1985         case TEGRA_CL_DVFS_CLOSED_LOOP:
1986                 return 0;
1987
1988         case TEGRA_CL_DVFS_OPEN_LOOP:
1989                 if (req->freq == 0) {
1990                         pr_err("%s: Cannot lock DFLL at rate 0\n", __func__);
1991                         return -EINVAL;
1992                 }
1993
1994                 /*
1995                  * Update control logic setting with last rate request;
1996                  * sync output limits with current tuning and thermal state,
1997                  * enable output and switch to closed loop mode. Make sure
1998                  * forced output does not interfere with closed loop.
1999                  */
2000                 set_cl_config(cld, req);
2001                 output_enable(cld);
2002                 set_mode(cld, TEGRA_CL_DVFS_CLOSED_LOOP);
2003                 set_request(cld, req);
2004                 disable_forced_output(cld);
2005                 calibration_timer_update(cld);
2006                 return 0;
2007
2008         default:
2009                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
2010                 pr_err("%s: Cannot lock DFLL in %s mode\n",
2011                        __func__, mode_name[cld->mode]);
2012                 return -EPERM;
2013         }
2014 }
2015
2016 /* Switch from CLOSED_LOOP state to OPEN_LOOP state */
2017 int tegra_cl_dvfs_unlock(struct tegra_cl_dvfs *cld)
2018 {
2019         int ret;
2020
2021         switch (cld->mode) {
2022         case TEGRA_CL_DVFS_CLOSED_LOOP:
2023                 set_ol_config(cld);
2024                 ret = output_disable_ol_prepare(cld);
2025                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
2026                 if (!ret)
2027                         ret = output_disable_post_ol(cld);
2028                 return ret;
2029
2030         case TEGRA_CL_DVFS_OPEN_LOOP:
2031                 return 0;
2032
2033         default:
2034                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
2035                 pr_err("%s: Cannot unlock DFLL in %s mode\n",
2036                        __func__, mode_name[cld->mode]);
2037                 return -EPERM;
2038         }
2039 }
2040
2041 /*
2042  * Convert requested rate into the control logic settings. In CLOSED_LOOP mode,
2043  * update new settings immediately to adjust DFLL output rate accordingly.
2044  * Otherwise, just save them until next switch to closed loop.
2045  */
2046 int tegra_cl_dvfs_request_rate(struct tegra_cl_dvfs *cld, unsigned long rate)
2047 {
2048         u32 val;
2049         struct dfll_rate_req req;
2050         req.rate = rate;
2051
2052         if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
2053                 pr_err("%s: Cannot set DFLL rate in %s mode\n",
2054                        __func__, mode_name[cld->mode]);
2055                 return -EPERM;
2056         }
2057
2058         /* Calibrate dfll minimum rate */
2059         cl_dvfs_calibrate(cld);
2060
2061         /* Determine DFLL output scale */
2062         req.scale = SCALE_MAX - 1;
2063         if (rate < cld->dvco_rate_min) {
2064                 int scale = DIV_ROUND_CLOSEST((rate / 1000 * SCALE_MAX),
2065                         (cld->dvco_rate_min / 1000));
2066                 if (!scale) {
2067                         pr_err("%s: Rate %lu is below scalable range\n",
2068                                __func__, rate);
2069                         return -EINVAL;
2070                 }
2071                 req.scale = scale - 1;
2072                 rate = cld->dvco_rate_min;
2073         }
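        /*
         * Worked example with hypothetical rates: for dvco_rate_min of
         * 800 MHz and a 400 MHz request, scale = DIV_ROUND_CLOSEST(400000 *
         * 256, 800000) = 128, so req.scale = 127 and the skipper output is
         * 800 MHz * 128 / 256 = 400 MHz while the DVCO runs at its minimum.
         */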
2074
2075         /* Convert requested rate into frequency request and scale settings */
2076         val = GET_REQUEST_FREQ(rate, cld->ref_rate);
2077         if (val > FREQ_MAX) {
2078                 pr_err("%s: Rate %lu is above dfll range\n", __func__, rate);
2079                 return -EINVAL;
2080         }
2081         req.freq = val;
2082         rate = GET_REQUEST_RATE(val, cld->ref_rate);
2083
2084         /* Find safe voltage for requested rate */
2085         if (find_safe_output(cld, rate, &req.output)) {
2086                 pr_err("%s: Failed to find safe output for rate %lu\n",
2087                        __func__, rate);
2088                 return -EINVAL;
2089         }
2090         req.cap = req.output;
2091
2092         /*
2093          * Save validated request, and in CLOSED_LOOP mode actually update
2094          * control logic settings; use request output to set maximum voltage
2095          * limit, but keep one LUT step room above safe voltage
2096          */
2097         cld->last_req = req;
2098
2099         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2100                 set_cl_config(cld, &cld->last_req);
2101                 set_request(cld, &cld->last_req);
2102         }
2103         return 0;
2104 }
2105
2106 unsigned long tegra_cl_dvfs_request_get(struct tegra_cl_dvfs *cld)
2107 {
2108         struct dfll_rate_req *req = &cld->last_req;
2109
2110         /*
2111          * If running below the dvco minimum rate (skipper resolution:
2112          * dvco min rate / 256), return the last requested rate rounded to 1 kHz.
2113          * If running at or above the dvco minimum (closed loop resolution:
2114          * ref rate / 2), return the cl_dvfs target rate.
2115          */
2116         if ((req->scale + 1) < SCALE_MAX)
2117                 return req->rate / 1000 * 1000;
2118
2119         return GET_REQUEST_RATE(req->freq, cld->ref_rate);
2120 }
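
/*
 * Numeric illustration (hypothetical clocks): a 51 MHz ref clock gives a
 * locked-rate step of 51 MHz / 2 = 25.5 MHz, while an 800 MHz dvco_rate_min
 * gives a skipper step of 800 MHz / 256 = 3.125 MHz - hence the different
 * rounding of the two return paths above.
 */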
2121
2122 #ifdef CONFIG_DEBUG_FS
2123
2124 static int lock_get(void *data, u64 *val)
2125 {
2126         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2127         *val = cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP;
2128         return 0;
2129 }
2130 static int lock_set(void *data, u64 val)
2131 {
2132         struct clk *c = (struct clk *)data;
2133         return tegra_clk_cfg_ex(c, TEGRA_CLK_DFLL_LOCK, val);
2134 }
2135 DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");
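
/*
 * Shell example (path is illustrative; it depends on the debugfs mount point
 * and the dfll clock name):
 *      echo 1 > /sys/kernel/debug/clock/<dfll>/lock    - lock (closed loop)
 *      echo 0 > /sys/kernel/debug/clock/<dfll>/lock    - unlock (open loop)
 */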
2136
2137 static int monitor_get(void *data, u64 *val)
2138 {
2139         u32 v, s;
2140         unsigned long flags;
2141         struct clk *c = (struct clk *)data;
2142         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2143
2144         clk_enable(cld->soc_clk);
2145         clk_lock_save(c, &flags);
2146
2147         switch_monitor(cld, CL_DVFS_MONITOR_CTRL_FREQ);
2148         wait_data_new(cld, &v);
2149         filter_monitor_data(cld, &v); /* ignore error, use "some value" */
2150
2151         v = GET_MONITORED_RATE(v, cld->ref_rate);
2152         s = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
2153         s = (s & CL_DVFS_FREQ_REQ_SCALE_MASK) >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
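        /*
         * Scale the monitored DVCO rate by the output skipper setting,
         * (s + 1) / 256, to report the actual output rate.
         */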
2154         *val = (u64)v * (s + 1) / 256;
2155
2156         clk_unlock_restore(c, &flags);
2157         clk_disable(cld->soc_clk);
2158         return 0;
2159 }
2160 DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
2161
2162 static int output_get(void *data, u64 *val)
2163 {
2164         u32 v;
2165         unsigned long flags;
2166         struct clk *c = (struct clk *)data;
2167         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2168
2169         clk_enable(cld->soc_clk);
2170         clk_lock_save(c, &flags);
2171
2172         v = cl_dvfs_get_output(cld);
2173         if (IS_ERR_VALUE(v))
2174                 v = get_last_output(cld); /* ignore error, use "some value" */
2175         *val = get_mv(cld, v);
2176
2177         clk_unlock_restore(c, &flags);
2178         clk_disable(cld->soc_clk);
2179         return 0;
2180 }
2181 DEFINE_SIMPLE_ATTRIBUTE(output_fops, output_get, NULL, "%llu\n");
2182
2183 static int vmax_get(void *data, u64 *val)
2184 {
2185         u32 v;
2186         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2187         v = cld->lut_max;
2188         *val = get_mv(cld, v);
2189         return 0;
2190 }
2191 DEFINE_SIMPLE_ATTRIBUTE(vmax_fops, vmax_get, NULL, "%llu\n");
2192
2193 static int vmin_get(void *data, u64 *val)
2194 {
2195         u32 v;
2196         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2197         v = cld->lut_min;
2198         *val = get_mv(cld, v);
2199         return 0;
2200 }
2201 DEFINE_SIMPLE_ATTRIBUTE(vmin_fops, vmin_get, NULL, "%llu\n");
2202
2203 static int tune_high_mv_get(void *data, u64 *val)
2204 {
2205         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2206         *val = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
2207         return 0;
2208 }
2209 static int tune_high_mv_set(void *data, u64 val)
2210 {
2211         unsigned long flags;
2212         struct clk *c = (struct clk *)data;
2213         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2214
2215         clk_lock_save(c, &flags);
2216
2217         cld->safe_dvfs->dfll_data.tune_high_min_millivolts = val;
2218         cl_dvfs_init_output_thresholds(cld);
2219         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
2220                 set_cl_config(cld, &cld->last_req);
2221                 set_request(cld, &cld->last_req);
2222         }
2223
2224         clk_unlock_restore(c, &flags);
2225         return 0;
2226 }
2227 DEFINE_SIMPLE_ATTRIBUTE(tune_high_mv_fops, tune_high_mv_get, tune_high_mv_set,
2228                         "%llu\n");
2229
2230 static int fout_mv_get(void *data, u64 *val)
2231 {
2232         u32 v;
2233         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2234         v = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE) & OUT_MASK;
2235         *val = cld->p_data->vdd_map[v].reg_uV / 1000;
2236         return 0;
2237 }
2238 static int fout_mv_set(void *data, u64 val)
2239 {
2240         u32 v;
2241         unsigned long flags;
2242         struct clk *c = (struct clk *)data;
2243         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2244
2245         /* FIXME: do we need it in i2c mode ? */
2246         if (is_i2c(cld))
2247                 return -ENOSYS;
2248
2249         clk_lock_save(c, &flags);
2250         clk_enable(cld->soc_clk);
2251
2252         v = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_FORCE);
2253         if (val) {
2254                 val = find_mv_out_cap(cld, (int)val);
2255                 v = (v & CL_DVFS_OUTPUT_FORCE_ENABLE) | (u32)val;
2256                 cl_dvfs_writel(cld, v, CL_DVFS_OUTPUT_FORCE);
2257                 cl_dvfs_wmb(cld);
2258
2259                 if (!(v & CL_DVFS_OUTPUT_FORCE_ENABLE)) {
2260                         v |= CL_DVFS_OUTPUT_FORCE_ENABLE;
2261                         cl_dvfs_writel(cld, v, CL_DVFS_OUTPUT_FORCE);
2262                         cl_dvfs_wmb(cld);
2263                 }
2264         } else if (v & CL_DVFS_OUTPUT_FORCE_ENABLE) {
2265                 v &= ~CL_DVFS_OUTPUT_FORCE_ENABLE;
2266                 cl_dvfs_writel(cld, v, CL_DVFS_OUTPUT_FORCE);
2267                 cl_dvfs_wmb(cld);
2268         }
2269
2270         clk_disable(cld->soc_clk);
2271         clk_unlock_restore(c, &flags);
2272         return 0;
2273 }
2274 DEFINE_SIMPLE_ATTRIBUTE(fout_mv_fops, fout_mv_get, fout_mv_set, "%llu\n");
2275
2276 static int fmin_get(void *data, u64 *val)
2277 {
2278         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2279         *val = cld->dvco_rate_min;
2280         return 0;
2281 }
2282 DEFINE_SIMPLE_ATTRIBUTE(dvco_rate_min_fops, fmin_get, NULL, "%llu\n");
2283
2284 static int calibr_delay_get(void *data, u64 *val)
2285 {
2286         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2287         *val = jiffies_to_msecs(cld->calibration_delay);
2288         return 0;
2289 }
2290 static int calibr_delay_set(void *data, u64 val)
2291 {
2292         unsigned long flags;
2293         struct clk *c = (struct clk *)data;
2294         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2295
2296         clk_lock_save(c, &flags);
2297         cld->calibration_delay = msecs_to_jiffies(val);
2298         clk_unlock_restore(c, &flags);
2299         return 0;
2300 }
2301 DEFINE_SIMPLE_ATTRIBUTE(calibr_delay_fops, calibr_delay_get, calibr_delay_set,
2302                         "%llu\n");
2303
2304 static int undershoot_get(void *data, u64 *val)
2305 {
2306         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
2307         *val = cld->p_data->pmu_undershoot_gb;
2308         return 0;
2309 }
2310 static int undershoot_set(void *data, u64 val)
2311 {
2312         unsigned long flags;
2313         struct clk *c = (struct clk *)data;
2314         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2315
2316         clk_lock_save(c, &flags);
2317         cld->p_data->pmu_undershoot_gb = val;
2318         cl_dvfs_set_force_out_min(cld);
2319         clk_unlock_restore(c, &flags);
2320         return 0;
2321 }
2322 DEFINE_SIMPLE_ATTRIBUTE(undershoot_fops, undershoot_get, undershoot_set,
2323                         "%llu\n");
2324
2325 static int cl_profiles_show(struct seq_file *s, void *data)
2326 {
2327         u8 v;
2328         int i, *trips;
2329         unsigned long r;
2330         struct clk *c = s->private;
2331         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2332
2333         seq_printf(s, "THERM CAPS:%s\n", cld->therm_caps_num ? "" : " NONE");
2334         for (i = 0; i < cld->therm_caps_num; i++) {
2335                 v = cld->thermal_out_caps[i];
2336                 trips = cld->safe_dvfs->dvfs_rail->vmax_cdev->trip_temperatures;
2337                 seq_printf(s, "%3dC.. %5dmV\n", trips[i], get_mv(cld, v));
2338         }
2339
2340         seq_puts(s, "TUNE HIGH:\n");
2341         seq_printf(s, "start  %5dmV\n", get_mv(cld, cld->tune_high_out_start));
2342         seq_printf(s, "min    %5dmV\n", get_mv(cld, cld->tune_high_out_min));
2343
2344         seq_printf(s, "THERM FLOORS:%s\n", cld->therm_floors_num ? "" : " NONE");
2345         for (i = 0; i < cld->therm_floors_num; i++) {
2346                 v = cld->thermal_out_floors[i];
2347                 r = cld->dvco_rate_floors[i];
2348                 trips = cld->safe_dvfs->dvfs_rail->vmin_cdev->trip_temperatures;
2349                 seq_printf(s, " ..%3dC%5dmV%9lukHz%s\n",
2350                            trips[i], get_mv(cld, v),
2351                            (r ? : find_dvco_rate_min(cld, v)) / 1000,
2352                            r ? " (calibrated)"  : "");
2353         }
2354         r = cld->dvco_rate_floors[i];
2355         seq_printf(s, "  vmin:%5dmV%9lukHz%s\n", cld->out_map[0]->reg_uV / 1000,
2356                    (r ? : cld->safe_dvfs->dfll_data.out_rate_min) / 1000,
2357                    r ? " (calibrated)"  : "");
2358
2359         return 0;
2360 }
2361
2362 static int cl_profiles_open(struct inode *inode, struct file *file)
2363 {
2364         return single_open(file, cl_profiles_show, inode->i_private);
2365 }
2366
2367 static const struct file_operations cl_profiles_fops = {
2368         .open           = cl_profiles_open,
2369         .read           = seq_read,
2370         .llseek         = seq_lseek,
2371         .release        = single_release,
2372 };
2373
2374 static int cl_register_show(struct seq_file *s, void *data)
2375 {
2376         u32 offs;
2377         struct clk *c = s->private;
2378         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2379
2380         clk_enable(cld->soc_clk);
2381
2382         seq_printf(s, "CONTROL REGISTERS:\n");
2383         for (offs = 0; offs <= CL_DVFS_MONITOR_DATA; offs += 4)
2384                 seq_printf(s, "[0x%02x] = 0x%08x\n",
2385                            offs, cl_dvfs_readl(cld, offs));
2386
2387         seq_printf(s, "\nI2C and INTR REGISTERS:\n");
2388         for (offs = CL_DVFS_I2C_CFG; offs <= CL_DVFS_I2C_STS; offs += 4)
2389                 seq_printf(s, "[0x%02x] = 0x%08x\n",
2390                            offs, cl_dvfs_readl(cld, offs));
2391
2392         offs = CL_DVFS_INTR_STS;
2393         seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
2394         offs = CL_DVFS_INTR_EN;
2395         seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
2396
2397         seq_printf(s, "\nLUT:\n");
2398         for (offs = CL_DVFS_OUTPUT_LUT;
2399              offs < CL_DVFS_OUTPUT_LUT + 4 * MAX_CL_DVFS_VOLTAGES;
2400              offs += 4)
2401                 seq_printf(s, "[0x%02x] = 0x%08x\n",
2402                            offs, cl_dvfs_readl(cld, offs));
2403
2404         clk_disable(cld->soc_clk);
2405         return 0;
2406 }
2407
2408 static int cl_register_open(struct inode *inode, struct file *file)
2409 {
2410         return single_open(file, cl_register_show, inode->i_private);
2411 }
2412
2413 static ssize_t cl_register_write(struct file *file,
2414         const char __user *userbuf, size_t count, loff_t *ppos)
2415 {
2416         char buf[80];
2417         u32 offs;
2418         u32 val;
2419         struct clk *c = file->f_path.dentry->d_inode->i_private;
2420         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
2421
2422         if (sizeof(buf) <= count)
2423                 return -EINVAL;
2424
2425         if (copy_from_user(buf, userbuf, count))
2426                 return -EFAULT;
2427
2428         /* terminate buffer and trim - white spaces may be appended
2429          *  at the end when invoked from shell command line */
2430         buf[count] = '\0';
2431         strim(buf);
2432
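        /*
         * Expected input format mirrors the register dump above, e.g.
         * (offset and value are illustrative): [0x14] = 0x00000000
         */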
2433         if (sscanf(buf, "[0x%x] = 0x%x", &offs, &val) != 2)
2434                 return -EINVAL;
2435
2436         clk_enable(cld->soc_clk);
2437         cl_dvfs_writel(cld, val, offs & (~0x3));
2438         clk_disable(cld->soc_clk);
2439         return count;
2440 }
2441
2442 static const struct file_operations cl_register_fops = {
2443         .open           = cl_register_open,
2444         .read           = seq_read,
2445         .write          = cl_register_write,
2446         .llseek         = seq_lseek,
2447         .release        = single_release,
2448 };
2449
2450 int __init tegra_cl_dvfs_debug_init(struct clk *dfll_clk)
2451 {
2452         struct dentry *cl_dvfs_dentry;
2453
2454         if (!dfll_clk || !dfll_clk->dent || (dfll_clk->state == UNINITIALIZED))
2455                 return 0;
2456
2457         if (!debugfs_create_file("lock", S_IRUGO | S_IWUSR,
2458                 dfll_clk->dent, dfll_clk, &lock_fops))
2459                 goto err_out;
2460
2461         cl_dvfs_dentry = debugfs_create_dir("cl_dvfs", dfll_clk->dent);
2462         if (!cl_dvfs_dentry)
2463                 goto err_out;
2464
2465         if (!debugfs_create_file("monitor", S_IRUGO,
2466                 cl_dvfs_dentry, dfll_clk, &monitor_fops))
2467                 goto err_out;
2468
2469         if (!debugfs_create_file("output_mv", S_IRUGO,
2470                 cl_dvfs_dentry, dfll_clk, &output_fops))
2471                 goto err_out;
2472
2473         if (!debugfs_create_file("vmax_mv", S_IRUGO,
2474                 cl_dvfs_dentry, dfll_clk, &vmax_fops))
2475                 goto err_out;
2476
2477         if (!debugfs_create_file("vmin_mv", S_IRUGO,
2478                 cl_dvfs_dentry, dfll_clk, &vmin_fops))
2479                 goto err_out;
2480
2481         if (!debugfs_create_file("tune_high_mv", S_IRUGO | S_IWUSR,
2482                 cl_dvfs_dentry, dfll_clk, &tune_high_mv_fops))
2483                 goto err_out;
2484
2485         if (!debugfs_create_file("force_out_mv", S_IRUGO,
2486                 cl_dvfs_dentry, dfll_clk, &fout_mv_fops))
2487                 goto err_out;
2488
2489         if (!debugfs_create_file("dvco_min", S_IRUGO,
2490                 cl_dvfs_dentry, dfll_clk, &dvco_rate_min_fops))
2491                 goto err_out;
2492
2493         if (!debugfs_create_file("calibr_delay", S_IRUGO,
2494                 cl_dvfs_dentry, dfll_clk, &calibr_delay_fops))
2495                 goto err_out;
2496
2497         if (!debugfs_create_file("pmu_undershoot_gb", S_IRUGO,
2498                 cl_dvfs_dentry, dfll_clk, &undershoot_fops))
2499                 goto err_out;
2500
2501         if (!debugfs_create_file("profiles", S_IRUGO,
2502                 cl_dvfs_dentry, dfll_clk, &cl_profiles_fops))
2503                 goto err_out;
2504
2505         if (!debugfs_create_file("registers", S_IRUGO | S_IWUSR,
2506                 cl_dvfs_dentry, dfll_clk, &cl_register_fops))
2507                 goto err_out;
2508
2509         return 0;
2510
2511 err_out:
2512         debugfs_remove_recursive(dfll_clk->dent);
2513         return -ENOMEM;
2514 }
2515 #endif