1 /*
2  * arch/arm/mach-tegra/tegra_cl_dvfs.c
3  *
4  * Copyright (c) 2012-2013 NVIDIA CORPORATION. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/err.h>
23 #include <linux/io.h>
24 #include <linux/clk.h>
25 #include <linux/interrupt.h>
26 #include <linux/suspend.h>
27 #include <linux/debugfs.h>
28 #include <linux/seq_file.h>
29 #include <linux/uaccess.h>
30 #include <linux/module.h>
31 #include <linux/platform_device.h>
32
33 #include <mach/irqs.h>
34 #include <mach/hardware.h>
35
36 #include "tegra_cl_dvfs.h"
37 #include "clock.h"
38 #include "dvfs.h"
39 #include "iomap.h"
40
41 #define OUT_MASK                        0x3f
42
43 #define CL_DVFS_CTRL                    0x00
44 #define CL_DVFS_CONFIG                  0x04
45 #define CL_DVFS_CONFIG_DIV_MASK         0xff
46
47 #define CL_DVFS_PARAMS                  0x08
48 #define CL_DVFS_PARAMS_CG_SCALE         (0x1 << 24)
49 #define CL_DVFS_PARAMS_FORCE_MODE_SHIFT 22
50 #define CL_DVFS_PARAMS_FORCE_MODE_MASK  (0x3 << CL_DVFS_PARAMS_FORCE_MODE_SHIFT)
51 #define CL_DVFS_PARAMS_CF_PARAM_SHIFT   16
52 #define CL_DVFS_PARAMS_CF_PARAM_MASK    (0x3f << CL_DVFS_PARAMS_CF_PARAM_SHIFT)
53 #define CL_DVFS_PARAMS_CI_PARAM_SHIFT   8
54 #define CL_DVFS_PARAMS_CI_PARAM_MASK    (0x7 << CL_DVFS_PARAMS_CI_PARAM_SHIFT)
55 #define CL_DVFS_PARAMS_CG_PARAM_SHIFT   0
56 #define CL_DVFS_PARAMS_CG_PARAM_MASK    (0xff << CL_DVFS_PARAMS_CG_PARAM_SHIFT)
57
58 #define CL_DVFS_TUNE0                   0x0c
59 #define CL_DVFS_TUNE1                   0x10
60
61 #define CL_DVFS_FREQ_REQ                0x14
62 #define CL_DVFS_FREQ_REQ_FORCE_ENABLE   (0x1 << 28)
63 #define CL_DVFS_FREQ_REQ_FORCE_SHIFT    16
64 #define CL_DVFS_FREQ_REQ_FORCE_MASK     (0xfff << CL_DVFS_FREQ_REQ_FORCE_SHIFT)
65 #define FORCE_MAX                       2047
66 #define FORCE_MIN                       -2048
67 #define CL_DVFS_FREQ_REQ_SCALE_SHIFT    8
68 #define CL_DVFS_FREQ_REQ_SCALE_MASK     (0xff << CL_DVFS_FREQ_REQ_SCALE_SHIFT)
69 #define SCALE_MAX                       256
70 #define CL_DVFS_FREQ_REQ_FREQ_VALID     (0x1 << 7)
71 #define CL_DVFS_FREQ_REQ_FREQ_SHIFT     0
72 #define CL_DVFS_FREQ_REQ_FREQ_MASK      (0x7f << CL_DVFS_FREQ_REQ_FREQ_SHIFT)
73 #define FREQ_MAX                        127
74
75 #define CL_DVFS_SCALE_RAMP              0x18
76
77 #define CL_DVFS_DROOP_CTRL              0x1c
78 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT 16
79 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK  \
80                 (0xff << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT)
81 #define CL_DVFS_DROOP_CTRL_CUT_SHIFT    8
82 #define CL_DVFS_DROOP_CTRL_CUT_MASK     (0xf << CL_DVFS_DROOP_CTRL_CUT_SHIFT)
83 #define CL_DVFS_DROOP_CTRL_RAMP_SHIFT   0
84 #define CL_DVFS_DROOP_CTRL_RAMP_MASK    (0xff << CL_DVFS_DROOP_CTRL_RAMP_SHIFT)
85
86 #define CL_DVFS_OUTPUT_CFG              0x20
87 #define CL_DVFS_OUTPUT_CFG_I2C_ENABLE   (0x1 << 30)
88 #define CL_DVFS_OUTPUT_CFG_SAFE_SHIFT   24
89 #define CL_DVFS_OUTPUT_CFG_SAFE_MASK    \
90                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT)
91 #define CL_DVFS_OUTPUT_CFG_MAX_SHIFT    16
92 #define CL_DVFS_OUTPUT_CFG_MAX_MASK     \
93                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MAX_SHIFT)
94 #define CL_DVFS_OUTPUT_CFG_MIN_SHIFT    8
95 #define CL_DVFS_OUTPUT_CFG_MIN_MASK     \
96                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MIN_SHIFT)
97
98 #define CL_DVFS_OUTPUT_FORCE            0x24
99 #define CL_DVFS_MONITOR_CTRL            0x28
100 #define CL_DVFS_MONITOR_CTRL_DISABLE    0
101 #define CL_DVFS_MONITOR_CTRL_FREQ       6
102 #define CL_DVFS_MONITOR_DATA            0x2c
103 #define CL_DVFS_MONITOR_DATA_NEW        (0x1 << 16)
104 #define CL_DVFS_MONITOR_DATA_MASK       0xFFFF
105
106 #define CL_DVFS_I2C_CFG                 0x40
107 #define CL_DVFS_I2C_CFG_ARB_ENABLE      (0x1 << 20)
108 #define CL_DVFS_I2C_CFG_HS_CODE_SHIFT   16
109 #define CL_DVFS_I2C_CFG_HS_CODE_MASK    (0x7 << CL_DVFS_I2C_CFG_HS_CODE_SHIFT)
110 #define CL_DVFS_I2C_CFG_PACKET_ENABLE   (0x1 << 15)
111 #define CL_DVFS_I2C_CFG_SIZE_SHIFT      12
112 #define CL_DVFS_I2C_CFG_SIZE_MASK       (0x7 << CL_DVFS_I2C_CFG_SIZE_SHIFT)
113 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_10   (0x1 << 10)
114 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT 0
115 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_MASK \
116                 (0x3ff << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT)
117
118 #define CL_DVFS_I2C_VDD_REG_ADDR        0x44
119 #define CL_DVFS_I2C_STS                 0x48
120 #define CL_DVFS_I2C_STS_I2C_LAST_SHIFT  1
121 #define CL_DVFS_I2C_STS_I2C_REQ_PENDING 0x1
122
123 #define CL_DVFS_INTR_STS                0x5c
124 #define CL_DVFS_INTR_EN                 0x60
125 #define CL_DVFS_INTR_MIN_MASK           0x1
126 #define CL_DVFS_INTR_MAX_MASK           0x2
127
128 #define CL_DVFS_I2C_CLK_DIVISOR         0x16c
129 #define CL_DVFS_I2C_CLK_DIVISOR_MASK    0xffff
130 #define CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT 16
131 #define CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT 0
132
133 #define CL_DVFS_OUTPUT_LUT              0x200
134
135 #define CL_DVFS_CALIBR_TIME             40000
136 #define CL_DVFS_OUTPUT_PENDING_TIMEOUT  1000
137 #define CL_DVFS_OUTPUT_RAMP_DELAY       100
138 #define CL_DVFS_TUNE_HIGH_DELAY         2000
139
140 #define CL_DVFS_TUNE_HIGH_MARGIN_MV     20
141
142 enum tegra_cl_dvfs_ctrl_mode {
143         TEGRA_CL_DVFS_UNINITIALIZED = 0,
144         TEGRA_CL_DVFS_DISABLED = 1,
145         TEGRA_CL_DVFS_OPEN_LOOP = 2,
146         TEGRA_CL_DVFS_CLOSED_LOOP = 3,
147 };
148
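/*
 * Summary of the DFLL tuning state machine below: TUNE_LOW applies the safe
 * low-voltage tuning values; TUNE_HIGH_REQUEST means high tuning was requested
 * and the driver is waiting (via tune_timer) for the regulator output to settle
 * at or above tune_high_out_min; TUNE_HIGH means the high-voltage tuning values
 * are in effect.
 */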
149 enum tegra_cl_dvfs_tune_state {
150         TEGRA_CL_DVFS_TUNE_LOW = 0,
151         TEGRA_CL_DVFS_TUNE_HIGH_REQUEST,
152         TEGRA_CL_DVFS_TUNE_HIGH,
153 };
154
155 struct dfll_rate_req {
156         u8      freq;
157         u8      scale;
158         u8      output;
159         u8      cap;
160         unsigned long rate;
161 };
162
163 struct tegra_cl_dvfs {
164         void                                    *cl_base;
165         struct tegra_cl_dvfs_platform_data      *p_data;
166
167         struct dvfs                     *safe_dvfs;
168         struct thermal_cooling_device   *vmax_cdev;
169         struct thermal_cooling_device   *vmin_cdev;
170         struct work_struct              init_cdev_work;
171
172         struct clk                      *soc_clk;
173         struct clk                      *ref_clk;
174         struct clk                      *i2c_clk;
175         struct clk                      *dfll_clk;
176         unsigned long                   ref_rate;
177         unsigned long                   i2c_rate;
178
179         /* output voltage mapping:
180          * legacy dvfs table index -to- cl_dvfs output LUT index
181          * cl_dvfs output LUT index -to- PMU value/voltage pair ptr
182          */
183         u8                              clk_dvfs_map[MAX_DVFS_FREQS];
184         struct voltage_reg_map          *out_map[MAX_CL_DVFS_VOLTAGES];
185         u8                              num_voltages;
186         u8                              safe_output;
187         u8                              tune_high_out_start;
188         u8                              tune_high_out_min;
189         u8                              minimax_output;
190         u8                              thermal_out_caps[MAX_THERMAL_LIMITS];
191         u8                              thermal_out_floors[MAX_THERMAL_LIMITS];
192         int                             therm_caps_num;
193         int                             therm_floors_num;
194         unsigned long                   dvco_rate_min;
195
196         u8                              lut_min;
197         u8                              lut_max;
198         u8                              force_out_min;
199         int                             therm_cap_idx;
200         int                             therm_floor_idx;
201         struct dfll_rate_req            last_req;
202         enum tegra_cl_dvfs_tune_state   tune_state;
203         enum tegra_cl_dvfs_ctrl_mode    mode;
204
205         struct timer_list               tune_timer;
206         unsigned long                   tune_delay;
207         struct timer_list               calibration_timer;
208         unsigned long                   calibration_delay;
209         ktime_t                         last_calibration;
210         unsigned long                   calibration_range_min;
211         unsigned long                   calibration_range_max;
212 };
213
214 /* Conversion macros (the different scales used for the frequency request
215    and the monitored rate are intentional, not a typo) */
216 #define RATE_STEP(cld)                          ((cld)->ref_rate / 2)
217 #define GET_REQUEST_FREQ(rate, ref_rate)        ((rate) / ((ref_rate) / 2))
218 #define GET_REQUEST_RATE(freq, ref_rate)        ((freq) * ((ref_rate) / 2))
219 #define GET_MONITORED_RATE(freq, ref_rate)      ((freq) * ((ref_rate) / 4))
220 #define GET_DROOP_FREQ(rate, ref_rate)          ((rate) / ((ref_rate) / 4))
221 #define ROUND_MIN_RATE(rate, ref_rate)          \
222                 (DIV_ROUND_UP(rate, (ref_rate) / 2) * ((ref_rate) / 2))
223 #define GET_DIV(ref_rate, out_rate, scale)      \
224                 DIV_ROUND_UP((ref_rate), (out_rate) * (scale))
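/*
 * Worked example (illustrative numbers only): with a 51 MHz reference clock,
 * RATE_STEP() is 25.5 MHz, so a 1 GHz target yields
 * GET_REQUEST_FREQ(1000000000, 51000000) = 39, and the rate actually programmed
 * is GET_REQUEST_RATE(39, 51000000) = 994.5 MHz. The monitor, however, counts
 * in ref_rate/4 units, hence the separate GET_MONITORED_RATE() and
 * GET_DROOP_FREQ() macros.
 */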
225
226 static const char *mode_name[] = {
227         [TEGRA_CL_DVFS_UNINITIALIZED] = "uninitialized",
228         [TEGRA_CL_DVFS_DISABLED] = "disabled",
229         [TEGRA_CL_DVFS_OPEN_LOOP] = "open_loop",
230         [TEGRA_CL_DVFS_CLOSED_LOOP] = "closed_loop",
231 };
232
233 static inline u32 cl_dvfs_readl(struct tegra_cl_dvfs *cld, u32 offs)
234 {
235         return __raw_readl((void *)cld->cl_base + offs);
236 }
237 static inline void cl_dvfs_writel(struct tegra_cl_dvfs *cld, u32 val, u32 offs)
238 {
239         __raw_writel(val, (void *)cld->cl_base + offs);
240 }
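/*
 * Flush a posted register write: the barrier orders the write, and the dummy
 * read-back of CL_DVFS_CTRL makes sure the write has reached the cl_dvfs block
 * before the caller proceeds.
 */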
241 static inline void cl_dvfs_wmb(struct tegra_cl_dvfs *cld)
242 {
243         wmb();
244         cl_dvfs_readl(cld, CL_DVFS_CTRL);
245 }
246
247 static inline void invalidate_request(struct tegra_cl_dvfs *cld)
248 {
249         u32 val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
250         val &= ~CL_DVFS_FREQ_REQ_FREQ_VALID;
251         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
252         cl_dvfs_wmb(cld);
253 }
254
255 static inline int output_enable(struct tegra_cl_dvfs *cld)
256 {
257         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
258
259         /* FIXME: PWM output control */
260         val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
261         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
262         cl_dvfs_wmb(cld);
263         return  0;
264 }
265
266 static noinline int output_flush_disable(struct tegra_cl_dvfs *cld)
267 {
268         int i;
269         u32 sts;
270         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
271
272         /* Flush transactions in flight, and then disable */
273         for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
274                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
275                 udelay(2);
276                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
277                         sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
278                         if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
279                                 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
280                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
281                                 wmb();
282                                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
283                                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
284                                         return 0; /* no pending request */
285
286                                 /* Re-enable, continue wait */
287                                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
288                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
289                                 wmb();
290                         }
291                 }
292         }
293
294         /* I2C request is still pending - disable anyway, but report error */
295         val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
296         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
297         cl_dvfs_wmb(cld);
298         return -ETIMEDOUT;
299 }
300
301 static noinline int output_disable_flush(struct tegra_cl_dvfs *cld)
302 {
303         int i;
304         u32 sts;
305         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
306
307         /* Disable output interface right away */
308         val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
309         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
310         cl_dvfs_wmb(cld);
311
312         /* Flush possible transaction in flight */
313         for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
314                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
315                 udelay(2);
316                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
317                         sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
318                         if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
319                                 return 0;
320                 }
321         }
322
323         /* I2C request is still pending - report error */
324         return -ETIMEDOUT;
325 }
326
327 static inline int output_disable_ol_prepare(struct tegra_cl_dvfs *cld)
328 {
329         /* FIXME: PWM output control */
330         /*
331          * If cl-dvfs h/w does not require output to be quiet before disable,
332          * s/w can stop I2C communications at any time (including operations
333          * in closed loop mode), and I2C bus integrity is guaranteed even in
334          * case of flush timeout.
335          */
336         if (!(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET)) {
337                 int ret = output_disable_flush(cld);
338                 if (ret)
339                         pr_debug("cl_dvfs: I2C pending timeout ol_prepare\n");
340                 return ret;
341         }
342         return 0;
343 }
344
345 static inline int output_disable_post_ol(struct tegra_cl_dvfs *cld)
346 {
347         /* FIXME: PWM output control */
348         /*
349          * If cl-dvfs h/w requires output to be quiet before disable, s/w
350          * should stop I2C communications only after the switch to open loop
351          * mode, and I2C bus integrity is not guaranteed in case of flush
352          * timeout.
353          */
354         if (cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) {
355                 int ret = output_flush_disable(cld);
356                 if (ret)
357                         pr_err("cl_dvfs: I2C pending timeout post_ol\n");
358                 return ret;
359         }
360         return 0;
361 }
362
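/*
 * The CL_DVFS_CTRL register encodes the mode as the enum value minus one
 * (h/w: 0 = disabled, 1 = open loop, 2 = closed loop), since the s/w enum
 * reserves 0 for the uninitialized state.
 */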
363 static inline void set_mode(struct tegra_cl_dvfs *cld,
364                             enum tegra_cl_dvfs_ctrl_mode mode)
365 {
366         cld->mode = mode;
367         cl_dvfs_writel(cld, mode - 1, CL_DVFS_CTRL);
368         cl_dvfs_wmb(cld);
369 }
370
371 static inline u8 get_output_cap(struct tegra_cl_dvfs *cld,
372                                 struct dfll_rate_req *req)
373 {
374         u32 thermal_cap = cld->num_voltages - 1;
375
376         if (cld->therm_cap_idx && (cld->therm_cap_idx <= cld->therm_caps_num))
377                 thermal_cap = cld->thermal_out_caps[cld->therm_cap_idx - 1];
378         if (req && (req->cap < thermal_cap))
379                 return req->cap;
380         return thermal_cap;
381 }
382
383 static inline u8 get_output_min(struct tegra_cl_dvfs *cld)
384 {
385         u32 tune_min, thermal_min;
386
387         tune_min = cld->tune_state == TEGRA_CL_DVFS_TUNE_LOW ?
388                 0 : cld->tune_high_out_min;
389         thermal_min = 0;
390         if (cld->therm_floor_idx < cld->therm_floors_num)
391                 thermal_min = cld->thermal_out_floors[cld->therm_floor_idx];
392
393         return max(tune_min, thermal_min);
394 }
395
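/*
 * Fill the output LUT so that every entry below lut_min is clamped to the
 * lut_min register value and every entry at or above lut_max is clamped to the
 * lut_max value; this way the voltage limits are enforced even though the
 * closed loop may request any LUT index.
 */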
396 static inline void _load_lut(struct tegra_cl_dvfs *cld)
397 {
398         int i;
399         u32 val;
400
401         val = cld->out_map[cld->lut_min]->reg_value;
402         for (i = 0; i <= cld->lut_min; i++)
403                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
404
405         for (; i < cld->lut_max; i++) {
406                 val = cld->out_map[i]->reg_value;
407                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
408         }
409
410         val = cld->out_map[cld->lut_max]->reg_value;
411         for (; i < cld->num_voltages; i++)
412                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
413
414         cl_dvfs_wmb(cld);
415 }
416
417 static void cl_dvfs_load_lut(struct tegra_cl_dvfs *cld)
418 {
419         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
420         bool disable_out_for_load =
421                 !(cld->p_data->flags & TEGRA_CL_DVFS_FLAGS_I2C_WAIT_QUIET) &&
422                 (val & CL_DVFS_OUTPUT_CFG_I2C_ENABLE);
423
424         if (disable_out_for_load) {
425                 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
426                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
427                 cl_dvfs_wmb(cld);
428                 udelay(2); /* 2us (big margin) window for disable propagation */
429         }
430
431         _load_lut(cld);
432
433         if (disable_out_for_load) {
434                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
435                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
436                 cl_dvfs_wmb(cld);
437         }
438 }
439
440 #define set_tune_state(cld, state) \
441         do {                                                            \
442                 cld->tune_state = state;                                \
443                 pr_debug("%s: set tune state %d\n", __func__, state);   \
444         } while (0)
445
446 static inline void tune_low(struct tegra_cl_dvfs *cld)
447 {
448         /* required order: first tune dfll low, then tune trimmers low */
449         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
450         cl_dvfs_wmb(cld);
451         if (cld->safe_dvfs->dfll_data.tune_trimmers)
452                 cld->safe_dvfs->dfll_data.tune_trimmers(false);
453 }
454
455 static inline void tune_high(struct tegra_cl_dvfs *cld)
456 {
457         /* required order: first tune trimmers high, then tune dfll high */
458         if (cld->safe_dvfs->dfll_data.tune_trimmers)
459                 cld->safe_dvfs->dfll_data.tune_trimmers(true);
460         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0_high_mv,
461                        CL_DVFS_TUNE0);
462         cl_dvfs_wmb(cld);
463 }
464
465 static void set_ol_config(struct tegra_cl_dvfs *cld)
466 {
467         u32 val, out_min;
468
469         /* always tune low (safe) in open loop */
470         if (cld->tune_state != TEGRA_CL_DVFS_TUNE_LOW) {
471                 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
472                 tune_low(cld);
473
474                 out_min = get_output_min(cld);
475                 if (cld->lut_min != out_min) {
476                         cld->lut_min = out_min;
477                         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
478                                 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
479                                 val &= ~CL_DVFS_OUTPUT_CFG_MIN_MASK;
480                                 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
481                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
482                         } else {
483                                 cl_dvfs_load_lut(cld);
484                         }
485                 }
486         }
487
488         /* 1:1 scaling in open loop */
489         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
490         val |= (SCALE_MAX - 1) << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
491         val &= ~CL_DVFS_FREQ_REQ_FORCE_ENABLE;
492         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
493 }
494
495 static void set_cl_config(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
496 {
497         u32 out_max, out_min;
498         u32 out_cap = get_output_cap(cld, req);
499
500         switch (cld->tune_state) {
501         case TEGRA_CL_DVFS_TUNE_LOW:
502                 if (out_cap > cld->tune_high_out_start) {
503                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST);
504                         mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
505                 }
506                 break;
507
508         case TEGRA_CL_DVFS_TUNE_HIGH:
509         case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST:
510                 if (out_cap <= cld->tune_high_out_start) {
511                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
512                         tune_low(cld);
513                 }
514                 break;
515         default:
516                 BUG();
517         }
518
519         out_min = get_output_min(cld);
520         if (out_cap > (out_min + 1))
521                 req->output = out_cap - 1;
522         else
523                 req->output = out_min + 1;
524         if (req->output == cld->safe_output)
525                 req->output++;
526         out_max = max((u8)(req->output + 1), cld->minimax_output);
527         out_max = max((u8)(out_max), cld->force_out_min);
528
529         if ((cld->lut_min != out_min) || (cld->lut_max != out_max)) {
530                 cld->lut_min = out_min;
531                 cld->lut_max = out_max;
532                 if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
533                         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
534                         val &= ~(CL_DVFS_OUTPUT_CFG_MAX_MASK |
535                                  CL_DVFS_OUTPUT_CFG_MIN_MASK);
536                         val |= out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT;
537                         val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
538                         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
539                 } else {
540                         cl_dvfs_load_lut(cld);
541                 }
542         }
543 }
544
545 static void tune_timer_cb(unsigned long data)
546 {
547         unsigned long flags;
548         u32 val, out_min, out_last;
549         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
550
551         clk_lock_save(cld->dfll_clk, &flags);
552
553         /* FIXME: PWM output control */
554         if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST) {
555                 out_min = cld->lut_min;
556                 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
557                 out_last = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
558
559                 if (!(val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) &&
560                     (out_last >= cld->tune_high_out_min)  &&
561                     (out_min >= cld->tune_high_out_min)) {
562                         udelay(CL_DVFS_OUTPUT_RAMP_DELAY);
563                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
564                         tune_high(cld);
565                 } else {
566                         mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
567                 }
568         }
569         clk_unlock_restore(cld->dfll_clk, &flags);
570 }
571
572 static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
573 {
574         if (!cld->calibration_delay)
575                 return;
576         mod_timer(&cld->calibration_timer, jiffies + cld->calibration_delay);
577 }
578
579 static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
580 {
581         u32 val;
582         ktime_t now;
583         unsigned long data;
584         u8 out_min = get_output_min(cld);
585
586         /*
587          *  Enter the calibration procedure only if:
588          *  - operating in closed loop mode
589          *  - the last request engaged the clock skipper
590          *  - the specified time has elapsed since the last calibration attempt
591          */
592         if ((cld->mode != TEGRA_CL_DVFS_CLOSED_LOOP) ||
593             (cld->last_req.rate > cld->dvco_rate_min))
594                 return;
595
596         now = ktime_get();
597         if (ktime_us_delta(now, cld->last_calibration) < CL_DVFS_CALIBR_TIME)
598                 return;
599         cld->last_calibration = now;
600
601         if (cl_dvfs_readl(cld, CL_DVFS_MONITOR_CTRL) !=
602             CL_DVFS_MONITOR_CTRL_FREQ)
603                 cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ,
604                                 CL_DVFS_MONITOR_CTRL);
605
606         /* Synchronize with sample period, and get rate measurements */
607         data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
608         do {
609                 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
610         } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
611         do {
612                 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
613         } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
614
615         /* Defer calibration if I2C transaction is pending */
616         /* FIXME: PWM output control */
617         val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
618         if (val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) {
619                 calibration_timer_update(cld);
620                 return;
621         }
622
623         /* Adjust minimum rate */
624         data &= CL_DVFS_MONITOR_DATA_MASK;
625         data = GET_MONITORED_RATE(data, cld->ref_rate);
626         if ((val > out_min) || (data < (cld->dvco_rate_min - RATE_STEP(cld))))
627                 cld->dvco_rate_min -= RATE_STEP(cld);
628         else if (data > (cld->dvco_rate_min + RATE_STEP(cld)))
629                 cld->dvco_rate_min += RATE_STEP(cld);
630         else
631                 return;
632
633         cld->dvco_rate_min = clamp(cld->dvco_rate_min,
634                         cld->calibration_range_min, cld->calibration_range_max);
635         calibration_timer_update(cld);
636         pr_debug("%s: calibrated dvco_rate_min %lu\n",
637                  __func__, cld->dvco_rate_min);
638 }
639
640 static void calibration_timer_cb(unsigned long data)
641 {
642         unsigned long flags;
643         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
644
645         pr_debug("%s\n", __func__);
646
647         clk_lock_save(cld->dfll_clk, &flags);
648         cl_dvfs_calibrate(cld);
649         clk_unlock_restore(cld->dfll_clk, &flags);
650 }
651
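/*
 * Post a new rate request to the DFLL. force_val is a signed offset from the
 * safe output level, scaled by coef/cg and clamped, and is written to the
 * FORCE field of CL_DVFS_FREQ_REQ for use by the h/w output force modes during
 * the transition.
 */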
652 static void set_request(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
653 {
654         u32 val, f;
655         int force_val = req->output - cld->safe_output;
656         int coef = 128; /* FIXME: cld->p_data->cfg_param->cg_scale? */
657
658         /* If going down, apply the force output floor */
659         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
660         f = (val & CL_DVFS_FREQ_REQ_FREQ_MASK) >> CL_DVFS_FREQ_REQ_FREQ_SHIFT;
661         if ((!(val & CL_DVFS_FREQ_REQ_FREQ_VALID) || (f > req->freq)) &&
662             (cld->force_out_min > req->output))
663                 force_val = cld->force_out_min - cld->safe_output;
664
665         force_val = force_val * coef / cld->p_data->cfg_param->cg;
666         force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
667
668         /*
669          * First set the new frequency request and force values, then set the force
670          * enable bit (if not already set). Use a read of this same CL_DVFS_FREQ_REQ
671          * register (not another cl_dvfs register) plus an explicit delay as a fence.
672          */
673         val &= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
674         val |= req->freq << CL_DVFS_FREQ_REQ_FREQ_SHIFT;
675         val |= req->scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
676         val |= ((u32)force_val << CL_DVFS_FREQ_REQ_FORCE_SHIFT) &
677                 CL_DVFS_FREQ_REQ_FORCE_MASK;
678         val |= CL_DVFS_FREQ_REQ_FREQ_VALID;
679         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
680         wmb();
681         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
682
683         if (!(val & CL_DVFS_FREQ_REQ_FORCE_ENABLE)) {
684                 udelay(1);  /* 1us (big margin) window for force value settle */
685                 val |= CL_DVFS_FREQ_REQ_FORCE_ENABLE;
686                 cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
687                 cl_dvfs_wmb(cld);
688         }
689 }
690
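/*
 * find_mv_out_cap() returns the lowest LUT index whose voltage is at or above
 * mv (a cap), while find_mv_out_floor() below returns the highest LUT index
 * whose voltage does not exceed mv (a floor), clamped to 0.
 */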
691 static u8 find_mv_out_cap(struct tegra_cl_dvfs *cld, int mv)
692 {
693         u8 cap;
694         int uv;
695
696         for (cap = 0; cap < cld->num_voltages; cap++) {
697                 uv = cld->out_map[cap]->reg_uV;
698                 if (uv >= mv * 1000)
699                         return cap;
700         }
701         return cap - 1; /* maximum possible output */
702 }
703
704 static u8 find_mv_out_floor(struct tegra_cl_dvfs *cld, int mv)
705 {
706         u8 floor;
707         int uv;
708
709         for (floor = 0; floor < cld->num_voltages; floor++) {
710                 uv = cld->out_map[floor]->reg_uV;
711                 if (uv > mv * 1000) {
712                         if (!floor)
713                                 return 0; /* minimum possible output */
714                         break;
715                 }
716         }
717         return floor - 1;
718 }
719
720 static int find_safe_output(
721         struct tegra_cl_dvfs *cld, unsigned long rate, u8 *safe_output)
722 {
723         int i;
724         int n = cld->safe_dvfs->num_freqs;
725         unsigned long *freqs = cld->safe_dvfs->freqs;
726
727         for (i = 0; i < n; i++) {
728                 if (freqs[i] >= rate) {
729                         *safe_output = cld->clk_dvfs_map[i];
730                         return 0;
731                 }
732         }
733         return -EINVAL;
734 }
735
736 static unsigned long find_dvco_rate_min(struct tegra_cl_dvfs *cld, u8 out_min)
737 {
738         int i;
739
740         for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
741                 if (cld->clk_dvfs_map[i] > out_min)
742                         break;
743         }
744         i = i ? i-1 : 0;
745         return cld->safe_dvfs->freqs[i];
746 }
747
748 static void cl_dvfs_set_dvco_rate_min(struct tegra_cl_dvfs *cld)
749 {
750         unsigned long rate = cld->safe_dvfs->dfll_data.out_rate_min;
751         if (cld->therm_floor_idx < cld->therm_floors_num)
752                 rate = find_dvco_rate_min(
753                         cld, cld->thermal_out_floors[cld->therm_floor_idx]);
754
755         /* round minimum rate to request unit (ref_rate/2) boundary */
756         cld->dvco_rate_min = ROUND_MIN_RATE(rate, cld->ref_rate);
757
758         /* dvco min rate is under-estimated - skew calibration range upward */
759         cld->calibration_range_min = cld->dvco_rate_min - 4 * RATE_STEP(cld);
760         cld->calibration_range_max = cld->dvco_rate_min + 8 * RATE_STEP(cld);
761 }
762
763 static void cl_dvfs_set_force_out_min(struct tegra_cl_dvfs *cld)
764 {
765         u8 force_out_min = 0;
766         int force_mv_min = cld->p_data->pmu_undershoot_gb;
767
768         if (!force_mv_min) {
769                 cld->force_out_min = 0;
770                 return;
771         }
772
773         if (cld->therm_floor_idx < cld->therm_floors_num)
774                 force_out_min = cld->thermal_out_floors[cld->therm_floor_idx];
775         force_mv_min += cld->out_map[force_out_min]->reg_uV / 1000;
776         force_out_min = find_mv_out_cap(cld, force_mv_min);
777         if (force_out_min == cld->safe_output)
778                 force_out_min++;
779         cld->force_out_min = force_out_min;
780 }
781
782 static struct voltage_reg_map *find_vdd_map_entry(
783         struct tegra_cl_dvfs *cld, int mV, bool exact)
784 {
785         int i, reg_mV;
786
787         for (i = 0; i < cld->p_data->vdd_map_size; i++) {
788                 /* round down to 1mV */
789                 reg_mV = cld->p_data->vdd_map[i].reg_uV / 1000;
790                 if (mV <= reg_mV)
791                         break;
792         }
793
794         if (i < cld->p_data->vdd_map_size) {
795                 if (!exact || (mV == reg_mV))
796                         return &cld->p_data->vdd_map[i];
797         }
798         return NULL;
799 }
800
801 static void cl_dvfs_init_maps(struct tegra_cl_dvfs *cld)
802 {
803         int i, j, v, v_max, n;
804         const int *millivolts;
805         struct voltage_reg_map *m;
806
807         BUILD_BUG_ON(MAX_CL_DVFS_VOLTAGES > OUT_MASK + 1);
808
809         n = cld->safe_dvfs->num_freqs;
810         BUG_ON(n >= MAX_CL_DVFS_VOLTAGES);
811
812         millivolts = cld->safe_dvfs->dfll_millivolts;
813         v_max = millivolts[n - 1];
814
815         v = cld->safe_dvfs->dfll_data.min_millivolts;
816         BUG_ON(v > millivolts[0]);
817
818         cld->out_map[0] = find_vdd_map_entry(cld, v, true);
819         BUG_ON(!cld->out_map[0]);
820
821         for (i = 0, j = 1; i < n; i++) {
822                 for (;;) {
823                         v += max(1, (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j));
824                         if (v >= millivolts[i])
825                                 break;
826
827                         m = find_vdd_map_entry(cld, v, false);
828                         BUG_ON(!m);
829                         if (m != cld->out_map[j - 1])
830                                 cld->out_map[j++] = m;
831                 }
832
833                 v = millivolts[i];
834                 m = find_vdd_map_entry(cld, v, true);
835                 BUG_ON(!m);
836                 if (m != cld->out_map[j - 1])
837                         cld->out_map[j++] = m;
838                 cld->clk_dvfs_map[i] = j - 1;
839         }
840         BUG_ON(j > MAX_CL_DVFS_VOLTAGES);
841         cld->num_voltages = j;
842 }
843
844 static void cl_dvfs_init_tuning_thresholds(struct tegra_cl_dvfs *cld)
845 {
846         int mv;
847
848         /*
849          * Convert high tuning voltage threshold into output LUT index, and
850          * add the necessary margin.  If the voltage threshold is outside the
851          * operating range, set it at the maximum output level to effectively
852          * disable tuning parameter adjustment.
853          */
854         cld->tune_high_out_min = cld->num_voltages - 1;
855         cld->tune_high_out_start = cld->num_voltages - 1;
856         mv = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
857         if (mv >= cld->safe_dvfs->dfll_data.min_millivolts) {
858                 u8 out_min = find_mv_out_cap(cld, mv);
859                 u8 out_start = find_mv_out_cap(
860                         cld, mv + CL_DVFS_TUNE_HIGH_MARGIN_MV);
861                 out_start = max(out_start, (u8)(out_min + 1));
862                 if ((out_start + 1) < cld->num_voltages) {
863                         cld->tune_high_out_min = out_min;
864                         cld->tune_high_out_start = out_start;
865                         if (cld->minimax_output <= out_start)
866                                 cld->minimax_output = out_start + 1;
867                 }
868         }
869 }
870
871 static void cl_dvfs_init_hot_output_cap(struct tegra_cl_dvfs *cld)
872 {
873         int i;
874         if (!cld->safe_dvfs->dvfs_rail->therm_mv_caps ||
875             !cld->safe_dvfs->dvfs_rail->therm_mv_caps_num)
876                 return;
877
878         if (!cld->safe_dvfs->dvfs_rail->vmax_cdev)
879                 WARN(1, "%s: missing dfll cap cooling device\n",
880                      cld->safe_dvfs->dvfs_rail->reg_id);
881         /*
882          * Convert monotonically decreasing thermal caps at high temperature
883          * into output LUT indexes; make sure there is room for regulation
884          * below the minimum thermal cap.
885          */
886         cld->therm_caps_num = cld->safe_dvfs->dvfs_rail->therm_mv_caps_num;
887         for (i = 0; i < cld->therm_caps_num; i++) {
888                 cld->thermal_out_caps[i] = find_mv_out_floor(
889                         cld, cld->safe_dvfs->dvfs_rail->therm_mv_caps[i]);
890         }
891         BUG_ON(cld->thermal_out_caps[cld->therm_caps_num - 1] <
892                cld->minimax_output);
893 }
894
895 static void cl_dvfs_init_cold_output_floor(struct tegra_cl_dvfs *cld)
896 {
897         int i;
898         if (!cld->safe_dvfs->dvfs_rail->therm_mv_floors ||
899             !cld->safe_dvfs->dvfs_rail->therm_mv_floors_num)
900                 return;
901
902         if (!cld->safe_dvfs->dvfs_rail->vmin_cdev)
903                 WARN(1, "%s: missing dfll floor cooling device\n",
904                      cld->safe_dvfs->dvfs_rail->reg_id);
905         /*
906          * Convert monotonically decreasing thermal floors at low temperature
907          * into output LUT indexes; make sure there is room for regulation
908          * above the maximum thermal floor.
909          */
910         cld->therm_floors_num = cld->safe_dvfs->dvfs_rail->therm_mv_floors_num;
911         for (i = 0; i < cld->therm_floors_num; i++) {
912                 cld->thermal_out_floors[i] = find_mv_out_cap(
913                         cld, cld->safe_dvfs->dvfs_rail->therm_mv_floors[i]);
914         }
915         BUG_ON(cld->thermal_out_floors[0] + 2 >= cld->num_voltages);
916         if (cld->minimax_output <= cld->thermal_out_floors[0])
917                 cld->minimax_output = cld->thermal_out_floors[0] + 1;
918 }
919
920 static void cl_dvfs_init_output_thresholds(struct tegra_cl_dvfs *cld)
921 {
922         cld->minimax_output = 0;
923         cl_dvfs_init_tuning_thresholds(cld);
924         cl_dvfs_init_cold_output_floor(cld);
925
926         /* make sure safe output is safe at any temperature */
927         cld->safe_output = cld->thermal_out_floors[0] ? : 1;
928         if (cld->minimax_output <= cld->safe_output)
929                 cld->minimax_output = cld->safe_output + 1;
930
931         /* init caps after minimax output is determined */
932         cl_dvfs_init_hot_output_cap(cld);
933 }
934
935 static void cl_dvfs_init_pwm_if(struct tegra_cl_dvfs *cld)
936 {
937         /* FIXME: not supported */
938 }
939
940 static void cl_dvfs_init_i2c_if(struct tegra_cl_dvfs *cld)
941 {
942         u32 val, div;
943         struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
944         bool hs_mode = p_data->u.pmu_i2c.hs_rate;
945
946         /* PMU slave address, vdd register offset, and transfer mode */
947         val = p_data->u.pmu_i2c.slave_addr << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT;
948         if (p_data->u.pmu_i2c.addr_10)
949                 val |= CL_DVFS_I2C_CFG_SLAVE_ADDR_10;
950         if (hs_mode) {
951                 val |= p_data->u.pmu_i2c.hs_master_code <<
952                         CL_DVFS_I2C_CFG_HS_CODE_SHIFT;
953                 val |= CL_DVFS_I2C_CFG_PACKET_ENABLE;
954         }
955         val |= CL_DVFS_I2C_CFG_SIZE_MASK;
956         val |= CL_DVFS_I2C_CFG_ARB_ENABLE;
957         cl_dvfs_writel(cld, val, CL_DVFS_I2C_CFG);
958         cl_dvfs_writel(cld, p_data->u.pmu_i2c.reg, CL_DVFS_I2C_VDD_REG_ADDR);
959
960
961         val = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.fs_rate, 8);
962         BUG_ON(!val || (val > CL_DVFS_I2C_CLK_DIVISOR_MASK));
963         val = (val - 1) << CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT;
964         if (hs_mode) {
965                 div = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.hs_rate, 12);
966                 BUG_ON(!div || (div > CL_DVFS_I2C_CLK_DIVISOR_MASK));
967         } else {
968                 div = 2;        /* default hs divisor just in case */
969         }
970         val |= (div - 1) << CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT;
971         cl_dvfs_writel(cld, val, CL_DVFS_I2C_CLK_DIVISOR);
972         cl_dvfs_wmb(cld);
973 }
974
975 static void cl_dvfs_init_out_if(struct tegra_cl_dvfs *cld)
976 {
977         u32 val, out_min, out_max;
978
979         /*
980          * Disable output, and set safe voltage and output limits;
981          * disable and clear limit interrupts.
982          */
983         cld->tune_state = TEGRA_CL_DVFS_TUNE_LOW;
984         cld->therm_cap_idx = cld->therm_caps_num;
985         cld->therm_floor_idx = 0;
986         cl_dvfs_set_dvco_rate_min(cld);
987         cl_dvfs_set_force_out_min(cld);
988
989         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
990                 /*
991                  * If h/w supports dynamic change of the output register, limit
992                  * the LUT index range using cl_dvfs h/w controls, and load the
993                  * full range LUT table once.
994                  */
995                 out_min = get_output_min(cld);
996                 out_max = get_output_cap(cld, NULL);
997                 cld->lut_min = 0;
998                 cld->lut_max = cld->num_voltages - 1;
999         } else {
1000                 /*
1001                  * Allow the entire range of LUT indexes, but limit output
1002                  * voltage in LUT mapping (this "indirect" application of limits
1003                  * is used, because h/w does not support dynamic change of index
1004                  * limits, but dynamic reload of LUT is fine).
1005                  */
1006                 out_min = 0;
1007                 out_max = cld->num_voltages - 1;
1008                 cld->lut_min = get_output_min(cld);
1009                 cld->lut_max = get_output_cap(cld, NULL);
1010         }
1011
1012         val = (cld->safe_output << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT) |
1013                 (out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT) |
1014                 (out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT);
1015         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
1016         cl_dvfs_wmb(cld);
1017
1018         cl_dvfs_writel(cld, 0, CL_DVFS_OUTPUT_FORCE);
1019         cl_dvfs_writel(cld, 0, CL_DVFS_INTR_EN);
1020         cl_dvfs_writel(cld, CL_DVFS_INTR_MAX_MASK | CL_DVFS_INTR_MIN_MASK,
1021                        CL_DVFS_INTR_STS);
1022
1023         /* fill in LUT table */
1024         cl_dvfs_load_lut(cld);
1025         if (cld->p_data->flags & TEGRA_CL_DVFS_DYN_OUTPUT_CFG) {
1026                 /* dynamic update of output register allowed - no need to reload
1027                    LUT; use LUT limits as a shadow of the output register setting */
1028                 cld->lut_min = out_min;
1029                 cld->lut_max = out_max;
1030         }
1031
1032         /* configure transport */
1033         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
1034                 cl_dvfs_init_i2c_if(cld);
1035         else
1036                 cl_dvfs_init_pwm_if(cld);
1037 }
1038
1039 static void cl_dvfs_init_cntrl_logic(struct tegra_cl_dvfs *cld)
1040 {
1041         u32 val;
1042         struct tegra_cl_dvfs_cfg_param *param = cld->p_data->cfg_param;
1043
1044         /* configure mode, control loop parameters, DFLL tuning */
1045         set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1046
1047         val = GET_DIV(cld->ref_rate, param->sample_rate, 32);
1048         BUG_ON(val > CL_DVFS_CONFIG_DIV_MASK);
1049         cl_dvfs_writel(cld, val, CL_DVFS_CONFIG);
1050
1051         val = (param->force_mode << CL_DVFS_PARAMS_FORCE_MODE_SHIFT) |
1052                 (param->cf << CL_DVFS_PARAMS_CF_PARAM_SHIFT) |
1053                 (param->ci << CL_DVFS_PARAMS_CI_PARAM_SHIFT) |
1054                 ((u8)param->cg << CL_DVFS_PARAMS_CG_PARAM_SHIFT) |
1055                 (param->cg_scale ? CL_DVFS_PARAMS_CG_SCALE : 0);
1056         cl_dvfs_writel(cld, val, CL_DVFS_PARAMS);
1057
1058         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
1059         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune1, CL_DVFS_TUNE1);
1060         cl_dvfs_wmb(cld);
1061         if (cld->safe_dvfs->dfll_data.tune_trimmers)
1062                 cld->safe_dvfs->dfll_data.tune_trimmers(false);
1063
1064         /* configure droop (skipper 1) and scale (skipper 2) */
1065         val = GET_DROOP_FREQ(cld->safe_dvfs->dfll_data.droop_rate_min,
1066                         cld->ref_rate) << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT;
1067         BUG_ON(val > CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK);
1068         val |= (param->droop_cut_value << CL_DVFS_DROOP_CTRL_CUT_SHIFT);
1069         val |= (param->droop_restore_ramp << CL_DVFS_DROOP_CTRL_RAMP_SHIFT);
1070         cl_dvfs_writel(cld, val, CL_DVFS_DROOP_CTRL);
1071
1072         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ) &
1073                 CL_DVFS_FREQ_REQ_SCALE_MASK;
1074         cld->last_req.scale = val >> CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1075         cld->last_req.cap = 0;
1076         cld->last_req.freq = 0;
1077         cld->last_req.output = 0;
1078         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
1079         cl_dvfs_writel(cld, param->scale_out_ramp, CL_DVFS_SCALE_RAMP);
1080
1081         /* select frequency for monitoring */
1082         cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ, CL_DVFS_MONITOR_CTRL);
1083         cl_dvfs_wmb(cld);
1084 }
1085
1086 static int cl_dvfs_enable_clocks(struct tegra_cl_dvfs *cld)
1087 {
1088         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
1089                 clk_enable(cld->i2c_clk);
1090
1091         clk_enable(cld->ref_clk);
1092         clk_enable(cld->soc_clk);
1093         return 0;
1094 }
1095
1096 static void cl_dvfs_disable_clocks(struct tegra_cl_dvfs *cld)
1097 {
1098         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
1099                 clk_disable(cld->i2c_clk);
1100
1101         clk_disable(cld->ref_clk);
1102         clk_disable(cld->soc_clk);
1103 }
1104
1105 static int cl_dvfs_init(struct tegra_cl_dvfs *cld)
1106 {
1107         int ret;
1108
1109         /* Enable output interface clock */
1110         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C) {
1111                 ret = clk_enable(cld->i2c_clk);
1112                 if (ret) {
1113                         pr_err("%s: Failed to enable %s\n",
1114                                __func__, cld->i2c_clk->name);
1115                         return ret;
1116                 }
1117                 cld->i2c_rate = clk_get_rate(cld->i2c_clk);
1118         } else {
1119                 pr_err("%s: PMU interface is not I2C\n", __func__);
1120                 return -EINVAL;
1121         }
1122
1123         /* Enable module clocks, release control logic reset */
1124         ret = clk_enable(cld->ref_clk);
1125         if (ret) {
1126                 pr_err("%s: Failed to enable %s\n",
1127                        __func__, cld->ref_clk->name);
1128                 return ret;
1129         }
1130         ret = clk_enable(cld->soc_clk);
1131         if (ret) {
1132                 pr_err("%s: Failed to enable %s\n",
1133                        __func__, cld->soc_clk->name);
1134                 return ret;
1135         }
1136         cld->ref_rate = clk_get_rate(cld->ref_clk);
1137         BUG_ON(!cld->ref_rate);
1138
1139         /* init tuning timer */
1140         init_timer(&cld->tune_timer);
1141         cld->tune_timer.function = tune_timer_cb;
1142         cld->tune_timer.data = (unsigned long)cld;
1143         cld->tune_delay = usecs_to_jiffies(CL_DVFS_TUNE_HIGH_DELAY);
1144
1145         /* init calibration timer */
1146         init_timer_deferrable(&cld->calibration_timer);
1147         cld->calibration_timer.function = calibration_timer_cb;
1148         cld->calibration_timer.data = (unsigned long)cld;
1149         cld->calibration_delay = usecs_to_jiffies(CL_DVFS_CALIBR_TIME);
1150
1151         /* Get output voltage mapping ready */
1152         cl_dvfs_init_maps(cld);
1153
1154         /* Setup output range thresholds */
1155         cl_dvfs_init_output_thresholds(cld);
1156
1157         /* Setup PMU interface */
1158         cl_dvfs_init_out_if(cld);
1159
1160         /* Configure control registers in disabled mode and disable clocks */
1161         cl_dvfs_init_cntrl_logic(cld);
1162         cl_dvfs_disable_clocks(cld);
1163
1164         return 0;
1165 }
1166
1167 /*
1168  * Re-initialize and enable target device clock in open loop mode. Called
1169  * directly from SoC clock resume syscore operation. Closed loop will be
1170  * re-entered in platform syscore ops as well.
1171  */
1172 void tegra_cl_dvfs_resume(struct tegra_cl_dvfs *cld)
1173 {
1174         enum tegra_cl_dvfs_ctrl_mode mode = cld->mode;
1175         struct dfll_rate_req req = cld->last_req;
1176
1177         cl_dvfs_enable_clocks(cld);
1178
1179         /* Setup PMU interface, and configure controls in disabled mode */
1180         cl_dvfs_init_out_if(cld);
1181         cl_dvfs_init_cntrl_logic(cld);
1182
1183         cl_dvfs_disable_clocks(cld);
1184
1185         /* Restore last request and mode */
1186         cld->last_req = req;
1187         if (mode != TEGRA_CL_DVFS_DISABLED) {
1188                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1189                 WARN(mode > TEGRA_CL_DVFS_OPEN_LOOP,
1190                      "DFLL was left locked in suspend\n");
1191         }
1192 }
1193
1194 #ifdef CONFIG_THERMAL
1195 /* cl_dvfs cap cooling device */
1196 static int tegra_cl_dvfs_get_vmax_cdev_max_state(
1197         struct thermal_cooling_device *cdev, unsigned long *max_state)
1198 {
1199         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1200         *max_state = cld->therm_caps_num;
1201         return 0;
1202 }
1203
1204 static int tegra_cl_dvfs_get_vmax_cdev_cur_state(
1205         struct thermal_cooling_device *cdev, unsigned long *cur_state)
1206 {
1207         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1208         *cur_state = cld->therm_cap_idx;
1209         return 0;
1210 }
1211
1212 static int tegra_cl_dvfs_set_vmax_cdev_state(
1213         struct thermal_cooling_device *cdev, unsigned long cur_state)
1214 {
1215         unsigned long flags;
1216         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1217
1218         clk_lock_save(cld->dfll_clk, &flags);
1219
1220         if (cld->therm_cap_idx != cur_state) {
1221                 cld->therm_cap_idx = cur_state;
1222                 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1223                         tegra_cl_dvfs_request_rate(cld,
1224                                 tegra_cl_dvfs_request_get(cld));
1225                 }
1226         }
1227         clk_unlock_restore(cld->dfll_clk, &flags);
1228         return 0;
1229 }
1230
1231 static struct thermal_cooling_device_ops tegra_cl_dvfs_vmax_cool_ops = {
1232         .get_max_state = tegra_cl_dvfs_get_vmax_cdev_max_state,
1233         .get_cur_state = tegra_cl_dvfs_get_vmax_cdev_cur_state,
1234         .set_cur_state = tegra_cl_dvfs_set_vmax_cdev_state,
1235 };
1236
1237 /* cl_dvfs vmin cooling device */
1238 static int tegra_cl_dvfs_get_vmin_cdev_max_state(
1239         struct thermal_cooling_device *cdev, unsigned long *max_state)
1240 {
1241         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1242         *max_state = cld->therm_floors_num;
1243         return 0;
1244 }
1245
1246 static int tegra_cl_dvfs_get_vmin_cdev_cur_state(
1247         struct thermal_cooling_device *cdev, unsigned long *cur_state)
1248 {
1249         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1250         *cur_state = cld->therm_floor_idx;
1251         return 0;
1252 }
1253
1254 static int tegra_cl_dvfs_set_vmin_cdev_state(
1255         struct thermal_cooling_device *cdev, unsigned long cur_state)
1256 {
1257         unsigned long flags;
1258         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1259
1260         clk_lock_save(cld->dfll_clk, &flags);
1261
1262         if (cld->therm_floor_idx != cur_state) {
1263                 cld->therm_floor_idx = cur_state;
1264                 cl_dvfs_set_dvco_rate_min(cld);
1265                 cl_dvfs_set_force_out_min(cld);
1266                 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1267                         tegra_cl_dvfs_request_rate(cld,
1268                                 tegra_cl_dvfs_request_get(cld));
1269                 }
1270         }
1271         clk_unlock_restore(cld->dfll_clk, &flags);
1272         return 0;
1273 }
1274
1275 static struct thermal_cooling_device_ops tegra_cl_dvfs_vmin_cool_ops = {
1276         .get_max_state = tegra_cl_dvfs_get_vmin_cdev_max_state,
1277         .get_cur_state = tegra_cl_dvfs_get_vmin_cdev_cur_state,
1278         .set_cur_state = tegra_cl_dvfs_set_vmin_cdev_state,
1279 };
1280
1281 static void tegra_cl_dvfs_init_cdev(struct work_struct *work)
1282 {
1283         struct tegra_cl_dvfs *cld = container_of(
1284                 work, struct tegra_cl_dvfs, init_cdev_work);
1285
1286         /* just report error - initialized at worst-case temperature anyway */
1287         if (cld->safe_dvfs->dvfs_rail->vmin_cdev) {
1288                 char *type = cld->safe_dvfs->dvfs_rail->vmin_cdev->cdev_type;
1289                 cld->vmin_cdev = thermal_cooling_device_register(
1290                         type, (void *)cld, &tegra_cl_dvfs_vmin_cool_ops);
1291                 if (IS_ERR_OR_NULL(cld->vmin_cdev)) {
1292                         cld->vmin_cdev = NULL;
1293                         pr_err("tegra cooling device %s failed to register\n",
1294                                type);
1295                         return;
1296                 }
1297                 pr_info("%s cooling device is registered\n", type);
1298         }
1299
1300         if (cld->safe_dvfs->dvfs_rail->vmax_cdev) {
1301                 char *type = cld->safe_dvfs->dvfs_rail->vmax_cdev->cdev_type;
1302                 cld->vmax_cdev = thermal_cooling_device_register(
1303                         type, (void *)cld, &tegra_cl_dvfs_vmax_cool_ops);
1304                 if (IS_ERR_OR_NULL(cld->vmax_cdev)) {
1305                         cld->vmax_cdev = NULL;
1306                         pr_err("tegra cooling device %s failed to register\n",
1307                                type);
1308                         return;
1309                 }
1310                 pr_info("%s cooling device is registered\n", type);
1311         }
1312 }
1313 #endif
1314
1315 #ifdef CONFIG_PM_SLEEP
1316 /*
1317  * cl_dvfs controls clock/voltage to other devices, including CPU. Therefore,
1318  * cl_dvfs driver pm suspend callback does not stop cl-dvfs operations. It is
1319  * only used to enforce cold/hot voltage limits, since the temperature may
1320  * change in suspend without waking up. The correct temperature zone after
1321  * suspend will be updated via the cl_dvfs cooling device interface when the
1322  * temperature sensor resumes.
1323  */
1324 static int tegra_cl_dvfs_suspend_cl(struct device *dev)
1325 {
1326         unsigned long flags;
1327         struct tegra_cl_dvfs *cld = dev_get_drvdata(dev);
1328
1329         clk_lock_save(cld->dfll_clk, &flags);
1330         if (cld->vmax_cdev)
1331                 cld->vmax_cdev->updated = false;
1332         cld->therm_cap_idx = cld->therm_caps_num;
1333         if (cld->vmin_cdev)
1334                 cld->vmin_cdev->updated = false;
1335         cld->therm_floor_idx = 0;
1336         cl_dvfs_set_dvco_rate_min(cld);
1337         cl_dvfs_set_force_out_min(cld);
1338         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1339                 set_cl_config(cld, &cld->last_req);
1340                 set_request(cld, &cld->last_req);
1341         }
1342         clk_unlock_restore(cld->dfll_clk, &flags);
1343
1344         return 0;
1345 }
1346
1347 static const struct dev_pm_ops tegra_cl_dvfs_pm_ops = {
1348         .suspend = tegra_cl_dvfs_suspend_cl,
1349 };
1350 #endif
1351
1352 static int __init tegra_cl_dvfs_probe(struct platform_device *pdev)
1353 {
1354         int ret;
1355         struct tegra_cl_dvfs_platform_data *p_data;
1356         struct resource *res;
1357         struct tegra_cl_dvfs *cld;
1358         struct clk *ref_clk, *soc_clk, *i2c_clk, *safe_dvfs_clk, *dfll_clk;
1359
1360         /* Get resources */
1361         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1362         if (!res) {
1363                 dev_err(&pdev->dev, "missing register base\n");
1364                 return -ENOMEM;
1365         }
1366
1367         p_data = pdev->dev.platform_data;
1368         if (!p_data || !p_data->cfg_param || !p_data->vdd_map) {
1369                 dev_err(&pdev->dev, "missing platform data\n");
1370                 return -ENODATA;
1371         }
1372
1373         ref_clk = clk_get(&pdev->dev, "ref");
1374         soc_clk = clk_get(&pdev->dev, "soc");
1375         i2c_clk = clk_get(&pdev->dev, "i2c");
1376         safe_dvfs_clk = clk_get(&pdev->dev, "safe_dvfs");
1377         dfll_clk = clk_get(&pdev->dev, p_data->dfll_clk_name);
1378         if (IS_ERR(ref_clk) || IS_ERR(soc_clk) || IS_ERR(i2c_clk)) {
1379                 dev_err(&pdev->dev, "missing control clock\n");
1380                 return -ENODEV;
1381         }
1382         if (IS_ERR(safe_dvfs_clk)) {
1383                 dev_err(&pdev->dev, "missing safe dvfs source clock\n");
1384                 return PTR_ERR(safe_dvfs_clk);
1385         }
1386         if (IS_ERR(dfll_clk)) {
1387                 dev_err(&pdev->dev, "missing target dfll clock\n");
1388                 return PTR_ERR(dfll_clk);
1389         }
1390         if (!safe_dvfs_clk->dvfs || !safe_dvfs_clk->dvfs->dvfs_rail) {
1391                 dev_err(&pdev->dev, "invalid safe dvfs source\n");
1392                 return -EINVAL;
1393         }
1394
1395         /* Allocate cl_dvfs object and populate resource accessors */
1396         cld = kzalloc(sizeof(*cld), GFP_KERNEL);
1397         if (!cld) {
1398                 dev_err(&pdev->dev, "failed to allocate cl_dvfs object\n");
1399                 return -ENOMEM;
1400         }
1401
1402         cld->cl_base = IO_ADDRESS(res->start);
1403         cld->p_data = p_data;
1404         cld->ref_clk = ref_clk;
1405         cld->soc_clk = soc_clk;
1406         cld->i2c_clk = i2c_clk;
1407         cld->dfll_clk = dfll_clk;
1408         cld->safe_dvfs = safe_dvfs_clk->dvfs;
1409 #ifdef CONFIG_THERMAL
1410         INIT_WORK(&cld->init_cdev_work, tegra_cl_dvfs_init_cdev);
1411 #endif
1412         /* Initialize cl_dvfs */
1413         ret = cl_dvfs_init(cld);
1414         if (ret) {
1415                 kfree(cld);
1416                 return ret;
1417         }
1418
1419         platform_set_drvdata(pdev, cld);
1420
1421         /*
1422          * Schedule cooling device registration as a separate work to address
1423          * the following race: when cl_dvfs is probed the DFLL child clock
1424          * (e.g., CPU) cannot be changed; on the other hand, cooling device
1425          * registration updates the entire thermal zone, and may trigger a
1426          * rate change of the target clock.
1427          */
1428         if (cld->safe_dvfs->dvfs_rail->vmin_cdev ||
1429             cld->safe_dvfs->dvfs_rail->vmax_cdev)
1430                 schedule_work(&cld->init_cdev_work);
1431         return 0;
1432 }
1433
1434 static struct platform_driver tegra_cl_dvfs_driver = {
1435         .driver         = {
1436                 .name   = "tegra_cl_dvfs",
1437                 .owner  = THIS_MODULE,
1438 #ifdef CONFIG_PM_SLEEP
1439                 .pm = &tegra_cl_dvfs_pm_ops,
1440 #endif
1441         },
1442 };
1443
1444 int __init tegra_init_cl_dvfs(void)
1445 {
1446         return platform_driver_probe(&tegra_cl_dvfs_driver,
1447                                      tegra_cl_dvfs_probe);
1448 }
1449
1450 /*
1451  * CL_DVFS states:
1452  *
1453  * - DISABLED: control logic mode - DISABLED, output interface disabled,
1454  *   dfll in reset
1455  * - OPEN_LOOP: control logic mode - OPEN_LOOP, output interface disabled,
1456  *   dfll is running "unlocked"
1457  * - CLOSED_LOOP: control logic mode - CLOSED_LOOP, output interface enabled,
1458  *   dfll is running "locked"
1459  */
1460
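/*
 * Illustrative sketch only, not part of the driver: a hypothetical caller
 * (e.g. the DFLL clock ops) could walk the states above with the sequence
 * below. Locking and error handling are elided; the function name is made up.
 */
#if 0
static int example_dfll_run_closed_loop(struct tegra_cl_dvfs *cld,
                                        unsigned long rate)
{
        int ret;

        ret = tegra_cl_dvfs_enable(cld);             /* DISABLED -> OPEN_LOOP */
        if (ret)
                return ret;

        ret = tegra_cl_dvfs_request_rate(cld, rate); /* save/apply target rate */
        if (ret)
                return ret;

        return tegra_cl_dvfs_lock(cld);              /* OPEN_LOOP -> CLOSED_LOOP */
}
#endif
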
1461 /* Switch from any other state to DISABLED state */
1462 void tegra_cl_dvfs_disable(struct tegra_cl_dvfs *cld)
1463 {
1464         switch (cld->mode) {
1465         case TEGRA_CL_DVFS_CLOSED_LOOP:
1466                 WARN(1, "DFLL is disabled directly from closed loop mode\n");
1467                 set_ol_config(cld);
1468                 output_disable_ol_prepare(cld);
1469                 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1470                 output_disable_post_ol(cld);
1471                 invalidate_request(cld);
1472                 cl_dvfs_disable_clocks(cld);
1473                 return;
1474
1475         case TEGRA_CL_DVFS_OPEN_LOOP:
1476                 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1477                 invalidate_request(cld);
1478                 cl_dvfs_disable_clocks(cld);
1479                 return;
1480
1481         default:
1482                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1483                 return;
1484         }
1485 }
1486
1487 /* Switch from DISABLED state to OPEN_LOOP state */
1488 int tegra_cl_dvfs_enable(struct tegra_cl_dvfs *cld)
1489 {
1490         if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1491                 pr_err("%s: Cannot enable DFLL in %s mode\n",
1492                        __func__, mode_name[cld->mode]);
1493                 return -EPERM;
1494         }
1495
1496         if (cld->mode != TEGRA_CL_DVFS_DISABLED)
1497                 return 0;
1498
1499         cl_dvfs_enable_clocks(cld);
1500         set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1501         return 0;
1502 }
1503
1504 /* Switch from OPEN_LOOP state to CLOSED_LOOP state */
1505 int tegra_cl_dvfs_lock(struct tegra_cl_dvfs *cld)
1506 {
1507         struct dfll_rate_req *req = &cld->last_req;
1508
1509         switch (cld->mode) {
1510         case TEGRA_CL_DVFS_CLOSED_LOOP:
1511                 return 0;
1512
1513         case TEGRA_CL_DVFS_OPEN_LOOP:
1514                 if (req->freq == 0) {
1515                         pr_err("%s: Cannot lock DFLL at rate 0\n", __func__);
1516                         return -EINVAL;
1517                 }
1518
1519                 /*
1520                  * Update control logic setting with last rate request;
1521                  * sync output limits with current tuning and thermal state,
1522                  * enable output and switch to closed loop mode.
1523                  */
1524                 set_cl_config(cld, req);
1525                 output_enable(cld);
1526                 set_mode(cld, TEGRA_CL_DVFS_CLOSED_LOOP);
1527                 set_request(cld, req);
1528                 calibration_timer_update(cld);
1529                 return 0;
1530
1531         default:
1532                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1533                 pr_err("%s: Cannot lock DFLL in %s mode\n",
1534                        __func__, mode_name[cld->mode]);
1535                 return -EPERM;
1536         }
1537 }
1538
1539 /* Switch from CLOSED_LOOP state to OPEN_LOOP state */
1540 int tegra_cl_dvfs_unlock(struct tegra_cl_dvfs *cld)
1541 {
1542         int ret;
1543
1544         switch (cld->mode) {
1545         case TEGRA_CL_DVFS_CLOSED_LOOP:
1546                 set_ol_config(cld);
1547                 ret = output_disable_ol_prepare(cld);
1548                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1549                 if (!ret)
1550                         ret = output_disable_post_ol(cld);
1551                 return ret;
1552
1553         case TEGRA_CL_DVFS_OPEN_LOOP:
1554                 return 0;
1555
1556         default:
1557                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1558                 pr_err("%s: Cannot unlock DFLL in %s mode\n",
1559                        __func__, mode_name[cld->mode]);
1560                 return -EPERM;
1561         }
1562 }
1563
1564 /*
1565  * Convert requested rate into the control logic settings. In CLOSED_LOOP mode,
1566  * update new settings immediately to adjust DFLL output rate accordingly.
1567  * Otherwise, just save them until next switch to closed loop.
1568  */
1569 int tegra_cl_dvfs_request_rate(struct tegra_cl_dvfs *cld, unsigned long rate)
1570 {
1571         u32 val;
1572         struct dfll_rate_req req;
1573         req.rate = rate;
1574
1575         if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1576                 pr_err("%s: Cannot set DFLL rate in %s mode\n",
1577                        __func__, mode_name[cld->mode]);
1578                 return -EPERM;
1579         }
1580
1581         /* Calibrate dfll minimum rate */
1582         cl_dvfs_calibrate(cld);
1583
1584         /* Determine DFLL output scale */
1585         req.scale = SCALE_MAX - 1;
1586         if (rate < cld->dvco_rate_min) {
1587                 int scale = DIV_ROUND_CLOSEST((rate / 1000 * SCALE_MAX),
1588                         (cld->dvco_rate_min / 1000));
1589                 if (!scale) {
1590                         pr_err("%s: Rate %lu is below scalable range\n",
1591                                __func__, rate);
1592                         return -EINVAL;
1593                 }
1594                 req.scale = scale - 1;
1595                 rate = cld->dvco_rate_min;
1596         }
1597
1598         /* Convert requested rate into frequency request and scale settings */
1599         val = GET_REQUEST_FREQ(rate, cld->ref_rate);
1600         if (val > FREQ_MAX) {
1601                 pr_err("%s: Rate %lu is above dfll range\n", __func__, rate);
1602                 return -EINVAL;
1603         }
1604         req.freq = val;
1605         rate = GET_REQUEST_RATE(val, cld->ref_rate);
1606
1607         /* Find safe voltage for requested rate */
1608         if (find_safe_output(cld, rate, &req.output)) {
1609                 pr_err("%s: Failed to find safe output for rate %lu\n",
1610                        __func__, rate);
1611                 return -EINVAL;
1612         }
1613         req.cap = req.output;
1614
1615         /*
1616          * Save validated request, and in CLOSED_LOOP mode actually update
1617          * control logic settings; use request output to set maximum voltage
1618          * limit, but keep one LUT step room above safe voltage
1619          */
1620         cld->last_req = req;
1621
1622         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1623                 set_cl_config(cld, &cld->last_req);
1624                 set_request(cld, &cld->last_req);
1625         }
1626         return 0;
1627 }
1628
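/*
 * Worked example for the scale computation above, with illustrative numbers
 * only (assume dvco_rate_min = 800 MHz): a request of 400 MHz is below the
 * DVCO minimum, so scale = DIV_ROUND_CLOSEST(400000 * 256, 800000) = 128 and
 * req.scale = 127. The DVCO itself is then requested at 800 MHz, while the
 * output skipper passes (req.scale + 1) / 256 = 128/256 of the DVCO rate,
 * i.e. ~400 MHz at the DFLL output.
 */
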
1629 unsigned long tegra_cl_dvfs_request_get(struct tegra_cl_dvfs *cld)
1630 {
1631         struct dfll_rate_req *req = &cld->last_req;
1632
1633         /*
1634          * If running below the dvco minimum rate, with skipper resolution
1635          * (dvco min rate / 256): return the last requested rate rounded down
1636          * to 1 kHz. If running at or above the dvco minimum, with closed loop
1637          * resolution (ref rate / 2): return the cl_dvfs target rate.
1638          */
1639         if ((req->scale + 1) < SCALE_MAX)
1640                 return req->rate / 1000 * 1000;
1641
1642         return GET_REQUEST_RATE(req->freq, cld->ref_rate);
1643 }
1644
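/*
 * For illustration (hypothetical numbers): with req->scale = 127 the DFLL is
 * running below the DVCO minimum, so a last requested rate of 400123456 Hz is
 * reported as 400123000 Hz; with req->scale = 255 (SCALE_MAX - 1) the target
 * rate GET_REQUEST_RATE(req->freq, cld->ref_rate) is reported instead.
 */
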
1645 #ifdef CONFIG_DEBUG_FS
1646
1647 static int lock_get(void *data, u64 *val)
1648 {
1649         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1650         *val = cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP;
1651         return 0;
1652 }
1653 static int lock_set(void *data, u64 val)
1654 {
1655         struct clk *c = (struct clk *)data;
1656         return tegra_clk_cfg_ex(c, TEGRA_CLK_DFLL_LOCK, val);
1657 }
1658 DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");
1659
1660 static int monitor_get(void *data, u64 *val)
1661 {
1662         u32 v, s;
1663         unsigned long flags;
1664         struct clk *c = (struct clk *)data;
1665         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1666
1667         clk_enable(cld->soc_clk);
1668
1669         clk_lock_save(c, &flags);
1670         v = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
1671                 CL_DVFS_MONITOR_DATA_MASK;
1672
1673         if (cl_dvfs_readl(cld, CL_DVFS_MONITOR_CTRL) ==
1674             CL_DVFS_MONITOR_CTRL_FREQ) {
1675                 v = GET_MONITORED_RATE(v, cld->ref_rate);
1676                 s = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
1677                 s = (s & CL_DVFS_FREQ_REQ_SCALE_MASK) >>
1678                         CL_DVFS_FREQ_REQ_SCALE_SHIFT;
1679                 *val = (u64)v * (s + 1) / 256;
1680
1681                 clk_unlock_restore(c, &flags);
1682                 clk_disable(cld->soc_clk);
1683                 return 0;
1684         }
1685         *val = v;
1686
1687         clk_unlock_restore(c, &flags);
1688         clk_disable(cld->soc_clk);
1689         return 0;
1690 }
1691 DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
1692
1693 static int vmax_get(void *data, u64 *val)
1694 {
1695         u32 v;
1696         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1697         v = cld->lut_max;
1698         *val = cld->out_map[v]->reg_uV / 1000;
1699         return 0;
1700 }
1701 DEFINE_SIMPLE_ATTRIBUTE(vmax_fops, vmax_get, NULL, "%llu\n");
1702
1703 static int vmin_get(void *data, u64 *val)
1704 {
1705         u32 v;
1706         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1707         v = cld->lut_min;
1708         *val = cld->out_map[v]->reg_uV / 1000;
1709         return 0;
1710 }
1711 DEFINE_SIMPLE_ATTRIBUTE(vmin_fops, vmin_get, NULL, "%llu\n");
1712
1713 static int tune_high_mv_get(void *data, u64 *val)
1714 {
1715         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1716         *val = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
1717         return 0;
1718 }
1719 static int tune_high_mv_set(void *data, u64 val)
1720 {
1721         unsigned long flags;
1722         struct clk *c = (struct clk *)data;
1723         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1724
1725         clk_lock_save(c, &flags);
1726
1727         cld->safe_dvfs->dfll_data.tune_high_min_millivolts = val;
1728         cl_dvfs_init_output_thresholds(cld);
1729         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1730                 set_cl_config(cld, &cld->last_req);
1731                 set_request(cld, &cld->last_req);
1732         }
1733
1734         clk_unlock_restore(c, &flags);
1735         return 0;
1736 }
1737 DEFINE_SIMPLE_ATTRIBUTE(tune_high_mv_fops, tune_high_mv_get, tune_high_mv_set,
1738                         "%llu\n");
1739 static int fmin_get(void *data, u64 *val)
1740 {
1741         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1742         *val = cld->dvco_rate_min;
1743         return 0;
1744 }
1745 DEFINE_SIMPLE_ATTRIBUTE(dvco_rate_min_fops, fmin_get, NULL, "%llu\n");
1746
1747 static int calibr_delay_get(void *data, u64 *val)
1748 {
1749         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1750         *val = jiffies_to_msecs(cld->calibration_delay);
1751         return 0;
1752 }
1753 static int calibr_delay_set(void *data, u64 val)
1754 {
1755         unsigned long flags;
1756         struct clk *c = (struct clk *)data;
1757         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1758
1759         clk_lock_save(c, &flags);
1760         cld->calibration_delay = msecs_to_jiffies(val);
1761         clk_unlock_restore(c, &flags);
1762         return 0;
1763 }
1764 DEFINE_SIMPLE_ATTRIBUTE(calibr_delay_fops, calibr_delay_get, calibr_delay_set,
1765                         "%llu\n");
1766
1767 static int undershoot_get(void *data, u64 *val)
1768 {
1769         struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
1770         *val = cld->p_data->pmu_undershoot_gb;
1771         return 0;
1772 }
1773 static int undershoot_set(void *data, u64 val)
1774 {
1775         unsigned long flags;
1776         struct clk *c = (struct clk *)data;
1777         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1778
1779         clk_lock_save(c, &flags);
1780         cld->p_data->pmu_undershoot_gb = val;
1781         cl_dvfs_set_force_out_min(cld);
1782         clk_unlock_restore(c, &flags);
1783         return 0;
1784 }
1785 DEFINE_SIMPLE_ATTRIBUTE(undershoot_fops, undershoot_get, undershoot_set,
1786                         "%llu\n");
1787
1788 static int cl_register_show(struct seq_file *s, void *data)
1789 {
1790         u32 offs;
1791         struct clk *c = s->private;
1792         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1793
1794         clk_enable(cld->soc_clk);
1795
1796         seq_printf(s, "CONTROL REGISTERS:\n");
1797         for (offs = 0; offs <= CL_DVFS_MONITOR_DATA; offs += 4)
1798                 seq_printf(s, "[0x%02x] = 0x%08x\n",
1799                            offs, cl_dvfs_readl(cld, offs));
1800
1801         seq_printf(s, "\nI2C and INTR REGISTERS:\n");
1802         for (offs = CL_DVFS_I2C_CFG; offs <= CL_DVFS_I2C_STS; offs += 4)
1803                 seq_printf(s, "[0x%02x] = 0x%08x\n",
1804                            offs, cl_dvfs_readl(cld, offs));
1805
1806         offs = CL_DVFS_INTR_STS;
1807         seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
1808         offs = CL_DVFS_INTR_EN;
1809         seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
1810
1811         seq_printf(s, "\nLUT:\n");
1812         for (offs = CL_DVFS_OUTPUT_LUT;
1813              offs < CL_DVFS_OUTPUT_LUT + 4 * MAX_CL_DVFS_VOLTAGES;
1814              offs += 4)
1815                 seq_printf(s, "[0x%02x] = 0x%08x\n",
1816                            offs, cl_dvfs_readl(cld, offs));
1817
1818         clk_disable(cld->soc_clk);
1819         return 0;
1820 }
1821
1822 static int cl_register_open(struct inode *inode, struct file *file)
1823 {
1824         return single_open(file, cl_register_show, inode->i_private);
1825 }
1826
1827 static ssize_t cl_register_write(struct file *file,
1828         const char __user *userbuf, size_t count, loff_t *ppos)
1829 {
1830         char buf[80];
1831         u32 offs;
1832         u32 val;
1833         struct clk *c = file->f_path.dentry->d_inode->i_private;
1834         struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;
1835
1836         if (sizeof(buf) <= count)
1837                 return -EINVAL;
1838
1839         if (copy_from_user(buf, userbuf, count))
1840                 return -EFAULT;
1841
1842         /* Terminate buffer and trim - whitespace may be appended
1843          * at the end when invoked from the shell command line */
1844         buf[count] = '\0';
1845         strim(buf);
1846
1847         if (sscanf(buf, "[0x%x] = 0x%x", &offs, &val) != 2)
1848                 return -EINVAL;
1849
1850         clk_enable(cld->soc_clk);
1851         cl_dvfs_writel(cld, val, offs & (~0x3));
1852         clk_disable(cld->soc_clk);
1853         return count;
1854 }
1855
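/*
 * Example of the write format parsed above (illustrative only; the actual
 * debugfs path depends on where the dfll clock dentry is created, so the
 * path below is an assumption):
 *
 *   echo "[0x14] = 0x08000000" > /sys/kernel/debug/clock/<dfll>/cl_dvfs/registers
 *
 * i.e. "[<hex offset>] = <hex value>"; the offset is rounded down to a 32-bit
 * register boundary before the write.
 */
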
1856 static const struct file_operations cl_register_fops = {
1857         .open           = cl_register_open,
1858         .read           = seq_read,
1859         .write          = cl_register_write,
1860         .llseek         = seq_lseek,
1861         .release        = single_release,
1862 };
1863
1864 int __init tegra_cl_dvfs_debug_init(struct clk *dfll_clk)
1865 {
1866         struct dentry *cl_dvfs_dentry;
1867
1868         if (!dfll_clk || !dfll_clk->dent || (dfll_clk->state == UNINITIALIZED))
1869                 return 0;
1870
1871         if (!debugfs_create_file("lock", S_IRUGO | S_IWUSR,
1872                 dfll_clk->dent, dfll_clk, &lock_fops))
1873                 goto err_out;
1874
1875         cl_dvfs_dentry = debugfs_create_dir("cl_dvfs", dfll_clk->dent);
1876         if (!cl_dvfs_dentry)
1877                 goto err_out;
1878
1879         if (!debugfs_create_file("monitor", S_IRUGO,
1880                 cl_dvfs_dentry, dfll_clk, &monitor_fops))
1881                 goto err_out;
1882
1883         if (!debugfs_create_file("vmax_mv", S_IRUGO,
1884                 cl_dvfs_dentry, dfll_clk, &vmax_fops))
1885                 goto err_out;
1886
1887         if (!debugfs_create_file("vmin_mv", S_IRUGO,
1888                 cl_dvfs_dentry, dfll_clk, &vmin_fops))
1889                 goto err_out;
1890
1891         if (!debugfs_create_file("tune_high_mv", S_IRUGO | S_IWUSR,
1892                 cl_dvfs_dentry, dfll_clk, &tune_high_mv_fops))
1893                 goto err_out;
1894
1895         if (!debugfs_create_file("dvco_min", S_IRUGO,
1896                 cl_dvfs_dentry, dfll_clk, &dvco_rate_min_fops))
1897                 goto err_out;
1898
1899         if (!debugfs_create_file("calibr_delay", S_IRUGO,
1900                 cl_dvfs_dentry, dfll_clk, &calibr_delay_fops))
1901                 goto err_out;
1902
1903         if (!debugfs_create_file("pmu_undershoot_gb", S_IRUGO,
1904                 cl_dvfs_dentry, dfll_clk, &undershoot_fops))
1905                 goto err_out;
1906
1907         if (!debugfs_create_file("registers", S_IRUGO | S_IWUSR,
1908                 cl_dvfs_dentry, dfll_clk, &cl_register_fops))
1909                 goto err_out;
1910
1911         return 0;
1912
1913 err_out:
1914         debugfs_remove_recursive(dfll_clk->dent);
1915         return -ENOMEM;
1916 }
1917 #endif