ARM: tegra: dvfs: Add CL-DVFS calibration delay to debugfs
1 /*
2  * arch/arm/mach-tegra/tegra_cl_dvfs.c
3  *
4  * Copyright (c) 2012-2013 NVIDIA CORPORATION. All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/kernel.h>
20 #include <linux/spinlock.h>
21 #include <linux/delay.h>
22 #include <linux/err.h>
23 #include <linux/io.h>
24 #include <linux/clk.h>
25 #include <linux/interrupt.h>
26 #include <linux/suspend.h>
27 #include <linux/debugfs.h>
28 #include <linux/seq_file.h>
29 #include <linux/uaccess.h>
30 #include <linux/module.h>
31 #include <linux/platform_device.h>
32
33 #include <mach/irqs.h>
34 #include <mach/hardware.h>
35
36 #include "tegra_cl_dvfs.h"
37 #include "clock.h"
38 #include "dvfs.h"
39 #include "iomap.h"
40
41 #define OUT_MASK                        0x3f
42
43 #define CL_DVFS_CTRL                    0x00
44 #define CL_DVFS_CONFIG                  0x04
45 #define CL_DVFS_CONFIG_DIV_MASK         0xff
46
47 #define CL_DVFS_PARAMS                  0x08
48 #define CL_DVFS_PARAMS_CG_SCALE         (0x1 << 24)
49 #define CL_DVFS_PARAMS_FORCE_MODE_SHIFT 22
50 #define CL_DVFS_PARAMS_FORCE_MODE_MASK  (0x3 << CL_DVFS_PARAMS_FORCE_MODE_SHIFT)
51 #define CL_DVFS_PARAMS_CF_PARAM_SHIFT   16
52 #define CL_DVFS_PARAMS_CF_PARAM_MASK    (0x3f << CL_DVFS_PARAMS_CF_PARAM_SHIFT)
53 #define CL_DVFS_PARAMS_CI_PARAM_SHIFT   8
54 #define CL_DVFS_PARAMS_CI_PARAM_MASK    (0x7 << CL_DVFS_PARAMS_CI_PARAM_SHIFT)
55 #define CL_DVFS_PARAMS_CG_PARAM_SHIFT   0
56 #define CL_DVFS_PARAMS_CG_PARAM_MASK    (0xff << CL_DVFS_PARAMS_CG_PARAM_SHIFT)
57
58 #define CL_DVFS_TUNE0                   0x0c
59 #define CL_DVFS_TUNE1                   0x10
60
61 #define CL_DVFS_FREQ_REQ                0x14
62 #define CL_DVFS_FREQ_REQ_FORCE_ENABLE   (0x1 << 28)
63 #define CL_DVFS_FREQ_REQ_FORCE_SHIFT    16
64 #define CL_DVFS_FREQ_REQ_FORCE_MASK     (0xfff << CL_DVFS_FREQ_REQ_FORCE_SHIFT)
65 #define FORCE_MAX                       2047
66 #define FORCE_MIN                       -2048
67 #define CL_DVFS_FREQ_REQ_SCALE_SHIFT    8
68 #define CL_DVFS_FREQ_REQ_SCALE_MASK     (0xff << CL_DVFS_FREQ_REQ_SCALE_SHIFT)
69 #define SCALE_MAX                       256
70 #define CL_DVFS_FREQ_REQ_FREQ_VALID     (0x1 << 7)
71 #define CL_DVFS_FREQ_REQ_FREQ_SHIFT     0
72 #define CL_DVFS_FREQ_REQ_FREQ_MASK      (0x7f << CL_DVFS_FREQ_REQ_FREQ_SHIFT)
73 #define FREQ_MAX                        127
74
75 #define CL_DVFS_SCALE_RAMP              0x18
76
77 #define CL_DVFS_DROOP_CTRL              0x1c
78 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT 16
79 #define CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK  \
80                 (0xff << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT)
81 #define CL_DVFS_DROOP_CTRL_CUT_SHIFT    8
82 #define CL_DVFS_DROOP_CTRL_CUT_MASK     (0xf << CL_DVFS_DROOP_CTRL_CUT_SHIFT)
83 #define CL_DVFS_DROOP_CTRL_RAMP_SHIFT   0
84 #define CL_DVFS_DROOP_CTRL_RAMP_MASK    (0xff << CL_DVFS_DROOP_CTRL_RAMP_SHIFT)
85
86 #define CL_DVFS_OUTPUT_CFG              0x20
87 #define CL_DVFS_OUTPUT_CFG_I2C_ENABLE   (0x1 << 30)
88 #define CL_DVFS_OUTPUT_CFG_SAFE_SHIFT   24
89 #define CL_DVFS_OUTPUT_CFG_SAFE_MASK    \
90                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT)
91 #define CL_DVFS_OUTPUT_CFG_MAX_SHIFT    16
92 #define CL_DVFS_OUTPUT_CFG_MAX_MASK     \
93                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MAX_SHIFT)
94 #define CL_DVFS_OUTPUT_CFG_MIN_SHIFT    8
95 #define CL_DVFS_OUTPUT_CFG_MIN_MASK     \
96                 (OUT_MASK << CL_DVFS_OUTPUT_CFG_MIN_SHIFT)
97
98 #define CL_DVFS_OUTPUT_FORCE            0x24
99 #define CL_DVFS_MONITOR_CTRL            0x28
100 #define CL_DVFS_MONITOR_CTRL_DISABLE    0
101 #define CL_DVFS_MONITOR_CTRL_FREQ       6
102 #define CL_DVFS_MONITOR_DATA            0x2c
103 #define CL_DVFS_MONITOR_DATA_NEW        (0x1 << 16)
104 #define CL_DVFS_MONITOR_DATA_MASK       0xFFFF
105
106 #define CL_DVFS_I2C_CFG                 0x40
107 #define CL_DVFS_I2C_CFG_ARB_ENABLE      (0x1 << 20)
108 #define CL_DVFS_I2C_CFG_HS_CODE_SHIFT   16
109 #define CL_DVFS_I2C_CFG_HS_CODE_MASK    (0x7 << CL_DVFS_I2C_CFG_HS_CODE_SHIFT)
110 #define CL_DVFS_I2C_CFG_PACKET_ENABLE   (0x1 << 15)
111 #define CL_DVFS_I2C_CFG_SIZE_SHIFT      12
112 #define CL_DVFS_I2C_CFG_SIZE_MASK       (0x7 << CL_DVFS_I2C_CFG_SIZE_SHIFT)
113 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_10   (0x1 << 10)
114 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT 0
115 #define CL_DVFS_I2C_CFG_SLAVE_ADDR_MASK \
116                 (0x3ff << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT)
117
118 #define CL_DVFS_I2C_VDD_REG_ADDR        0x44
119 #define CL_DVFS_I2C_STS                 0x48
120 #define CL_DVFS_I2C_STS_I2C_LAST_SHIFT  1
121 #define CL_DVFS_I2C_STS_I2C_REQ_PENDING 0x1
122
123 #define CL_DVFS_INTR_STS                0x5c
124 #define CL_DVFS_INTR_EN                 0x60
125 #define CL_DVFS_INTR_MIN_MASK           0x1
126 #define CL_DVFS_INTR_MAX_MASK           0x2
127
128 #define CL_DVFS_I2C_CLK_DIVISOR         0x16c
129 #define CL_DVFS_I2C_CLK_DIVISOR_MASK    0xffff
130 #define CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT 16
131 #define CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT 0
132
133 #define CL_DVFS_OUTPUT_LUT              0x200
134
135 #define CL_DVFS_CALIBR_TIME             40000
136 #define CL_DVFS_OUTPUT_PENDING_TIMEOUT  1000
137 #define CL_DVFS_OUTPUT_RAMP_DELAY       100
138 #define CL_DVFS_TUNE_HIGH_DELAY         2000
139
140 #define CL_DVFS_TUNE_HIGH_MARGIN_STEPS  2
141
142 #define CL_DVFS_DYNAMIC_OUTPUT_CFG      0
143
144 enum tegra_cl_dvfs_ctrl_mode {
145         TEGRA_CL_DVFS_UNINITIALIZED = 0,
146         TEGRA_CL_DVFS_DISABLED = 1,
147         TEGRA_CL_DVFS_OPEN_LOOP = 2,
148         TEGRA_CL_DVFS_CLOSED_LOOP = 3,
149 };
150
151 enum tegra_cl_dvfs_tune_state {
152         TEGRA_CL_DVFS_TUNE_LOW = 0,
153         TEGRA_CL_DVFS_TUNE_HIGH_REQUEST,
154         TEGRA_CL_DVFS_TUNE_HIGH,
155 };
156
157 struct dfll_rate_req {
158         u8      freq;
159         u8      scale;
160         u8      output;
161         u8      cap;
162 };
163
164 struct tegra_cl_dvfs {
165         void                                    *cl_base;
166         struct tegra_cl_dvfs_platform_data      *p_data;
167
168         struct dvfs                     *safe_dvfs;
169         struct tegra_cooling_device     *cdev;
170         struct work_struct              init_cdev_work;
171
172         struct clk                      *soc_clk;
173         struct clk                      *ref_clk;
174         struct clk                      *i2c_clk;
175         struct clk                      *dfll_clk;
176         unsigned long                   ref_rate;
177         unsigned long                   i2c_rate;
178
179         /* output voltage mapping:
180          * legacy dvfs table index -to- cl_dvfs output LUT index
181          * cl_dvfs output LUT index -to- PMU value/voltage pair ptr
182          */
183         u8                              clk_dvfs_map[MAX_DVFS_FREQS];
184         struct voltage_reg_map          *out_map[MAX_CL_DVFS_VOLTAGES];
185         u8                              num_voltages;
186         u8                              safe_output;
187         u8                              tune_high_out_start;
188         u8                              tune_high_out_min;
189         u8                              thermal_out_floors[MAX_THERMAL_FLOORS];
190         u8                              minimax_output;
191         unsigned long                   dvco_rate_min;
192
193         u8                              lut_min;
194         u8                              lut_max;
195         int                             thermal_idx;
196         struct dfll_rate_req            last_req;
197         enum tegra_cl_dvfs_tune_state   tune_state;
198         enum tegra_cl_dvfs_ctrl_mode    mode;
199
200         struct timer_list               tune_timer;
201         unsigned long                   tune_delay;
202         struct timer_list               calibration_timer;
203         unsigned long                   calibration_delay;
204         ktime_t                         last_calibration;
205         unsigned long                   calibration_range_min;
206         unsigned long                   calibration_range_max;
207 };
208
209 /* Conversion macros (the different scales used for the frequency request
210    and for the monitored rate are intentional, not a typo) */
211 #define RATE_STEP(cld)                          ((cld)->ref_rate / 2)
212 #define GET_REQUEST_FREQ(rate, ref_rate)        ((rate) / ((ref_rate) / 2))
213 #define GET_REQUEST_RATE(freq, ref_rate)        ((freq) * ((ref_rate) / 2))
214 #define GET_MONITORED_RATE(freq, ref_rate)      ((freq) * ((ref_rate) / 4))
215 #define GET_DROOP_FREQ(rate, ref_rate)          ((rate) / ((ref_rate) / 4))
216 #define ROUND_MIN_RATE(rate, ref_rate)          \
217                 (DIV_ROUND_UP(rate, (ref_rate) / 2) * ((ref_rate) / 2))
218 #define GET_DIV(ref_rate, out_rate, scale)      \
219                 DIV_ROUND_UP((ref_rate), (out_rate) * (scale))
220
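/*
 * Worked example (illustrative values only): assuming a 51 MHz reference
 * clock, RATE_STEP() is 25.5 MHz, so a 1000 MHz request maps to
 * GET_REQUEST_FREQ() = 39 and back to GET_REQUEST_RATE(39) = 994.5 MHz,
 * while the monitor counts in ref_rate/4 units, so a frequency-mode
 * reading of 78 corresponds to 78 * 12.75 MHz = 994.5 MHz.
 */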
221 static const char *mode_name[] = {
222         [TEGRA_CL_DVFS_UNINITIALIZED] = "uninitialized",
223         [TEGRA_CL_DVFS_DISABLED] = "disabled",
224         [TEGRA_CL_DVFS_OPEN_LOOP] = "open_loop",
225         [TEGRA_CL_DVFS_CLOSED_LOOP] = "closed_loop",
226 };
227
228 static inline u32 cl_dvfs_readl(struct tegra_cl_dvfs *cld, u32 offs)
229 {
230         return __raw_readl(cld->cl_base + offs);
231 }
232 static inline void cl_dvfs_writel(struct tegra_cl_dvfs *cld, u32 val, u32 offs)
233 {
234         __raw_writel(val, cld->cl_base + offs);
235 }
236 static inline void cl_dvfs_wmb(struct tegra_cl_dvfs *cld)
237 {
238         wmb();
239         cl_dvfs_readl(cld, CL_DVFS_CTRL);
240 }
241
242 static inline int output_enable(struct tegra_cl_dvfs *cld)
243 {
244         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
245
246         /* FIXME: PWM output control */
247         val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
248         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
249         cl_dvfs_wmb(cld);
250         return  0;
251 }
252
253 static noinline int output_flush_disable(struct tegra_cl_dvfs *cld)
254 {
255         int i;
256         u32 sts;
257         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
258
259         /* Flush transactions in flight, and then disable */
260         for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
261                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
262                 udelay(2);
263                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
264                         sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
265                         if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
266                                 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
267                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
268                                 wmb();
269                                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
270                                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
271                                         return 0; /* no pending rqst */
272
273                                 /* Re-enable, continue wait */
274                                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
275                                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
276                                 wmb();
277                         }
278                 }
279         }
280
281         /* I2C request is still pending - disable, anyway, but report error */
282         val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
283         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
284         cl_dvfs_wmb(cld);
285         return -ETIMEDOUT;
286 }
287
288 static noinline int output_disable_flush(struct tegra_cl_dvfs *cld)
289 {
290         int i;
291         u32 sts;
292         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
293
294         /* Disable output interface right away */
295         val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
296         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
297         cl_dvfs_wmb(cld);
298
299         /* Flush possible transaction in flight */
300         for (i = 0; i < CL_DVFS_OUTPUT_PENDING_TIMEOUT / 2; i++) {
301                 sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
302                 udelay(2);
303                 if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING)) {
304                         sts = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
305                         if (!(sts & CL_DVFS_I2C_STS_I2C_REQ_PENDING))
306                                 return 0;
307                 }
308         }
309
310         /* I2C request is still pending - report error */
311         return -ETIMEDOUT;
312 }
313
314 static inline int output_disable_ol_prepare(struct tegra_cl_dvfs *cld)
315 {
316         /* FIXME: PWM output control */
317         /*
318          * If cl-dvfs h/w does not require output to be quiet before disable,
319          * s/w can stop I2C communications at any time (including operations
320          * in closed loop mode), and I2C bus integrity is guaranteed even in
321          * case of flush timeout.
322          */
323         if (!cld->p_data->out_quiet_then_disable) {
324                 int ret = output_disable_flush(cld);
325                 if (ret)
326                         pr_debug("cl_dvfs: I2C pending timeout ol_prepare\n");
327                 return ret;
328         }
329         return 0;
330 }
331
332 static inline int output_disable_post_ol(struct tegra_cl_dvfs *cld)
333 {
334         /* FIXME: PWM output control */
335         /*
336          * If cl-dvfs h/w requires output to be quiet before disable, s/w
337          * should stop I2C communications only after the switch to open loop
338          * mode, and I2C bus integrity is not guaranteed in case of flush
339          * timeout.
340          */
341         if (cld->p_data->out_quiet_then_disable) {
342                 int ret = output_flush_disable(cld);
343                 if (ret)
344                         pr_err("cl_dvfs: I2C pending timeout post_ol\n");
345                 return ret;
346         }
347         return 0;
348 }
349
350 static inline void set_mode(struct tegra_cl_dvfs *cld,
351                             enum tegra_cl_dvfs_ctrl_mode mode)
352 {
353         cld->mode = mode;
354         cl_dvfs_writel(cld, mode - 1, CL_DVFS_CTRL);
355         cl_dvfs_wmb(cld);
356 }
357
358 static inline u8 get_output_min(struct tegra_cl_dvfs *cld)
359 {
360         u32 tune_min, thermal_min;
361
362         tune_min = cld->tune_state == TEGRA_CL_DVFS_TUNE_LOW ?
363                 0 : cld->tune_high_out_min;
364         thermal_min = 0;
365         if (cld->cdev && (cld->thermal_idx < cld->cdev->trip_temperatures_num))
366                 thermal_min = cld->thermal_out_floors[cld->thermal_idx];
367
368         return max(tune_min, thermal_min);
369 }
370
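/*
 * Fill the output LUT so that every entry at or below lut_min is clamped
 * to the lut_min voltage and every entry at or above lut_max is clamped
 * to the lut_max voltage, while entries in between are copied one-to-one
 * from the output map. This enforces the current voltage floor/ceiling
 * purely through LUT contents, since the h/w index limits are left alone.
 */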
371 static inline void _load_lut(struct tegra_cl_dvfs *cld)
372 {
373         int i;
374         u32 val;
375
376         val = cld->out_map[cld->lut_min]->reg_value;
377         for (i = 0; i <= cld->lut_min; i++)
378                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
379
380         for (; i < cld->lut_max; i++) {
381                 val = cld->out_map[i]->reg_value;
382                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
383         }
384
385         val = cld->out_map[cld->lut_max]->reg_value;
386         for (; i < cld->num_voltages; i++)
387                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_LUT + i * 4);
388
389         cl_dvfs_wmb(cld);
390 }
391
392 static void cl_dvfs_load_lut(struct tegra_cl_dvfs *cld)
393 {
394         u32 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
395         bool disable_out_for_load = !cld->p_data->out_quiet_then_disable &&
396                 (val & CL_DVFS_OUTPUT_CFG_I2C_ENABLE);
397
398         if (disable_out_for_load) {
399                 val &= ~CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
400                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
401                 cl_dvfs_wmb(cld);
402                 udelay(2); /* 2us (big margin) window for disable propagation */
403         }
404
405         _load_lut(cld);
406
407         if (disable_out_for_load) {
408                 val |= CL_DVFS_OUTPUT_CFG_I2C_ENABLE;
409                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
410                 cl_dvfs_wmb(cld);
411         }
412 }
413
414 #define set_tune_state(cld, state) \
415         do {                                                            \
416                 cld->tune_state = state;                                \
417                 pr_debug("%s: set tune state %d\n", __func__, state);   \
418         } while (0)
419
420 static inline void tune_low(struct tegra_cl_dvfs *cld)
421 {
422         if (cld->safe_dvfs->dfll_data.tune_trimmers)
423                 cld->safe_dvfs->dfll_data.tune_trimmers(false);
424         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
425         cl_dvfs_wmb(cld);
426 }
427
428 static inline void tune_high(struct tegra_cl_dvfs *cld)
429 {
430         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0_high_mv,
431                        CL_DVFS_TUNE0);
432         cl_dvfs_wmb(cld);
433         if (cld->safe_dvfs->dfll_data.tune_trimmers)
434                 cld->safe_dvfs->dfll_data.tune_trimmers(true);
435 }
436
437 static void set_ol_config(struct tegra_cl_dvfs *cld)
438 {
439         u32 val, out_min;
440
441         /* always tune low (safe) in open loop */
442         if (cld->tune_state != TEGRA_CL_DVFS_TUNE_LOW) {
443                 set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
444                 tune_low(cld);
445
446                 out_min = get_output_min(cld);
447 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
448                 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
449                 val &= ~CL_DVFS_OUTPUT_CFG_MIN_MASK;
450                 val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
451                 cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
452 #else
453                 if (cld->lut_min != out_min) {
454                         cld->lut_min = out_min;
455                         cl_dvfs_load_lut(cld);
456                 }
457 #endif
458         }
459
460         /* 1:1 scaling in open loop */
461         val = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
462         val |= (SCALE_MAX - 1) << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
463         val &= ~CL_DVFS_FREQ_REQ_FORCE_ENABLE;
464         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
465 }
466
467 static void set_cl_config(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
468 {
469 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
470         u32 val;
471 #endif
472         u32 out_max, out_min;
473
474         switch (cld->tune_state) {
475         case TEGRA_CL_DVFS_TUNE_LOW:
476                 if (req->cap > cld->tune_high_out_start) {
477                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH_REQUEST);
478                         mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
479                 }
480                 break;
481
482         case TEGRA_CL_DVFS_TUNE_HIGH:
483         case TEGRA_CL_DVFS_TUNE_HIGH_REQUEST:
484                 if (req->cap <= cld->tune_high_out_start) {
485                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_LOW);
486                         tune_low(cld);
487                 }
488                 break;
489         default:
490                 BUG();
491         }
492
493         out_min = get_output_min(cld);
494         if (req->cap > (out_min + 1))
495                 req->output = req->cap - 1;
496         else
497                 req->output = out_min + 1;
498         if (req->output == cld->safe_output)
499                 req->output++;
500         out_max = max((u8)(req->output + 1), cld->minimax_output);
501
502 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
503         val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
504         val &= ~(CL_DVFS_OUTPUT_CFG_MAX_MASK | CL_DVFS_OUTPUT_CFG_MIN_MASK);
505         val |= out_max << CL_DVFS_OUTPUT_CFG_MAX_SHIFT;
506         val |= out_min << CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
507         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
508 #else
509         if ((cld->lut_min != out_min) || (cld->lut_max != out_max)) {
510                 cld->lut_min = out_min;
511                 cld->lut_max = out_max;
512                 cl_dvfs_load_lut(cld);
513         }
514 #endif
515 }
516
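/*
 * Deferred completion of the LOW -> HIGH tuning transition: the timer set
 * in set_cl_config() re-checks the output state, and only applies the
 * high-voltage tuning settings once no I2C request is pending and both
 * the last sent output and the configured output floor have reached
 * tune_high_out_min (plus an extra output ramp delay); otherwise the
 * timer is simply re-armed.
 */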
517 static void tune_timer_cb(unsigned long data)
518 {
519         unsigned long flags;
520         u32 val, out_min, out_last;
521         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
522
523         clk_lock_save(cld->dfll_clk, &flags);
524
525         /* FIXME: PWM output control */
526         if (cld->tune_state == TEGRA_CL_DVFS_TUNE_HIGH_REQUEST) {
527 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
528                 val = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
529                 out_min = (val >> CL_DVFS_OUTPUT_CFG_MIN_SHIFT) & OUT_MASK;
530 #else
531                 out_min = cld->lut_min;
532 #endif
533                 val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
534                 out_last = (val >> CL_DVFS_I2C_STS_I2C_LAST_SHIFT) & OUT_MASK;
535
536                 if (!(val & CL_DVFS_I2C_STS_I2C_REQ_PENDING) &&
537                     (out_last >= cld->tune_high_out_min)  &&
538                     (out_min >= cld->tune_high_out_min)) {
539                         udelay(CL_DVFS_OUTPUT_RAMP_DELAY);
540                         set_tune_state(cld, TEGRA_CL_DVFS_TUNE_HIGH);
541                         tune_high(cld);
542                 } else {
543                         mod_timer(&cld->tune_timer, jiffies + cld->tune_delay);
544                 }
545         }
546         clk_unlock_restore(cld->dfll_clk, &flags);
547 }
548
549 static inline void calibration_timer_update(struct tegra_cl_dvfs *cld)
550 {
551         if (!cld->calibration_delay)
552                 return;
553         mod_timer(&cld->calibration_timer, jiffies + cld->calibration_delay);
554 }
555
556 static void cl_dvfs_calibrate(struct tegra_cl_dvfs *cld)
557 {
558         u32 val;
559         ktime_t now;
560         unsigned long data;
561         u8 out_min = get_output_min(cld);
562
563         /*
564          *  Enter calibration procedure only if
565          *  - closed loop operations
566          *  - last request engaged clock skipper
567          *  - at least specified time after the last calibration attempt
568          */
569         if ((cld->mode != TEGRA_CL_DVFS_CLOSED_LOOP) ||
570             ((cld->last_req.scale == (SCALE_MAX - 1)) &&
571              (cld->last_req.cap > out_min)))
572                 return;
573
574         now = ktime_get();
575         if (ktime_us_delta(now, cld->last_calibration) < CL_DVFS_CALIBR_TIME)
576                 return;
577         cld->last_calibration = now;
578
579         if (cl_dvfs_readl(cld, CL_DVFS_MONITOR_CTRL) !=
580             CL_DVFS_MONITOR_CTRL_FREQ)
581                 cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ,
582                                 CL_DVFS_MONITOR_CTRL);
583
584         /* Synchronize with sample period, and get rate measurements */
585         data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
586         do {
587                 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
588         } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
589         do {
590                 data = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA);
591         } while (!(data & CL_DVFS_MONITOR_DATA_NEW));
592
593         /* Skip calibration if I2C transaction is pending */
594         /* FIXME: PWM output control */
595         val = cl_dvfs_readl(cld, CL_DVFS_I2C_STS);
596         if (val & CL_DVFS_I2C_STS_I2C_REQ_PENDING)
597                 return;
598
599         /* Adjust minimum rate */
600         data &= CL_DVFS_MONITOR_DATA_MASK;
601         data = GET_MONITORED_RATE(data, cld->ref_rate);
602         if ((val > out_min) || (data < (cld->dvco_rate_min - RATE_STEP(cld))))
603                 cld->dvco_rate_min -= RATE_STEP(cld);
604         else if (data > (cld->dvco_rate_min + RATE_STEP(cld)))
605                 cld->dvco_rate_min += RATE_STEP(cld);
606         else
607                 return;
608
609         cld->dvco_rate_min = clamp(cld->dvco_rate_min,
610                         cld->calibration_range_min, cld->calibration_range_max);
611         calibration_timer_update(cld);
612         pr_debug("%s: calibrated dvco_rate_min %lu\n",
613                  __func__, cld->dvco_rate_min);
614 }
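/*
 * Note that each calibration pass moves dvco_rate_min by at most one
 * RATE_STEP (ref_rate / 2) up or down, based on the monitored rate and
 * output level, clamps the result to the calibration range set up in
 * cl_dvfs_set_dvco_rate_min(), and re-arms the calibration timer, so the
 * estimate converges gradually while the loop runs near the DVCO minimum.
 */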
615
616 static void calibration_timer_cb(unsigned long data)
617 {
618         unsigned long flags;
619         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)data;
620
621         pr_debug("%s\n", __func__);
622
623         clk_lock_save(cld->dfll_clk, &flags);
624         cl_dvfs_calibrate(cld);
625         clk_unlock_restore(cld->dfll_clk, &flags);
626 }
627
628 static void set_request(struct tegra_cl_dvfs *cld, struct dfll_rate_req *req)
629 {
630         u32 val;
631         int force_val = req->output - cld->safe_output;
632         int coef = 128; /* FIXME: cld->p_data->cfg_param->cg_scale? */
633
634         force_val = force_val * coef / cld->p_data->cfg_param->cg;
635         force_val = clamp(force_val, FORCE_MIN, FORCE_MAX);
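        /*
         * Example with hypothetical parameters: safe_output = 10,
         * req->output = 20 and cg = 2 give force_val = 10 * 128 / 2 = 640,
         * well inside the [FORCE_MIN, FORCE_MAX] window, biasing the loop
         * towards the requested output as soon as the request is posted.
         */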
636
637         val = req->freq << CL_DVFS_FREQ_REQ_FREQ_SHIFT;
638         val |= req->scale << CL_DVFS_FREQ_REQ_SCALE_SHIFT;
639         val |= ((u32)force_val << CL_DVFS_FREQ_REQ_FORCE_SHIFT) &
640                 CL_DVFS_FREQ_REQ_FORCE_MASK;
641         val |= CL_DVFS_FREQ_REQ_FREQ_VALID | CL_DVFS_FREQ_REQ_FORCE_ENABLE;
642
643         cl_dvfs_writel(cld, val, CL_DVFS_FREQ_REQ);
644         cl_dvfs_wmb(cld);
645 }
646
647 static u8 find_mv_out_cap(struct tegra_cl_dvfs *cld, int mv)
648 {
649         u8 cap;
650         int uv;
651
652         for (cap = 0; cap < cld->num_voltages; cap++) {
653                 uv = cld->out_map[cap]->reg_uV;
654                 if (uv >= mv * 1000)
655                         return cap;
656         }
657         return cap - 1; /* maximum possible output */
658 }
659
660 static int find_safe_output(
661         struct tegra_cl_dvfs *cld, unsigned long rate, u8 *safe_output)
662 {
663         int i;
664         int n = cld->safe_dvfs->num_freqs;
665         unsigned long *freqs = cld->safe_dvfs->freqs;
666
667         for (i = 0; i < n; i++) {
668                 if (freqs[i] >= rate) {
669                         *safe_output = cld->clk_dvfs_map[i];
670                         return 0;
671                 }
672         }
673         return -EINVAL;
674 }
675
676 static unsigned long find_dvco_rate_min(struct tegra_cl_dvfs *cld, u8 out_min)
677 {
678         int i;
679
680         for (i = 0; i < cld->safe_dvfs->num_freqs; i++) {
681                 if (cld->clk_dvfs_map[i] > out_min)
682                         break;
683         }
684         i = i ? i-1 : 0;
685         return cld->safe_dvfs->freqs[i];
686 }
687
688 static void cl_dvfs_set_dvco_rate_min(struct tegra_cl_dvfs *cld)
689 {
690         unsigned long rate = cld->safe_dvfs->dfll_data.out_rate_min;
691         if (cld->cdev && (cld->thermal_idx < cld->cdev->trip_temperatures_num))
692                 rate = find_dvco_rate_min(
693                                 cld, cld->thermal_out_floors[cld->thermal_idx]);
694
695         /* round minimum rate to request unit (ref_rate/2) boundary */
696         cld->dvco_rate_min = ROUND_MIN_RATE(rate, cld->ref_rate);
697
698         /* dvco min rate is under-estimated, so skew the calibration range up */
699         cld->calibration_range_min = cld->dvco_rate_min - 2 * RATE_STEP(cld);
700         cld->calibration_range_max = cld->dvco_rate_min + 6 * RATE_STEP(cld);
701 }
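/*
 * Worked example (hypothetical numbers): with out_rate_min = 700 MHz and
 * an assumed 51 MHz reference clock, ROUND_MIN_RATE() rounds up to the
 * next multiple of 25.5 MHz, i.e. 714 MHz, and the calibration window
 * becomes [714 - 2 * 25.5, 714 + 6 * 25.5] = [663, 867] MHz.
 */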
702
703 static struct voltage_reg_map *find_vdd_map_entry(
704         struct tegra_cl_dvfs *cld, int mV, bool exact)
705 {
706         int i, reg_mV;
707
708         for (i = 0; i < cld->p_data->vdd_map_size; i++) {
709                 /* round down to 1mV */
710                 reg_mV = cld->p_data->vdd_map[i].reg_uV / 1000;
711                 if (mV <= reg_mV)
712                         break;
713         }
714
715         if (i < cld->p_data->vdd_map_size) {
716                 if (!exact || (mV == reg_mV))
717                         return &cld->p_data->vdd_map[i];
718         }
719         return NULL;
720 }
721
722 static void cl_dvfs_init_maps(struct tegra_cl_dvfs *cld)
723 {
724         int i, j, v, v_max, n;
725         const int *millivolts;
726         struct voltage_reg_map *m;
727
728         BUILD_BUG_ON(MAX_CL_DVFS_VOLTAGES > OUT_MASK + 1);
729
730         n = cld->safe_dvfs->num_freqs;
731         BUG_ON(n >= MAX_CL_DVFS_VOLTAGES);
732
733         millivolts = cld->safe_dvfs->dfll_millivolts;
734         v_max = millivolts[n - 1];
735
736         v = cld->safe_dvfs->dfll_data.min_millivolts;
737         BUG_ON(v > millivolts[0]);
738
739         cld->out_map[0] = find_vdd_map_entry(cld, v, true);
740         BUG_ON(!cld->out_map[0]);
741
742         for (i = 0, j = 1; i < n; i++) {
743                 for (;;) {
744                         v += max(1, (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j));
745                         if (v >= millivolts[i])
746                                 break;
747
748                         m = find_vdd_map_entry(cld, v, false);
749                         BUG_ON(!m);
750                         if (m != cld->out_map[j - 1])
751                                 cld->out_map[j++] = m;
752                 }
753
754                 v = millivolts[i];
755                 m = find_vdd_map_entry(cld, v, true);
756                 BUG_ON(!m);
757                 if (m != cld->out_map[j - 1])
758                         cld->out_map[j++] = m;
759                 cld->clk_dvfs_map[i] = j - 1;
760         }
761         BUG_ON(j > MAX_CL_DVFS_VOLTAGES);
762         cld->num_voltages = j;
763 }
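/*
 * The resulting LUT is a merged, monotonic voltage list: every voltage in
 * the legacy dvfs table gets an exact PMU map entry, the gaps between
 * adjacent dvfs levels are padded with intermediate PMU entries spaced by
 * roughly (v_max - v) / (MAX_CL_DVFS_VOLTAGES - j) mV so the output can
 * move in finer increments, and clk_dvfs_map[] records the LUT index of
 * the exact entry for each dvfs frequency.
 */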
764
765 static void cl_dvfs_init_tuning_thresholds(struct tegra_cl_dvfs *cld)
766 {
767         int mv;
768
769         /*
770          * Convert high tuning voltage threshold into output LUT index, and
771          * add necessary margin. If the voltage threshold is outside the operating
772          * range, set it at maximum output level to effectively disable tuning
773          * parameters adjustment.
774          */
775         cld->tune_high_out_min = cld->num_voltages - 1;
776         cld->tune_high_out_start = cld->num_voltages - 1;
777         mv = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
778         if (mv >= cld->safe_dvfs->dfll_data.min_millivolts) {
779                 u8 out_min = find_mv_out_cap(cld, mv);
780                 if ((out_min + 2) < cld->num_voltages) {
781                         u8 out_start = out_min + CL_DVFS_TUNE_HIGH_MARGIN_STEPS;
782                         if (out_start < cld->num_voltages) {
783                                 cld->tune_high_out_min = out_min;
784                                 cld->tune_high_out_start = out_start;
785                                 if (cld->minimax_output <= out_min)
786                                         cld->minimax_output = out_min + 1;
787                         }
788                 }
789         }
790 }
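/*
 * Numeric sketch (hypothetical LUT): if tune_high_min_millivolts maps to
 * LUT index 30 and the table has at least 33 entries, tune_high_out_min
 * becomes 30, tune_high_out_start becomes 32 (CL_DVFS_TUNE_HIGH_MARGIN_STEPS
 * above it), and minimax_output is bumped to 31 if it was at or below the
 * new floor.
 */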
791
792 static void cl_dvfs_init_cold_output_floor(struct tegra_cl_dvfs *cld)
793 {
794         int i;
795         if (!cld->cdev)
796                 return;
797         /*
798          * Convert monotonically decreasing thermal floors at low temperature
799          * into output LUT indexes; make sure there is room for regulation
800          * above the maximum thermal floor.
801          */
802         for (i = 0; i < cld->cdev->trip_temperatures_num; i++) {
803                 cld->thermal_out_floors[i] = find_mv_out_cap(
804                         cld, cld->safe_dvfs->dvfs_rail->therm_mv_floors[i]);
805         }
806         BUG_ON(cld->thermal_out_floors[0] + 2 >= cld->num_voltages);
807         if (cld->minimax_output <= cld->thermal_out_floors[0])
808                 cld->minimax_output = cld->thermal_out_floors[0] + 1;
809 }
810
811 static void cl_dvfs_init_output_thresholds(struct tegra_cl_dvfs *cld)
812 {
813         cld->minimax_output = 0;
814         cl_dvfs_init_tuning_thresholds(cld);
815         cl_dvfs_init_cold_output_floor(cld);
816
817         /* make sure safe output is safe at any temperature */
818         cld->safe_output = cld->thermal_out_floors[0] ? : 1;
819         if (cld->minimax_output <= cld->safe_output)
820                 cld->minimax_output = cld->safe_output + 1;
821 }
822
823 static void cl_dvfs_init_pwm_if(struct tegra_cl_dvfs *cld)
824 {
825         /* FIXME: not supported */
826 }
827
828 static void cl_dvfs_init_i2c_if(struct tegra_cl_dvfs *cld)
829 {
830         u32 val, div;
831         struct tegra_cl_dvfs_platform_data *p_data = cld->p_data;
832         bool hs_mode = p_data->u.pmu_i2c.hs_rate;
833
834         /* PMU slave address, vdd register offset, and transfer mode */
835         val = p_data->u.pmu_i2c.slave_addr << CL_DVFS_I2C_CFG_SLAVE_ADDR_SHIFT;
836         if (p_data->u.pmu_i2c.addr_10)
837                 val |= CL_DVFS_I2C_CFG_SLAVE_ADDR_10;
838         if (hs_mode) {
839                 val |= p_data->u.pmu_i2c.hs_master_code <<
840                         CL_DVFS_I2C_CFG_HS_CODE_SHIFT;
841                 val |= CL_DVFS_I2C_CFG_PACKET_ENABLE;
842         }
843         val |= CL_DVFS_I2C_CFG_SIZE_MASK;
844         val |= CL_DVFS_I2C_CFG_ARB_ENABLE;
845         cl_dvfs_writel(cld, val, CL_DVFS_I2C_CFG);
846         cl_dvfs_writel(cld, p_data->u.pmu_i2c.reg, CL_DVFS_I2C_VDD_REG_ADDR);
847
848
849         val = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.fs_rate, 8);
850         BUG_ON(!val || (val > CL_DVFS_I2C_CLK_DIVISOR_MASK));
851         val = (val - 1) << CL_DVFS_I2C_CLK_DIVISOR_FS_SHIFT;
852         if (hs_mode) {
853                 div = GET_DIV(cld->i2c_rate, p_data->u.pmu_i2c.hs_rate, 12);
854                 BUG_ON(!div || (div > CL_DVFS_I2C_CLK_DIVISOR_MASK));
855         } else {
856                 div = 2;        /* default hs divisor just in case */
857         }
858         val |= (div - 1) << CL_DVFS_I2C_CLK_DIVISOR_HS_SHIFT;
859         cl_dvfs_writel(cld, val, CL_DVFS_I2C_CLK_DIVISOR);
860         cl_dvfs_wmb(cld);
861 }
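/*
 * Divisor example (hypothetical clock rates): with i2c_rate = 48 MHz and
 * fs_rate = 400 kHz, GET_DIV(48 MHz, 400 kHz, 8) = DIV_ROUND_UP(48000000,
 * 3200000) = 15, so the FS divisor field is programmed with 15 - 1 = 14.
 */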
862
863 static void cl_dvfs_init_out_if(struct tegra_cl_dvfs *cld)
864 {
865         u32 val;
866
867         /*
868          * Disable output, and set safe voltage and output limits;
869          * disable and clear limit interrupts.
870          */
871         cld->tune_state = TEGRA_CL_DVFS_TUNE_LOW;
872         cld->thermal_idx = 0;
873         cl_dvfs_set_dvco_rate_min(cld);
874 #if CL_DVFS_DYNAMIC_OUTPUT_CFG
875         val = get_output_min(cld);
876         cld->lut_min = 0;
877         cld->lut_max = cld->num_voltages - 1;
878 #else
879         /*
880          * Allow the entire range of LUT indexes, but limit output voltage in
881          * LUT mapping (this "indirect" application of limits is used because
882          * h/w does not support dynamic change of index limits, while dynamic
883          * reload of the LUT is fine).
884          */
885         val = 0;
886         cld->lut_min = get_output_min(cld);
887         cld->lut_max = cld->num_voltages - 1;
888 #endif
889
890         val = (cld->safe_output << CL_DVFS_OUTPUT_CFG_SAFE_SHIFT) |
891                 ((cld->num_voltages - 1) << CL_DVFS_OUTPUT_CFG_MAX_SHIFT) |
892                 (val << CL_DVFS_OUTPUT_CFG_MIN_SHIFT);
893         cl_dvfs_writel(cld, val, CL_DVFS_OUTPUT_CFG);
894         cl_dvfs_wmb(cld);
895
896         cl_dvfs_writel(cld, 0, CL_DVFS_OUTPUT_FORCE);
897         cl_dvfs_writel(cld, 0, CL_DVFS_INTR_EN);
898         cl_dvfs_writel(cld, CL_DVFS_INTR_MAX_MASK | CL_DVFS_INTR_MIN_MASK,
899                        CL_DVFS_INTR_STS);
900
901         /* fill in LUT table */
902         cl_dvfs_load_lut(cld);
903
904         /* configure transport */
905         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
906                 cl_dvfs_init_i2c_if(cld);
907         else
908                 cl_dvfs_init_pwm_if(cld);
909 }
910
911 static void cl_dvfs_init_cntrl_logic(struct tegra_cl_dvfs *cld)
912 {
913         u32 val;
914         struct tegra_cl_dvfs_cfg_param *param = cld->p_data->cfg_param;
915
916         /* configure mode, control loop parameters, DFLL tuning */
917         set_mode(cld, TEGRA_CL_DVFS_DISABLED);
918
919         val = GET_DIV(cld->ref_rate, param->sample_rate, 32);
920         BUG_ON(val > CL_DVFS_CONFIG_DIV_MASK);
921         cl_dvfs_writel(cld, val, CL_DVFS_CONFIG);
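        /*
         * E.g. (illustrative values): ref_rate = 51 MHz and a 12.5 kHz
         * sample_rate give GET_DIV(51 MHz, 12.5 kHz, 32) =
         * DIV_ROUND_UP(51000000, 400000) = 128, which fits the 8-bit
         * CL_DVFS_CONFIG_DIV_MASK.
         */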
922
923         val = (param->force_mode << CL_DVFS_PARAMS_FORCE_MODE_SHIFT) |
924                 (param->cf << CL_DVFS_PARAMS_CF_PARAM_SHIFT) |
925                 (param->ci << CL_DVFS_PARAMS_CI_PARAM_SHIFT) |
926                 ((u8)param->cg << CL_DVFS_PARAMS_CG_PARAM_SHIFT) |
927                 (param->cg_scale ? CL_DVFS_PARAMS_CG_SCALE : 0);
928         cl_dvfs_writel(cld, val, CL_DVFS_PARAMS);
929
930         if (cld->safe_dvfs->dfll_data.tune_trimmers)
931                 cld->safe_dvfs->dfll_data.tune_trimmers(false);
932         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune0, CL_DVFS_TUNE0);
933         cl_dvfs_writel(cld, cld->safe_dvfs->dfll_data.tune1, CL_DVFS_TUNE1);
934
935         /* configure droop (skipper 1) and scale (skipper 2) */
936         val = GET_DROOP_FREQ(cld->safe_dvfs->dfll_data.droop_rate_min,
937                         cld->ref_rate) << CL_DVFS_DROOP_CTRL_MIN_FREQ_SHIFT;
938         BUG_ON(val > CL_DVFS_DROOP_CTRL_MIN_FREQ_MASK);
939         val |= (param->droop_cut_value << CL_DVFS_DROOP_CTRL_CUT_SHIFT);
940         val |= (param->droop_restore_ramp << CL_DVFS_DROOP_CTRL_RAMP_SHIFT);
941         cl_dvfs_writel(cld, val, CL_DVFS_DROOP_CTRL);
942
943         cld->last_req.cap = 0;
944         cld->last_req.freq = 0;
945         cld->last_req.output = 0;
946         cld->last_req.scale = SCALE_MAX - 1;
947         cl_dvfs_writel(cld, CL_DVFS_FREQ_REQ_SCALE_MASK, CL_DVFS_FREQ_REQ);
948         cl_dvfs_writel(cld, param->scale_out_ramp, CL_DVFS_SCALE_RAMP);
949
950         /* select frequency for monitoring */
951         cl_dvfs_writel(cld, CL_DVFS_MONITOR_CTRL_FREQ, CL_DVFS_MONITOR_CTRL);
952         cl_dvfs_wmb(cld);
953 }
954
955 static int cl_dvfs_enable_clocks(struct tegra_cl_dvfs *cld)
956 {
957         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
958                 clk_enable(cld->i2c_clk);
959
960         clk_enable(cld->ref_clk);
961         clk_enable(cld->soc_clk);
962         return 0;
963 }
964
965 static void cl_dvfs_disable_clocks(struct tegra_cl_dvfs *cld)
966 {
967         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C)
968                 clk_disable(cld->i2c_clk);
969
970         clk_disable(cld->ref_clk);
971         clk_disable(cld->soc_clk);
972 }
973
974 static int cl_dvfs_init(struct tegra_cl_dvfs *cld)
975 {
976         int ret;
977
978         /* Enable output interface clock */
979         if (cld->p_data->pmu_if == TEGRA_CL_DVFS_PMU_I2C) {
980                 ret = clk_enable(cld->i2c_clk);
981                 if (ret) {
982                         pr_err("%s: Failed to enable %s\n",
983                                __func__, cld->i2c_clk->name);
984                         return ret;
985                 }
986                 cld->i2c_rate = clk_get_rate(cld->i2c_clk);
987         } else {
988                 pr_err("%s: PMU interface is not I2C\n", __func__);
989                 return -EINVAL;
990         }
991
992         /* Enable module clocks, release control logic reset */
993         ret = clk_enable(cld->ref_clk);
994         if (ret) {
995                 pr_err("%s: Failed to enable %s\n",
996                        __func__, cld->ref_clk->name);
997                 return ret;
998         }
999         ret = clk_enable(cld->soc_clk);
1000         if (ret) {
1001                 pr_err("%s: Failed to enable %s\n",
1002                        __func__, cld->soc_clk->name);
1003                 return ret;
1004         }
1005         cld->ref_rate = clk_get_rate(cld->ref_clk);
1006         BUG_ON(!cld->ref_rate);
1007
1008         /* init tuning timer */
1009         init_timer(&cld->tune_timer);
1010         cld->tune_timer.function = tune_timer_cb;
1011         cld->tune_timer.data = (unsigned long)cld;
1012         cld->tune_delay = usecs_to_jiffies(CL_DVFS_TUNE_HIGH_DELAY);
1013
1014         /* init calibration timer */
1015         init_timer(&cld->calibration_timer);
1016         cld->calibration_timer.function = calibration_timer_cb;
1017         cld->calibration_timer.data = (unsigned long)cld;
1018         cld->calibration_delay = usecs_to_jiffies(CL_DVFS_CALIBR_TIME);
1019
1020         /* Get output voltage mapping ready */
1021         cl_dvfs_init_maps(cld);
1022
1023         /* Setup output range thresholds */
1024         cl_dvfs_init_output_thresholds(cld);
1025
1026         /* Setup PMU interface */
1027         cl_dvfs_init_out_if(cld);
1028
1029         /* Configure control registers in disabled mode and disable clocks */
1030         cl_dvfs_init_cntrl_logic(cld);
1031         cl_dvfs_disable_clocks(cld);
1032
1033         return 0;
1034 }
1035
1036 /*
1037  * Re-initialize and enable target device clock in open loop mode. Called
1038  * directly from SoC clock resume syscore operation. Closed loop will be
1039  * re-entered in platform syscore ops as well.
1040  */
1041 void tegra_cl_dvfs_resume(struct tegra_cl_dvfs *cld)
1042 {
1043         enum tegra_cl_dvfs_ctrl_mode mode = cld->mode;
1044         struct dfll_rate_req req = cld->last_req;
1045
1046         cl_dvfs_enable_clocks(cld);
1047
1048         /* Setup PMU interface, and configure controls in disabled mode */
1049         cl_dvfs_init_out_if(cld);
1050         cl_dvfs_init_cntrl_logic(cld);
1051
1052         cl_dvfs_disable_clocks(cld);
1053
1054         /* Restore last request and mode */
1055         cld->last_req = req;
1056         if (mode != TEGRA_CL_DVFS_DISABLED) {
1057                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1058                 WARN(mode > TEGRA_CL_DVFS_OPEN_LOOP,
1059                      "DFLL was left locked in suspend\n");
1060         }
1061 }
1062
1063 #ifdef CONFIG_THERMAL
1064 /* cl_dvfs cooling device */
1065 static int tegra_cl_dvfs_get_cdev_max_state(struct thermal_cooling_device *cdev,
1066                                             unsigned long *max_state)
1067 {
1068         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1069         *max_state = cld->cdev->trip_temperatures_num;
1070         return 0;
1071 }
1072
1073 static int tegra_cl_dvfs_get_cdev_cur_state(struct thermal_cooling_device *cdev,
1074                                             unsigned long *cur_state)
1075 {
1076         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1077         *cur_state = cld->thermal_idx;
1078         return 0;
1079 }
1080
1081 static int tegra_cl_dvfs_set_cdev_state(struct thermal_cooling_device *cdev,
1082                                         unsigned long cur_state)
1083 {
1084         unsigned long flags;
1085         struct tegra_cl_dvfs *cld = (struct tegra_cl_dvfs *)cdev->devdata;
1086
1087         clk_lock_save(cld->dfll_clk, &flags);
1088
1089         if (cld->thermal_idx != cur_state) {
1090                 cld->thermal_idx = cur_state;
1091                 cl_dvfs_set_dvco_rate_min(cld);
1092                 if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1093                         tegra_cl_dvfs_request_rate(cld,
1094                                 tegra_cl_dvfs_request_get(cld));
1095                 }
1096         }
1097         clk_unlock_restore(cld->dfll_clk, &flags);
1098         return 0;
1099 }
1100
1101 static struct thermal_cooling_device_ops tegra_cl_dvfs_cooling_ops = {
1102         .get_max_state = tegra_cl_dvfs_get_cdev_max_state,
1103         .get_cur_state = tegra_cl_dvfs_get_cdev_cur_state,
1104         .set_cur_state = tegra_cl_dvfs_set_cdev_state,
1105 };
1106
1107 static void tegra_cl_dvfs_init_cdev(struct work_struct *work)
1108 {
1109         struct tegra_cl_dvfs *cld = container_of(
1110                 work, struct tegra_cl_dvfs, init_cdev_work);
1111
1112         if (!cld->cdev)
1113                 return;
1114
1115         /* just report error - initialized at worst-case (WC) temperature, anyway */
1116         if (IS_ERR_OR_NULL(thermal_cooling_device_register(
1117                 cld->cdev->cdev_type, (void *)cld,
1118                 &tegra_cl_dvfs_cooling_ops))) {
1119                 pr_err("tegra cooling device %s failed to register\n",
1120                        cld->cdev->cdev_type);
1121                 return;
1122         }
1123         pr_info("%s cooling device is registered\n", cld->cdev->cdev_type);
1124 }
1125 #endif
1126
1127 #ifdef CONFIG_PM_SLEEP
1128 /*
1129  * cl_dvfs controls clock/voltage to other devices, including CPU. Therefore,
1130  * cl_dvfs driver pm suspend callback does not stop cl-dvfs operations. It is
1131  * only used to enforce the cold voltage limit, since the SoC may cool down
1132  * during suspend without waking up. The correct temperature zone after suspend
1133  * will be updated via the cl_dvfs cooling device interface when the temperature
1134  * sensor resumes.
1135  */
1136 static int tegra_cl_dvfs_suspend_cl(struct device *dev)
1137 {
1138         unsigned long flags;
1139         struct tegra_cl_dvfs *cld = dev_get_drvdata(dev);
1140
1141         clk_lock_save(cld->dfll_clk, &flags);
1142         cld->thermal_idx = 0;
1143         cl_dvfs_set_dvco_rate_min(cld);
1144         if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
1145                 set_cl_config(cld, &cld->last_req);
1146                 set_request(cld, &cld->last_req);
1147         }
1148         clk_unlock_restore(cld->dfll_clk, &flags);
1149
1150         return 0;
1151 }
1152
1153 static const struct dev_pm_ops tegra_cl_dvfs_pm_ops = {
1154         .suspend = tegra_cl_dvfs_suspend_cl,
1155 };
1156 #endif
1157
1158 static int __init tegra_cl_dvfs_probe(struct platform_device *pdev)
1159 {
1160         int ret;
1161         struct tegra_cl_dvfs_platform_data *p_data;
1162         struct resource *res;
1163         struct tegra_cl_dvfs *cld;
1164         struct clk *ref_clk, *soc_clk, *i2c_clk, *safe_dvfs_clk, *dfll_clk;
1165
1166         /* Get resources */
1167         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1168         if (!res) {
1169                 dev_err(&pdev->dev, "missing register base\n");
1170                 return -ENOMEM;
1171         }
1172
1173         p_data = pdev->dev.platform_data;
1174         if (!p_data || !p_data->cfg_param || !p_data->vdd_map) {
1175                 dev_err(&pdev->dev, "missing platform data\n");
1176                 return -ENODATA;
1177         }
1178
1179         ref_clk = clk_get(&pdev->dev, "ref");
1180         soc_clk = clk_get(&pdev->dev, "soc");
1181         i2c_clk = clk_get(&pdev->dev, "i2c");
1182         safe_dvfs_clk = clk_get(&pdev->dev, "safe_dvfs");
1183         dfll_clk = clk_get(&pdev->dev, p_data->dfll_clk_name);
1184         if (IS_ERR(ref_clk) || IS_ERR(soc_clk) || IS_ERR(i2c_clk)) {
1185                 dev_err(&pdev->dev, "missing control clock\n");
1186                 return -ENODEV;
1187         }
1188         if (IS_ERR(safe_dvfs_clk)) {
1189                 dev_err(&pdev->dev, "missing safe dvfs source clock\n");
1190                 return PTR_ERR(safe_dvfs_clk);
1191         }
1192         if (IS_ERR(dfll_clk)) {
1193                 dev_err(&pdev->dev, "missing target dfll clock\n");
1194                 return PTR_ERR(dfll_clk);
1195         }
1196         if (!safe_dvfs_clk->dvfs || !safe_dvfs_clk->dvfs->dvfs_rail) {
1197                 dev_err(&pdev->dev, "invalid safe dvfs source\n");
1198                 return -EINVAL;
1199         }
1200
1201         /* Allocate cl_dvfs object and populate resource accessors */
1202         cld = kzalloc(sizeof(*cld), GFP_KERNEL);
1203         if (!cld) {
1204                 dev_err(&pdev->dev, "failed to allocate cl_dvfs object\n");
1205                 return -ENOMEM;
1206         }
1207
1208         cld->cl_base = IO_ADDRESS(res->start);
1209         cld->p_data = p_data;
1210         cld->ref_clk = ref_clk;
1211         cld->soc_clk = soc_clk;
1212         cld->i2c_clk = i2c_clk;
1213         cld->dfll_clk = dfll_clk;
1214         cld->safe_dvfs = safe_dvfs_clk->dvfs;
1215 #ifdef CONFIG_THERMAL
1216         cld->cdev = cld->safe_dvfs->dvfs_rail->dfll_mode_cdev;
1217         INIT_WORK(&cld->init_cdev_work, tegra_cl_dvfs_init_cdev);
1218 #endif
1219         /* Initialize cl_dvfs */
1220         ret = cl_dvfs_init(cld);
1221         if (ret) {
1222                 kfree(cld);
1223                 return ret;
1224         }
1225
1226         platform_set_drvdata(pdev, cld);
1227
1228         /*
1229          * Schedule cooling device registration as a separate work to address
1230          * the following race: when cl_dvfs is probed the DFLL child clock
1231          * (e.g., CPU) cannot be changed; on the other hand cooling device
1232          * registration will update the entire thermal zone, and may trigger
1233          * rate change of the target clock
1234          */
1235         if (cld->cdev)
1236                 schedule_work(&cld->init_cdev_work);
1237         return 0;
1238 }
1239
1240 static struct platform_driver tegra_cl_dvfs_driver = {
1241         .driver         = {
1242                 .name   = "tegra_cl_dvfs",
1243                 .owner  = THIS_MODULE,
1244 #ifdef CONFIG_PM_SLEEP
1245                 .pm = &tegra_cl_dvfs_pm_ops,
1246 #endif
1247         },
1248 };
1249
1250 int __init tegra_init_cl_dvfs(void)
1251 {
1252         return platform_driver_probe(&tegra_cl_dvfs_driver,
1253                                      tegra_cl_dvfs_probe);
1254 }
1255
1256 /*
1257  * CL_DVFS states:
1258  *
1259  * - DISABLED: control logic mode - DISABLED, output interface disabled,
1260  *   dfll in reset
1261  * - OPEN_LOOP: control logic mode - OPEN_LOOP, output interface disabled,
1262  *   dfll is running "unlocked"
1263  * - CLOSED_LOOP: control logic mode - CLOSED_LOOP, output interface enabled,
1264  *   dfll is running "locked"
1265  */
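/*
 * Typical call sequence (sketch based on the state descriptions above):
 *
 *      tegra_cl_dvfs_enable(cld);              DISABLED -> OPEN_LOOP
 *      tegra_cl_dvfs_request_rate(cld, rate);  save/apply rate request
 *      tegra_cl_dvfs_lock(cld);                OPEN_LOOP -> CLOSED_LOOP
 *      ...
 *      tegra_cl_dvfs_unlock(cld);              CLOSED_LOOP -> OPEN_LOOP
 *      tegra_cl_dvfs_disable(cld);             OPEN_LOOP -> DISABLED
 */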
1266
1267 /* Switch from any other state to DISABLED state */
1268 void tegra_cl_dvfs_disable(struct tegra_cl_dvfs *cld)
1269 {
1270         switch (cld->mode) {
1271         case TEGRA_CL_DVFS_CLOSED_LOOP:
1272                 WARN(1, "DFLL is disabled directly from closed loop mode\n");
1273                 set_ol_config(cld);
1274                 output_disable_ol_prepare(cld);
1275                 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1276                 output_disable_post_ol(cld);
1277                 cl_dvfs_disable_clocks(cld);
1278                 return;
1279
1280         case TEGRA_CL_DVFS_OPEN_LOOP:
1281                 set_mode(cld, TEGRA_CL_DVFS_DISABLED);
1282                 cl_dvfs_disable_clocks(cld);
1283                 return;
1284
1285         default:
1286                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1287                 return;
1288         }
1289 }
1290
1291 /* Switch from DISABLE state to OPEN_LOOP state */
1292 int tegra_cl_dvfs_enable(struct tegra_cl_dvfs *cld)
1293 {
1294         if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1295                 pr_err("%s: Cannot enable DFLL in %s mode\n",
1296                        __func__, mode_name[cld->mode]);
1297                 return -EPERM;
1298         }
1299
1300         if (cld->mode != TEGRA_CL_DVFS_DISABLED)
1301                 return 0;
1302
1303         cl_dvfs_enable_clocks(cld);
1304         set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1305         return 0;
1306 }
1307
1308 /* Switch from OPEN_LOOP state to CLOSED_LOOP state */
1309 int tegra_cl_dvfs_lock(struct tegra_cl_dvfs *cld)
1310 {
1311         struct dfll_rate_req *req = &cld->last_req;
1312
1313         switch (cld->mode) {
1314         case TEGRA_CL_DVFS_CLOSED_LOOP:
1315                 return 0;
1316
1317         case TEGRA_CL_DVFS_OPEN_LOOP:
1318                 if (req->freq == 0) {
1319                         pr_err("%s: Cannot lock DFLL at rate 0\n", __func__);
1320                         return -EINVAL;
1321                 }
1322
1323                 /*
1324                  * Update control logic setting with last rate request;
1325                  * sync output limits with current tuning and thermal state,
1326                  * enable output and switch to closed loop mode.
1327                  */
1328                 set_cl_config(cld, req);
1329                 output_enable(cld);
1330                 set_mode(cld, TEGRA_CL_DVFS_CLOSED_LOOP);
1331                 set_request(cld, req);
1332                 calibration_timer_update(cld);
1333                 return 0;
1334
1335         default:
1336                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1337                 pr_err("%s: Cannot lock DFLL in %s mode\n",
1338                        __func__, mode_name[cld->mode]);
1339                 return -EPERM;
1340         }
1341 }
1342
1343 /* Switch from CLOSED_LOOP state to OPEN_LOOP state */
1344 int tegra_cl_dvfs_unlock(struct tegra_cl_dvfs *cld)
1345 {
1346         int ret;
1347
1348         switch (cld->mode) {
1349         case TEGRA_CL_DVFS_CLOSED_LOOP:
1350                 set_ol_config(cld);
1351                 ret = output_disable_ol_prepare(cld);
1352                 set_mode(cld, TEGRA_CL_DVFS_OPEN_LOOP);
1353                 if (!ret)
1354                         ret = output_disable_post_ol(cld);
1355                 return ret;
1356
1357         case TEGRA_CL_DVFS_OPEN_LOOP:
1358                 return 0;
1359
1360         default:
1361                 BUG_ON(cld->mode > TEGRA_CL_DVFS_CLOSED_LOOP);
1362                 pr_err("%s: Cannot unlock DFLL in %s mode\n",
1363                        __func__, mode_name[cld->mode]);
1364                 return -EPERM;
1365         }
1366 }
1367
1368 /*
1369  * Convert requested rate into the control logic settings. In CLOSED_LOOP mode,
1370  * update new settings immediately to adjust DFLL output rate accordingly.
1371  * Otherwise, just save them until next switch to closed loop.
1372  */
1373 int tegra_cl_dvfs_request_rate(struct tegra_cl_dvfs *cld, unsigned long rate)
1374 {
1375         u32 val;
1376         struct dfll_rate_req req;
1377
1378         if (cld->mode == TEGRA_CL_DVFS_UNINITIALIZED) {
1379                 pr_err("%s: Cannot set DFLL rate in %s mode\n",
1380                        __func__, mode_name[cld->mode]);
1381                 return -EPERM;
1382         }
1383
1384         /* Calibrate dfll minimum rate */
1385         cl_dvfs_calibrate(cld);
1386
1387         /* Determine DFLL output scale */
1388         req.scale = SCALE_MAX - 1;
1389         if (rate < cld->dvco_rate_min) {
1390                 int scale = DIV_ROUND_UP((rate / 1000 * SCALE_MAX),
1391                         (cld->dvco_rate_min / 1000));
1392                 if (!scale) {
1393                         pr_err("%s: Rate %lu is below scalable range\n",
1394                                __func__, rate);
1395                         return -EINVAL;
1396                 }
1397                 req.scale = scale - 1;
1398                 rate = cld->dvco_rate_min;
1399         }
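        /*
         * Worked example (hypothetical rates): with dvco_rate_min = 714 MHz
         * a 300 MHz request gives scale = DIV_ROUND_UP(300000 * 256, 714000)
         * = 108, i.e. req.scale = 107; the skipper then outputs about
         * 714 MHz * 108 / 256 ~= 301 MHz while the frequency request itself
         * is made for dvco_rate_min.
         */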
1400
1401         /* Convert requested rate into frequency request and scale settings */
1402         val = GET_REQUEST_FREQ(rate, cld->ref_rate);
1403         if (val > FREQ_MAX) {
1404                 pr_err("%s: Rate %lu is above dfll range\n", __func__, rate);
1405                 return -EINVAL;
1406         }
1407         req.freq = val;
1408         rate = GET_REQUEST_RATE(val, cld->ref_rate);
1409
1410         /* Find safe voltage for requested rate */
1411         if (find_safe_output(cld, rate, &req.output)) {
1412                 pr_err("%s: Failed to find safe output for rate %lu\n",
1413                        __func__, rate);
1414                 return -EINVAL;
1415         }
1416         req.cap = req.output;
1417
	/*
	 * Save the validated request and, in CLOSED_LOOP mode, actually update
	 * the control logic settings; use the request output to set the
	 * maximum voltage limit, but keep one LUT step of headroom above the
	 * safe voltage.
	 */
	cld->last_req = req;

	if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
		set_cl_config(cld, &cld->last_req);
		set_request(cld, &cld->last_req);
	}
	return 0;
}

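/*
 * Return the rate most recently requested from the DFLL, reconstructed from
 * the saved frequency request and the output skipper scale.
 */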
unsigned long tegra_cl_dvfs_request_get(struct tegra_cl_dvfs *cld)
{
	struct dfll_rate_req *req = &cld->last_req;
	u32 rate = GET_REQUEST_RATE(req->freq, cld->ref_rate);
	if ((req->scale + 1) < SCALE_MAX) {
		rate = (rate / 1000 * (req->scale + 1)) / SCALE_MAX;
		rate *= 1000;
	}
	return rate;
}

#ifdef CONFIG_DEBUG_FS

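/*
 * debugfs "lock" attribute: reads back 1 when the DFLL is in closed loop, 0
 * otherwise; writes are forwarded to tegra_clk_cfg_ex(TEGRA_CLK_DFLL_LOCK).
 */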
static int lock_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP;
	return 0;
}
static int lock_set(void *data, u64 val)
{
	struct clk *c = (struct clk *)data;
	return tegra_clk_cfg_ex(c, TEGRA_CLK_DFLL_LOCK, val);
}
DEFINE_SIMPLE_ATTRIBUTE(lock_fops, lock_get, lock_set, "%llu\n");

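/*
 * debugfs "monitor" attribute: reports the masked CL_DVFS_MONITOR_DATA value;
 * if the monitor control is set to CL_DVFS_MONITOR_CTRL_FREQ, the sample is
 * converted to a rate and scaled by the current output skipper setting.
 */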
static int monitor_get(void *data, u64 *val)
{
	u32 v, s;
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;

	clk_enable(cld->soc_clk);

	clk_lock_save(c, &flags);
	v = cl_dvfs_readl(cld, CL_DVFS_MONITOR_DATA) &
		CL_DVFS_MONITOR_DATA_MASK;

	if (cl_dvfs_readl(cld, CL_DVFS_MONITOR_CTRL) ==
	    CL_DVFS_MONITOR_CTRL_FREQ) {
		v = GET_MONITORED_RATE(v, cld->ref_rate);
		s = cl_dvfs_readl(cld, CL_DVFS_FREQ_REQ);
		s = (s & CL_DVFS_FREQ_REQ_SCALE_MASK) >>
			CL_DVFS_FREQ_REQ_SCALE_SHIFT;
		*val = (u64)v * (s + 1) / 256;

		clk_unlock_restore(c, &flags);
		clk_disable(cld->soc_clk);
		return 0;
	}
	*val = v;

	clk_unlock_restore(c, &flags);
	clk_disable(cld->soc_clk);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");

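/*
 * debugfs "vmin_mv" attribute: reports the current minimum output voltage in
 * mV, taken either from the output config register or from the cached LUT
 * floor, depending on CL_DVFS_DYNAMIC_OUTPUT_CFG.
 */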
static int vmin_get(void *data, u64 *val)
{
	u32 v;
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;

#if CL_DVFS_DYNAMIC_OUTPUT_CFG
	clk_enable(cld->soc_clk);
	v = cl_dvfs_readl(cld, CL_DVFS_OUTPUT_CFG);
	v = (v & CL_DVFS_OUTPUT_CFG_MIN_MASK) >> CL_DVFS_OUTPUT_CFG_MIN_SHIFT;
	clk_disable(cld->soc_clk);
#else
	v = cld->lut_min;
#endif
	*val = cld->out_map[v]->reg_uV / 1000;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(vmin_fops, vmin_get, NULL, "%llu\n");

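/*
 * debugfs "tune_high_mv" attribute: reads or updates the minimum voltage of
 * the high tuning range; a write re-derives the output thresholds and, in
 * closed loop, immediately re-applies the current request.
 */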
static int tune_high_mv_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->safe_dvfs->dfll_data.tune_high_min_millivolts;
	return 0;
}
static int tune_high_mv_set(void *data, u64 val)
{
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_lock_save(c, &flags);

	cld->safe_dvfs->dfll_data.tune_high_min_millivolts = val;
	cl_dvfs_init_output_thresholds(cld);
	if (cld->mode == TEGRA_CL_DVFS_CLOSED_LOOP) {
		set_cl_config(cld, &cld->last_req);
		set_request(cld, &cld->last_req);
	}

	clk_unlock_restore(c, &flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(tune_high_mv_fops, tune_high_mv_get, tune_high_mv_set,
			"%llu\n");

static int fmin_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = cld->dvco_rate_min;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(dvco_rate_min_fops, fmin_get, NULL, "%llu\n");

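/*
 * debugfs "calibr_delay" attribute: the delay used by the dvco_rate_min
 * calibration timer, shown and set in ms (stored internally in jiffies).
 * Example shell usage; the debugfs mount point and clock directory name
 * below are illustrative only:
 *   cat /sys/kernel/debug/clock/dfll_cpu/cl_dvfs/calibr_delay
 *   echo 200 > /sys/kernel/debug/clock/dfll_cpu/cl_dvfs/calibr_delay
 */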
static int calibr_delay_get(void *data, u64 *val)
{
	struct tegra_cl_dvfs *cld = ((struct clk *)data)->u.dfll.cl_dvfs;
	*val = jiffies_to_msecs(cld->calibration_delay);
	return 0;
}
static int calibr_delay_set(void *data, u64 val)
{
	unsigned long flags;
	struct clk *c = (struct clk *)data;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_lock_save(c, &flags);
	cld->calibration_delay = msecs_to_jiffies(val);
	clk_unlock_restore(c, &flags);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(calibr_delay_fops, calibr_delay_get, calibr_delay_set,
			"%llu\n");

static int cl_register_show(struct seq_file *s, void *data)
{
	u32 offs;
	struct clk *c = s->private;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	clk_enable(cld->soc_clk);

	seq_printf(s, "CONTROL REGISTERS:\n");
	for (offs = 0; offs <= CL_DVFS_MONITOR_DATA; offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	seq_printf(s, "\nI2C and INTR REGISTERS:\n");
	for (offs = CL_DVFS_I2C_CFG; offs <= CL_DVFS_I2C_STS; offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	offs = CL_DVFS_INTR_STS;
	seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));
	offs = CL_DVFS_INTR_EN;
	seq_printf(s, "[0x%02x] = 0x%08x\n", offs, cl_dvfs_readl(cld, offs));

	seq_printf(s, "\nLUT:\n");
	for (offs = CL_DVFS_OUTPUT_LUT;
	     offs < CL_DVFS_OUTPUT_LUT + 4 * MAX_CL_DVFS_VOLTAGES;
	     offs += 4)
		seq_printf(s, "[0x%02x] = 0x%08x\n",
			   offs, cl_dvfs_readl(cld, offs));

	clk_disable(cld->soc_clk);
	return 0;
}

static int cl_register_open(struct inode *inode, struct file *file)
{
	return single_open(file, cl_register_show, inode->i_private);
}

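/*
 * debugfs "registers" write handler: accepts a single line in the same
 * "[0x<offset>] = 0x<value>" format used by the read-side dump. Example
 * (the offset and value below are illustrative only):
 *   echo "[0x0c] = 0x00000000" > registers
 */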
static ssize_t cl_register_write(struct file *file,
	const char __user *userbuf, size_t count, loff_t *ppos)
{
	char buf[80];
	u32 offs;
	u32 val;
	struct clk *c = file->f_path.dentry->d_inode->i_private;
	struct tegra_cl_dvfs *cld = c->u.dfll.cl_dvfs;

	if (sizeof(buf) <= count)
		return -EINVAL;

	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	/* terminate buffer and trim - whitespace may be appended
	 * at the end when invoked from the shell command line */
	buf[count] = '\0';
	strim(buf);

	if (sscanf(buf, "[0x%x] = 0x%x", &offs, &val) != 2)
		return -EINVAL;

	clk_enable(cld->soc_clk);
	cl_dvfs_writel(cld, val, offs & (~0x3));
	clk_disable(cld->soc_clk);
	return count;
}

static const struct file_operations cl_register_fops = {
	.open		= cl_register_open,
	.read		= seq_read,
	.write		= cl_register_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

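/*
 * Create the DFLL debugfs interface: a top-level "lock" node in the DFLL
 * clock directory, plus a "cl_dvfs" subdirectory with monitor, vmin_mv,
 * tune_high_mv, dvco_min, calibr_delay and registers nodes.
 */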
int __init tegra_cl_dvfs_debug_init(struct clk *dfll_clk)
{
	struct dentry *cl_dvfs_dentry;

	if (!dfll_clk || !dfll_clk->dent || (dfll_clk->state == UNINITIALIZED))
		return 0;

	if (!debugfs_create_file("lock", S_IRUGO | S_IWUSR,
		dfll_clk->dent, dfll_clk, &lock_fops))
		goto err_out;

	cl_dvfs_dentry = debugfs_create_dir("cl_dvfs", dfll_clk->dent);
	if (!cl_dvfs_dentry)
		goto err_out;

	if (!debugfs_create_file("monitor", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &monitor_fops))
		goto err_out;

	if (!debugfs_create_file("vmin_mv", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &vmin_fops))
		goto err_out;

	if (!debugfs_create_file("tune_high_mv", S_IRUGO | S_IWUSR,
		cl_dvfs_dentry, dfll_clk, &tune_high_mv_fops))
		goto err_out;

	if (!debugfs_create_file("dvco_min", S_IRUGO,
		cl_dvfs_dentry, dfll_clk, &dvco_rate_min_fops))
		goto err_out;

	if (!debugfs_create_file("calibr_delay", S_IRUGO | S_IWUSR,
		cl_dvfs_dentry, dfll_clk, &calibr_delay_fops))
		goto err_out;

	if (!debugfs_create_file("registers", S_IRUGO | S_IWUSR,
		cl_dvfs_dentry, dfll_clk, &cl_register_fops))
		goto err_out;

	return 0;

err_out:
	debugfs_remove_recursive(dfll_clk->dent);
	return -ENOMEM;
}
#endif