dvfs: tegra21: Rename thermal safe maximum frequency
[linux-3.10.git] / drivers / gpu / nvgpu / gm20b / clk_gm20b.c
1 /*
2  * GM20B Clocks
3  *
4  * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/clk.h>
20 #include <linux/delay.h>        /* for mdelay */
21 #include <linux/module.h>
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/clk/tegra.h>
25 #include <linux/tegra-fuse.h>
26
27 #include "gk20a/gk20a.h"
28 #include "hw_trim_gm20b.h"
29 #include "hw_timer_gm20b.h"
30 #include "hw_therm_gm20b.h"
31 #include "hw_fuse_gm20b.h"
32 #include "clk_gm20b.h"
33
/* Permit NA (noise-aware) mode without fused ADC calibration — not
 * referenced in this chunk; presumably checked at NA-mode init.
 * TODO(review): confirm at the usage site. */
#define ALLOW_NON_CALIBRATED_NA_MODE    1

/* Clock-domain debug trace wrapper */
#define gk20a_dbg_clk(fmt, arg...) \
        gk20a_dbg(gpu_dbg_clk, fmt, ##arg)

/* Signed field widths: DVFS detection delta and SDM input */
#define DFS_DET_RANGE   6       /* -2^6 ... 2^6-1 */
#define SDM_DIN_RANGE   12      /* -2^12 ... 2^12-1 */
/* Control bits used by clk_set_dfs_ext_cal() / the strobe sequences */
#define DFS_EXT_CAL_EN  BIT(9)
#define DFS_EXT_STROBE  BIT(16)

#define BOOT_GPU_UV     1000000 /* gpu rail boot voltage 1.0V */
#define ADC_SLOPE_UV    10000   /* default ADC detection slope 10mV */

#define DVFS_SAFE_MARGIN        10      /* 10% */
/* Thermal-safe maximum frequency; not assigned anywhere in this chunk —
 * presumably initialized during clock init (TODO: confirm). */
static unsigned long dvfs_safe_max_freq;
49
/*
 * GM20B GPCPLL hardware limits and NA-mode characteristics.
 * Frequency entries are in kHz (debug prints below report kHz); the
 * DFS coefficient slope/offset pair feeds clk_config_dvfs_detection(),
 * and the ADC slope/offset (uV) are filled in from fuses at runtime by
 * clk_config_calibration_params().
 */
static struct pll_parms gpc_pll_params = {
        128000,  2600000,       /* freq [kHz] */
        1300000, 2600000,       /* vco [kHz] */
        12000,   38400,         /* u = ref/M input rate [kHz] */
        1, 255,                 /* M */
        8, 255,                 /* N */
        1, 31,                  /* PL */
        -165230, 214007,        /* DFS_COEFF slope, offset */
        0, 0,                   /* ADC char coeff - to be read from fuses */
        0x7 << 3,               /* vco control in NA mode */
};
61
#ifdef CONFIG_DEBUG_FS
static int clk_gm20b_debugfs_init(struct gk20a *g);
#endif
/* Forward declaration: programs dynamic ramp step coefficients A/B */
static void clk_setup_slide(struct gk20a *g, u32 clk_u);
66
/*
 * Read and log one trim_sys register: expands addr_func into the
 * trim_sys_<addr_func>_r() address, reads it, and prints name, address
 * and value. Requires local u32 variables 'addr' and 'data' in scope.
 */
#define DUMP_REG(addr_func) \
do {                                                                    \
        addr = trim_sys_##addr_func##_r();                              \
        data = gk20a_readl(g, addr);                                    \
        pr_info(#addr_func "[0x%x] = 0x%x\n", addr, data);              \
} while (0)
73
74 static void dump_gpc_pll(struct gk20a *g, struct pll *gpll, u32 last_cfg)
75 {
76         u32 addr, data;
77
78         pr_info("**** GPCPLL DUMP ****");
79         pr_info("gpcpll s/w M=%u N=%u P=%u\n", gpll->M, gpll->N, gpll->PL);
80         pr_info("gpcpll_cfg_last = 0x%x\n", last_cfg);
81         DUMP_REG(gpcpll_cfg);
82         DUMP_REG(gpcpll_coeff);
83         DUMP_REG(sel_vco);
84         pr_info("\n");
85 }
86
87 /* 1:1 match between post divider settings and divisor value */
88 static inline u32 pl_to_div(u32 pl)
89 {
90         return pl;
91 }
92
93 static inline u32 div_to_pl(u32 div)
94 {
95         return div;
96 }
97
98 #define PLDIV_GLITCHLESS 1
99
100 #if PLDIV_GLITCHLESS
/*
 * Post divider transition is glitchless only if there is a common "1" in
 * the binary representation of the old and new settings.
 */
105 static u32 get_interim_pldiv(u32 old_pl, u32 new_pl)
106 {
107         u32 pl;
108
109         if (old_pl & new_pl)
110                 return 0;
111
112         pl = old_pl | BIT(ffs(new_pl) - 1);     /* pl never 0 */
113         new_pl |= BIT(ffs(old_pl) - 1);
114
115         return min(pl, new_pl);
116 }
117 #endif
118
/* Calculate and update M/N/PL as well as pll->freq
    ref_clk_f = clk_in_f;
    u_f = ref_clk_f / M;
    vco_f = u_f * N = ref_clk_f * N / M;
    PLL output = gpc2clk = target clock frequency = vco_f / pl_to_pdiv(PL);
    gpcclk = gpc2clk / 2;

   Exhaustively searches (M, N, PL) combinations within pll_params limits
   for the output closest to *target_freq. With best_fit set, the search
   only short-circuits on an exact match; otherwise any setting within
   ~0.45% of the target is accepted immediately. On return *target_freq
   holds the actually achievable frequency. */
static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
        struct pll_parms *pll_params, u32 *target_freq, bool best_fit)
{
        u32 min_vco_f, max_vco_f;
        u32 best_M, best_N;
        u32 low_PL, high_PL, best_PL;
        u32 m, n, n2;
        u32 target_vco_f, vco_f;
        u32 ref_clk_f, target_clk_f, u_f;
        u32 delta, lwv, best_delta = ~0;
        u32 pl;

        BUG_ON(target_freq == NULL);

        gk20a_dbg_fn("request target freq %d MHz", *target_freq);

        ref_clk_f = pll->clk_in;
        target_clk_f = *target_freq;
        max_vco_f = pll_params->max_vco;
        min_vco_f = pll_params->min_vco;
        best_M = pll_params->max_M;
        best_N = pll_params->min_N;
        best_PL = pll_params->min_PL;

        /* aim VCO 2% above the target to bias the search upward */
        target_vco_f = target_clk_f + target_clk_f / 50;
        if (max_vco_f < target_vco_f)
                max_vco_f = target_vco_f;

        /* Set PL search boundaries. */
        high_PL = div_to_pl((max_vco_f + target_vco_f - 1) / target_vco_f);
        high_PL = min(high_PL, pll_params->max_PL);
        high_PL = max(high_PL, pll_params->min_PL);

        low_PL = div_to_pl(min_vco_f / target_vco_f);
        low_PL = min(low_PL, pll_params->max_PL);
        low_PL = max(low_PL, pll_params->min_PL);

        gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
                        low_PL, pl_to_div(low_PL), high_PL, pl_to_div(high_PL));

        for (pl = low_PL; pl <= high_PL; pl++) {
                target_vco_f = target_clk_f * pl_to_div(pl);

                for (m = pll_params->min_M; m <= pll_params->max_M; m++) {
                        u_f = ref_clk_f / m;

                        /* u_f only shrinks as m grows - safe to stop early */
                        if (u_f < pll_params->min_u)
                                break;
                        if (u_f > pll_params->max_u)
                                continue;

                        /* N bracket [floor, ceil] around the target VCO */
                        n = (target_vco_f * m) / ref_clk_f;
                        n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

                        if (n > pll_params->max_N)
                                break;

                        for (; n <= n2; n++) {
                                if (n < pll_params->min_N)
                                        continue;
                                if (n > pll_params->max_N)
                                        break;

                                vco_f = ref_clk_f * n / m;

                                if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
                                        /* rounded output for this setting */
                                        lwv = (vco_f + (pl_to_div(pl) / 2))
                                                / pl_to_div(pl);
                                        delta = abs(lwv - target_clk_f);

                                        if (delta < best_delta) {
                                                best_delta = delta;
                                                best_M = m;
                                                best_N = n;
                                                best_PL = pl;

                                                if (best_delta == 0 ||
                                                    /* 0.45% for non best fit */
                                                    (!best_fit && (vco_f / best_delta > 218))) {
                                                        goto found_match;
                                                }

                                                gk20a_dbg_info("delta %d @ M %d, N %d, PL %d",
                                                        delta, m, n, pl);
                                        }
                                }
                        }
                }
        }

found_match:
        /* fires only when no in-range setting was found at all */
        BUG_ON(best_delta == ~0);

        if (best_fit && best_delta != 0)
                gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll",
                        target_clk_f);

        pll->M = best_M;
        pll->N = best_N;
        pll->PL = best_PL;

        /* save current frequency */
        pll->freq = ref_clk_f * pll->N / (pll->M * pl_to_div(pll->PL));

        *target_freq = pll->freq;

        gk20a_dbg_clk("actual target freq %d kHz, M %d, N %d, PL %d(div%d)",
                *target_freq, pll->M, pll->N, pll->PL, pl_to_div(pll->PL));

        gk20a_dbg_fn("done");

        return 0;
}
238
239 /* GPCPLL NA/DVFS mode methods */
240
241 /*
242  * Read ADC characteristic parmeters from fuses.
243  * Determine clibration settings.
244  */
245 static int clk_config_calibration_params(struct gk20a *g)
246 {
247         int slope, offs;
248         struct pll_parms *p = &gpc_pll_params;
249
250         if (!tegra_fuse_calib_gpcpll_get_adc(&slope, &offs)) {
251                 p->uvdet_slope = slope;
252                 p->uvdet_offs = offs;
253         }
254
255         if (!p->uvdet_slope || !p->uvdet_offs) {
256                 /*
257                  * If ADC conversion slope/offset parameters are not fused
258                  * (non-production config), report error, but allow to use
259                  * boot internal calibration with default slope.
260                  */
261                 gk20a_err(dev_from_gk20a(g), "ADC coeff are not fused\n");
262                 return -EINVAL;
263         }
264         return 0;
265 }
266
267 /*
268  * Determine DFS_COEFF for the requested voltage. Always select external
269  * calibration override equal to the voltage, and set maximum detection
270  * limit "0" (to make sure that PLL output remains under F/V curve when
271  * voltage increases).
272  */
273 static void clk_config_dvfs_detection(int mv, struct na_dvfs *d)
274 {
275         u32 coeff, coeff_max;
276         struct pll_parms *p = &gpc_pll_params;
277
278         coeff_max = trim_sys_gpcpll_dvfs0_dfs_coeff_v(
279                 trim_sys_gpcpll_dvfs0_dfs_coeff_m());
280         coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
281         coeff = DIV_ROUND_CLOSEST(coeff, 1000);
282         coeff = min(coeff, coeff_max);
283         d->dfs_coeff = coeff;
284
285         d->dfs_ext_cal = DIV_ROUND_CLOSEST(mv * 1000 - p->uvdet_offs,
286                                            p->uvdet_slope);
287         BUG_ON(abs(d->dfs_ext_cal) >= (1 << DFS_DET_RANGE));
288         d->uv_cal = p->uvdet_offs + d->dfs_ext_cal * p->uvdet_slope;
289         d->dfs_det_max = 0;
290 }
291
/*
 * Solve equation for integer and fractional part of the effective NDIV:
 *
 * n_eff = n_int + 1/2 + SDM_DIN / 2^(SDM_DIN_RANGE + 1) +
 * DVFS_COEFF * DVFS_DET_DELTA / 2^DFS_DET_RANGE
 *
 * The SDM_DIN LSB is finally shifted out, since it is not accessible by s/w.
 */
static void clk_config_dvfs_ndiv(int mv, u32 n_eff, struct na_dvfs *d)
{
        int n, det_delta;
        u32 rem, rem_range;
        struct pll_parms *p = &gpc_pll_params;

        /* voltage delta (in detection steps) relative to the external
         * calibration point, clamped from above by the detection limit */
        det_delta = DIV_ROUND_CLOSEST(mv * 1000 - p->uvdet_offs,
                                      p->uvdet_slope);
        det_delta -= d->dfs_ext_cal;
        det_delta = min(det_delta, d->dfs_det_max);
        det_delta = det_delta * d->dfs_coeff;

        /* n_eff in 2^DFS_DET_RANGE fixed point, DVFS term subtracted out */
        n = (int)(n_eff << DFS_DET_RANGE) - det_delta;
        BUG_ON((n < 0) || (n > (p->max_N << DFS_DET_RANGE)));
        d->n_int = ((u32)n) >> DFS_DET_RANGE;

        /* fractional remainder scaled to SDM range; subtracting
         * 2^SDM_DIN_RANGE accounts for the built-in 1/2 offset above */
        rem = ((u32)n) & ((1 << DFS_DET_RANGE) - 1);
        rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
        d->sdm_din = (rem << rem_range) - (1 << SDM_DIN_RANGE);
        /* drop the s/w-inaccessible LSB byte, keep the 8-bit field */
        d->sdm_din = (d->sdm_din >> BITS_PER_BYTE) & 0xff;
}
321
322 /* Voltage dependent configuration */
323 static void clk_config_dvfs(struct gk20a *g, struct pll *gpll)
324 {
325         struct na_dvfs *d = &gpll->dvfs;
326
327         d->mv = tegra_dvfs_predict_millivolts_t(
328                         clk_get_parent(g->clk.tegra_clk),
329                         rate_gpc2clk_to_gpu(gpll->freq));
330         clk_config_dvfs_detection(d->mv, d);
331         clk_config_dvfs_ndiv(d->mv, gpll->N, d);
332 }
333
/* Update DVFS detection settings in flight */

/* Write a new DFS coefficient while the PLL is running: assert the
 * external strobe, update the field, then release the strobe (the
 * sequence presumably latches the new value — hardware-defined). */
static void clk_set_dfs_coeff(struct gk20a *g, u32 dfs_coeff)
{
        u32 data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
        data |= DFS_EXT_STROBE;
        gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

        data = gk20a_readl(g, trim_sys_gpcpll_dvfs0_r());
        data = set_field(data, trim_sys_gpcpll_dvfs0_dfs_coeff_m(),
                trim_sys_gpcpll_dvfs0_dfs_coeff_f(dfs_coeff));
        gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);

        /* brief settle time before deasserting the strobe */
        data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
        udelay(1);
        data &= ~DFS_EXT_STROBE;
        gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
}
351
/* Write a new maximum DVFS detection limit while the PLL is running,
 * using the same strobe sequence as clk_set_dfs_coeff(). Currently has
 * no caller in this file (__maybe_unused). */
static void __maybe_unused clk_set_dfs_det_max(struct gk20a *g, u32 dfs_det_max)
{
        u32 data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
        data |= DFS_EXT_STROBE;
        gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

        data = gk20a_readl(g, trim_sys_gpcpll_dvfs0_r());
        data = set_field(data, trim_sys_gpcpll_dvfs0_dfs_det_max_m(),
                trim_sys_gpcpll_dvfs0_dfs_det_max_f(dfs_det_max));
        gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);

        /* brief settle time before deasserting the strobe */
        data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
        udelay(1);
        data &= ~DFS_EXT_STROBE;
        gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
}
368
/* Program the external calibration value and make sure external
 * calibration is enabled in the DVFS control field. */
static void clk_set_dfs_ext_cal(struct gk20a *g, u32 dfs_det_cal)
{
        u32 data;

        /* low DFS_DET_RANGE+1 bits of DVFS2 hold the calibration value */
        data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
        data &= ~(BIT(DFS_DET_RANGE + 1) - 1);
        data |= dfs_det_cal;
        gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

        data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
        udelay(1);
        /* enable external calibration only if not already enabled */
        if (~trim_sys_gpcpll_dvfs1_dfs_ctrl_v(data) & DFS_EXT_CAL_EN) {
                data = set_field(data, trim_sys_gpcpll_dvfs1_dfs_ctrl_m(),
                        trim_sys_gpcpll_dvfs1_dfs_ctrl_f(DFS_EXT_CAL_EN));
                gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);
        }
}
386
/* Program the full DVFS detection configuration from gpll->dvfs:
 * coefficient and max detection limit under external strobe, then the
 * external calibration value. */
static void clk_setup_dvfs_detection(struct gk20a *g, struct pll *gpll)
{
        struct na_dvfs *d = &gpll->dvfs;

        /* assert strobe while changing coefficient and detection limit */
        u32 data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
        data |= DFS_EXT_STROBE;
        gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

        data = gk20a_readl(g, trim_sys_gpcpll_dvfs0_r());
        data = set_field(data, trim_sys_gpcpll_dvfs0_dfs_coeff_m(),
                trim_sys_gpcpll_dvfs0_dfs_coeff_f(d->dfs_coeff));
        data = set_field(data, trim_sys_gpcpll_dvfs0_dfs_det_max_m(),
                trim_sys_gpcpll_dvfs0_dfs_det_max_f(d->dfs_det_max));
        gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);

        /* brief settle time before deasserting the strobe */
        data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
        udelay(1);
        data &= ~DFS_EXT_STROBE;
        gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

        clk_set_dfs_ext_cal(g, d->dfs_ext_cal);
}
409
/* Enable NA/DVFS mode */

/*
 * Switch GPCPLL into noise-aware (DVFS) mode: enable DFS, program VCO
 * control, take the PLL out of IDDQ, and set up the dynamic ramp. If
 * ADC calibration parameters are not fused, run the internal boot
 * calibration and derive the ADC offset from its readout assuming the
 * default slope.
 *
 * Note: the "enbale" typo in the name is kept — callers elsewhere use
 * it as-is.
 *
 * Returns 0 on success, -ETIMEDOUT if internal calibration never
 * completes.
 */
static int clk_enbale_pll_dvfs(struct gk20a *g)
{
        u32 data;
        int delay = 5;  /* use for iddq exit delay & calib timeout */
        struct pll_parms *p = &gpc_pll_params;
        bool calibrated = p->uvdet_slope && p->uvdet_offs;

        /* Enable NA DVFS */
        data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
        data |= trim_sys_gpcpll_dvfs1_en_dfs_m();
        gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);

        /* Set VCO_CTRL */
        if (p->vco_ctrl) {
                data = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
                data = set_field(data, trim_sys_gpcpll_cfg3_vco_ctrl_m(),
                                 trim_sys_gpcpll_cfg3_vco_ctrl_f(p->vco_ctrl));
                gk20a_writel(g, trim_sys_gpcpll_cfg3_r(), data);
        }

        /*
         * If calibration parameters are known (either from fuses, or from
         * internal calibration on boot) - use them. Internal calibration is
         * started anyway; it will complete, but results will not be used.
         */
        if (calibrated) {
                data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
                data |= trim_sys_gpcpll_dvfs1_en_dfs_cal_m();
                gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);
        }

        /* Exit IDDQ mode (readback flushes the write before the delay) */
        data = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        data = set_field(data, trim_sys_gpcpll_cfg_iddq_m(),
                         trim_sys_gpcpll_cfg_iddq_power_on_v());
        gk20a_writel(g, trim_sys_gpcpll_cfg_r(), data);
        gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        udelay(delay);

        /*
         * Dynamic ramp setup based on update rate, which in DVFS mode on GM20b
         * is always 38.4 MHz, the same as reference clock rate.
         */
        clk_setup_slide(g, g->clk.gpc_pll.clk_in);

        if (calibrated)
                return 0;

        /*
         * If calibration parameters are not fused, start internal calibration,
         * wait for completion, and use results along with default slope to
         * calculate ADC offset during boot.
         */
        data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
        data |= trim_sys_gpcpll_dvfs1_en_dfs_cal_m();
        gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);

        /* Wait for internal calibration done (spec < 2us). */
        do {
                data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
                if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data))
                        break;
                udelay(1);
                delay--;
        } while (delay > 0);

        if (delay <= 0) {
                gk20a_err(dev_from_gk20a(g), "GPCPLL calibration timeout");
                return -ETIMEDOUT;
        }

        /* derive ADC offset from the boot-voltage readout, default slope */
        data = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
        data = trim_sys_gpcpll_cfg3_dfs_testout_v(data);
        p->uvdet_offs = BOOT_GPU_UV - data * ADC_SLOPE_UV;
        p->uvdet_slope = ADC_SLOPE_UV;
        return 0;
}
488
489 /* GPCPLL slide methods */
490 static void clk_setup_slide(struct gk20a *g, u32 clk_u)
491 {
492         u32 data, step_a, step_b;
493
494         switch (clk_u) {
495         case 12000:
496         case 12800:
497         case 13000:                     /* only on FPGA */
498                 step_a = 0x2B;
499                 step_b = 0x0B;
500                 break;
501         case 19200:
502                 step_a = 0x12;
503                 step_b = 0x08;
504                 break;
505         case 38400:
506                 step_a = 0x04;
507                 step_b = 0x05;
508                 break;
509         default:
510                 gk20a_err(dev_from_gk20a(g), "Unexpected reference rate %u kHz",
511                           clk_u);
512                 BUG();
513         }
514
515         /* setup */
516         data = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
517         data = set_field(data, trim_sys_gpcpll_cfg2_pll_stepa_m(),
518                         trim_sys_gpcpll_cfg2_pll_stepa_f(step_a));
519         gk20a_writel(g, trim_sys_gpcpll_cfg2_r(), data);
520         data = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
521         data = set_field(data, trim_sys_gpcpll_cfg3_pll_stepb_m(),
522                         trim_sys_gpcpll_cfg3_pll_stepb_f(step_b));
523         gk20a_writel(g, trim_sys_gpcpll_cfg3_r(), data);
524 }
525
/*
 * Slide GPCPLL to the new NDIV (and, in DVFS mode, the new SDM fraction)
 * via the h/w dynamic ramp, without dropping to bypass. M and PL must
 * already match the target. Returns 0, or -ETIMEDOUT if the ramp never
 * reports completion.
 */
static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
{
        u32 data, coeff;
        u32 nold, sdm_old;
        int ramp_timeout = 500;

        /* get old coefficients */
        coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
        nold = trim_sys_gpcpll_coeff_ndiv_v(coeff);

        /* do nothing if NDIV is same */
        if (gpll->mode == GPC_PLL_MODE_DVFS) {
                /* in DVFS mode check both integer and fraction */
                coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
                sdm_old = trim_sys_gpcpll_cfg2_sdm_din_v(coeff);
                if ((gpll->dvfs.n_int == nold) &&
                    (gpll->dvfs.sdm_din == sdm_old))
                        return 0;
        } else {
                if (gpll->N == nold)
                        return 0;

                /* dynamic ramp setup based on update rate */
                clk_setup_slide(g, gpll->clk_in / gpll->M);
        }

        /* pll slowdown mode */
        data = gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
        data = set_field(data,
                        trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_m(),
                        trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_yes_f());
        gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);

        /* new ndiv ready for ramp */
        if (gpll->mode == GPC_PLL_MODE_DVFS) {
                /* in DVFS mode SDM is updated via "new" field */
                coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
                coeff = set_field(coeff, trim_sys_gpcpll_cfg2_sdm_din_new_m(),
                        trim_sys_gpcpll_cfg2_sdm_din_new_f(gpll->dvfs.sdm_din));
                gk20a_writel(g, trim_sys_gpcpll_cfg2_r(), coeff);

                coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
                coeff = set_field(coeff, trim_sys_gpcpll_coeff_ndiv_m(),
                        trim_sys_gpcpll_coeff_ndiv_f(gpll->dvfs.n_int));
                udelay(1);
                gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
        } else {
                coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
                coeff = set_field(coeff, trim_sys_gpcpll_coeff_ndiv_m(),
                                trim_sys_gpcpll_coeff_ndiv_f(gpll->N));
                udelay(1);
                gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
        }

        /* dynamic ramp to new ndiv */
        data = gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
        data = set_field(data,
                        trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(),
                        trim_sys_gpcpll_ndiv_slowdown_en_dynramp_yes_f());
        udelay(1);
        gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);

        /* poll for ramp completion (up to ~500us) */
        do {
                udelay(1);
                ramp_timeout--;
                data = gk20a_readl(
                        g, trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_r());
                if (trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(data))
                        break;
        } while (ramp_timeout > 0);

        if ((gpll->mode == GPC_PLL_MODE_DVFS) && (ramp_timeout > 0)) {
                /* in DVFS mode complete SDM update */
                coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
                coeff = set_field(coeff, trim_sys_gpcpll_cfg2_sdm_din_m(),
                        trim_sys_gpcpll_cfg2_sdm_din_f(gpll->dvfs.sdm_din));
                gk20a_writel(g, trim_sys_gpcpll_cfg2_r(), coeff);
        }

        /* exit slowdown mode (done even on timeout, before reporting it) */
        data = gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
        data = set_field(data,
                        trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_m(),
                        trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_no_f());
        data = set_field(data,
                        trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(),
                        trim_sys_gpcpll_ndiv_slowdown_en_dynramp_no_f());
        gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);
        gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());

        if (ramp_timeout <= 0) {
                gk20a_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
                return -ETIMEDOUT;
        }
        return 0;
}
622
/* GPCPLL bypass methods */

/* Change only the post divider (PLDIV) while the output is temporarily
 * switched to bypass, then switch back to the VCO. Always returns 0. */
static int clk_change_pldiv_under_bypass(struct gk20a *g, struct pll *gpll)
{
        u32 data, coeff;

        /* put PLL in bypass before programming it */
        data = gk20a_readl(g, trim_sys_sel_vco_r());
        data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
                trim_sys_sel_vco_gpc2clk_out_bypass_f());
        gk20a_writel(g, trim_sys_sel_vco_r(), data);

        /* change PLDIV */
        coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
        udelay(1);
        coeff = set_field(coeff, trim_sys_gpcpll_coeff_pldiv_m(),
                          trim_sys_gpcpll_coeff_pldiv_f(gpll->PL));
        gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);

        /* put PLL back on vco */
        data = gk20a_readl(g, trim_sys_sel_vco_r());
        udelay(1);
        data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
                trim_sys_sel_vco_gpc2clk_out_vco_f());
        gk20a_writel(g, trim_sys_sel_vco_r(), data);

        return 0;
}
650
/*
 * Fully reprogram GPCPLL under bypass: disable (or IDDQ-exit) the PLL,
 * write new M/N/PL (and DVFS settings in NA mode), re-enable, wait for
 * lock (or a fixed delay in DVFS mode, where the lock detect cannot be
 * used), then glitchlessly switch the output back to the VCO.
 * BUGs out (after a register dump) if the PLL never locks.
 */
static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
{
        u32 data, cfg, coeff, timeout;

        /* put PLL in bypass before programming it */
        data = gk20a_readl(g, trim_sys_sel_vco_r());
        data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
                trim_sys_sel_vco_gpc2clk_out_bypass_f());
        gk20a_writel(g, trim_sys_sel_vco_r(), data);

        cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        udelay(1);
        if (trim_sys_gpcpll_cfg_iddq_v(cfg)) {
                /* get out from IDDQ (1st power up) */
                cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
                                trim_sys_gpcpll_cfg_iddq_power_on_v());
                gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
                gk20a_readl(g, trim_sys_gpcpll_cfg_r());
                udelay(5);
        } else {
                /* clear SYNC_MODE before disabling PLL */
                cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
                                trim_sys_gpcpll_cfg_sync_mode_disable_f());
                gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
                gk20a_readl(g, trim_sys_gpcpll_cfg_r());

                /* disable running PLL before changing coefficients */
                cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
                                trim_sys_gpcpll_cfg_enable_no_f());
                gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
                gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        }

        /* change coefficients */
        if (gpll->mode == GPC_PLL_MODE_DVFS) {
                clk_setup_dvfs_detection(g, gpll);

                coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
                coeff = set_field(coeff, trim_sys_gpcpll_cfg2_sdm_din_m(),
                        trim_sys_gpcpll_cfg2_sdm_din_f(gpll->dvfs.sdm_din));
                gk20a_writel(g, trim_sys_gpcpll_cfg2_r(), coeff);

                /* NA mode uses the DVFS-derived integer NDIV */
                coeff = trim_sys_gpcpll_coeff_mdiv_f(gpll->M) |
                        trim_sys_gpcpll_coeff_ndiv_f(gpll->dvfs.n_int) |
                        trim_sys_gpcpll_coeff_pldiv_f(gpll->PL);
                gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
        } else {
                coeff = trim_sys_gpcpll_coeff_mdiv_f(gpll->M) |
                        trim_sys_gpcpll_coeff_ndiv_f(gpll->N) |
                        trim_sys_gpcpll_coeff_pldiv_f(gpll->PL);
                gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
        }

        /* enable PLL after changing coefficients */
        cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
                        trim_sys_gpcpll_cfg_enable_yes_f());
        gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);

        /* just delay in DVFS mode (lock cannot be used) */
        if (gpll->mode == GPC_PLL_MODE_DVFS) {
                gk20a_readl(g, trim_sys_gpcpll_cfg_r());
                udelay(g->clk.na_pll_delay);
                goto pll_locked;
        }

        /* lock pll: power on the lock detector if it is off */
        cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        if (cfg & trim_sys_gpcpll_cfg_enb_lckdet_power_off_f()){
                cfg = set_field(cfg, trim_sys_gpcpll_cfg_enb_lckdet_m(),
                        trim_sys_gpcpll_cfg_enb_lckdet_power_on_f());
                gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
                cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        }

        /* wait pll lock */
        timeout = g->clk.pll_delay + 1;
        do {
                udelay(1);
                cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
                if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f())
                        goto pll_locked;
        } while (--timeout > 0);

        /* PLL is messed up. What can we do here? */
        dump_gpc_pll(g, gpll, cfg);
        BUG();
        return -EBUSY;

pll_locked:
        gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x",
                trim_sys_gpcpll_cfg_r(), cfg);

        /* set SYNC_MODE for glitchless switch out of bypass */
        cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
                        trim_sys_gpcpll_cfg_sync_mode_enable_f());
        gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
        gk20a_readl(g, trim_sys_gpcpll_cfg_r());

        /* put PLL back on vco */
        data = gk20a_readl(g, trim_sys_sel_vco_r());
        data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
                trim_sys_sel_vco_gpc2clk_out_vco_f());
        gk20a_writel(g, trim_sys_sel_vco_r(), data);

        return 0;
}
758
759 /*
760  *  Change GPCPLL frequency:
761  *  - in legacy (non-DVFS) mode
762  *  - in DVFS mode at constant DVFS detection settings, matching current/lower
763  *    voltage; the same procedure can be used in this case, since maximum DVFS
764  *    detection limit makes sure that PLL output remains under F/V curve when
765  *    voltage increases arbitrary.
766  */
/*
 * Program GPCPLL to the configuration in *gpll_new.
 *
 * @g           GPU driver context
 * @gpll_new    target PLL settings (M, N, PL, clk_in, dvfs, mode)
 * @allow_slide non-zero permits dynamic NDIV ramping while the PLL runs;
 *              zero forces re-locking under bypass
 *
 * Returns 0 on success, or a negative error code propagated from the
 * NDIV slide steps.
 */
static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
			int allow_slide)
{
	u32 cfg, coeff, data;
	bool can_slide, pldiv_only;
	struct pll gpll;

	gk20a_dbg_fn("");

	/* no PLL h/w to program on simulation/emulation platforms */
	if (!tegra_platform_is_silicon())
		return 0;

	/* get old coefficients */
	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
	gpll.M = trim_sys_gpcpll_coeff_mdiv_v(coeff);
	gpll.N = trim_sys_gpcpll_coeff_ndiv_v(coeff);
	gpll.PL = trim_sys_gpcpll_coeff_pldiv_v(coeff);
	gpll.clk_in = gpll_new->clk_in;

	/* combine target dvfs with old coefficients */
	gpll.dvfs = gpll_new->dvfs;
	gpll.mode = gpll_new->mode;

	/* do NDIV slide if there is no change in M and PL */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	can_slide = allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg);

	if (can_slide && (gpll_new->M == gpll.M) && (gpll_new->PL == gpll.PL))
		return clk_slide_gpc_pll(g, gpll_new);

	/* slide down to NDIV_LO (minimum VCO at the current M) first */
	if (can_slide) {
		int ret;
		gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
				      gpll.clk_in);
		if (gpll.mode == GPC_PLL_MODE_DVFS)
			clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
		ret = clk_slide_gpc_pll(g, &gpll);
		if (ret)
			return ret;
	}
	/* with M unchanged only the post divider needs re-programming */
	pldiv_only = can_slide && (gpll_new->M == gpll.M);

	/*
	 *  Split FO-to-bypass jump in halfs by setting out divider 1:2.
	 *  (needed even if PLDIV_GLITCHLESS is set, since 1:1 <=> 1:2 direct
	 *  transition is not really glitch-less - see get_interim_pldiv
	 *  function header).
	 */
	if ((gpll_new->PL < 2) || (gpll.PL < 2)) {
		data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
		data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
			trim_sys_gpc2clk_out_vcodiv_f(2));
		gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
		/* Intentional 2nd write to assure linear divider operation */
		gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
		gk20a_readl(g, trim_sys_gpc2clk_out_r());
		udelay(2);
	}

#if PLDIV_GLITCHLESS
	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
	if (pldiv_only) {
		/* Insert interim PLDIV state if necessary */
		u32 interim_pl = get_interim_pldiv(gpll_new->PL, gpll.PL);
		if (interim_pl) {
			coeff = set_field(coeff,
				trim_sys_gpcpll_coeff_pldiv_m(),
				trim_sys_gpcpll_coeff_pldiv_f(interim_pl));
			gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
			coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
		}
		goto set_pldiv; /* path A: no need to bypass */
	}

	/* path B: bypass if either M changes or PLL is disabled */
#endif
	/*
	 * Program and lock pll under bypass. On exit PLL is out of bypass,
	 * enabled, and locked. VCO is at vco_min if sliding is allowed.
	 * Otherwise it is at VCO target (and therefore last slide call below
	 * is effectively NOP). PL is set to target. Output divider is engaged
	 * at 1:2 if either entry, or exit PL setting is 1:1.
	 */
	gpll = *gpll_new;
	if (allow_slide) {
		gpll.N = DIV_ROUND_UP(gpll_new->M * gpc_pll_params.min_vco,
				      gpll_new->clk_in);
		if (gpll.mode == GPC_PLL_MODE_DVFS)
			clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
	}
	if (pldiv_only)
		clk_change_pldiv_under_bypass(g, &gpll);
	else
		clk_lock_gpc_pll_under_bypass(g, &gpll);

#if PLDIV_GLITCHLESS
	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());

set_pldiv:
	/* coeff must be current from either path A or B */
	if (trim_sys_gpcpll_coeff_pldiv_v(coeff) != gpll_new->PL) {
		coeff = set_field(coeff, trim_sys_gpcpll_coeff_pldiv_m(),
			trim_sys_gpcpll_coeff_pldiv_f(gpll_new->PL));
		gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
	}
#endif
	/* restore out divider 1:1 */
	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
	if ((data & trim_sys_gpc2clk_out_vcodiv_m()) !=
	    trim_sys_gpc2clk_out_vcodiv_by1_f()) {
		data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
				 trim_sys_gpc2clk_out_vcodiv_by1_f());
		udelay(2);
		gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
		/* Intentional 2nd write to assure linear divider operation */
		gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
		gk20a_readl(g, trim_sys_gpc2clk_out_r());
	}

	/* slide up to target NDIV */
	return clk_slide_gpc_pll(g, gpll_new);
}
890
891 /* Find GPCPLL config safe at DVFS coefficient = 0, matching target frequency */
892 static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
893 {
894         u32 nsafe, nmin;
895
896         if (gpll->freq > dvfs_safe_max_freq)
897                 gpll->freq = gpll->freq * (100 - DVFS_SAFE_MARGIN) / 100;
898
899         nmin = DIV_ROUND_UP(gpll->M * gpc_pll_params.min_vco, gpll->clk_in);
900         nsafe = gpll->M * gpll->freq / gpll->clk_in;
901
902         /*
903          * If safe frequency is above VCOmin, it can be used in safe PLL config
904          * as is. Since safe frequency is below both old and new frequencies,
905          * in this case all three configurations have same post divider 1:1, and
906          * direct old=>safe=>new n-sliding will be used for transitions.
907          *
908          * Otherwise, if safe frequency is below VCO min, post-divider in safe
909          * configuration (and possibly in old and/or new configurations) is
910          * above 1:1, and each old=>safe and safe=>new transitions includes
911          * sliding to/from VCOmin, as well as divider changes. To avoid extra
912          * dynamic ramps from VCOmin during old=>safe transition and to VCOmin
913          * during safe=>new transition, select nmin as safe NDIV, and set safe
914          * post divider to assure PLL output is below safe frequency
915          */
916         if (nsafe < nmin) {
917                 gpll->PL = DIV_ROUND_UP(nmin * gpll->clk_in,
918                                         gpll->M * gpll->freq);
919                 nsafe = nmin;
920         }
921         gpll->N = nsafe;
922         clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs);
923
924         gk20a_dbg_clk("safe freq %d kHz, M %d, N %d, PL %d(div%d)",
925                 gpll->freq, gpll->M, gpll->N, gpll->PL, pl_to_div(gpll->PL));
926 }
927
928 /* Change GPCPLL frequency and DVFS detection settings in DVFS mode */
static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
				  int allow_slide)
{
	int ret;
	struct pll gpll_safe;
	struct pll *gpll_old = &g->clk.gpc_pll_last;

	BUG_ON(gpll_new->M != 1);	/* the only MDIV in NA mode  */
	/* derive DVFS coefficient / calibration settings for the target */
	clk_config_dvfs(g, gpll_new);

	/*
	 * In cases below no intermediate steps in PLL DVFS configuration are
	 * necessary because either
	 * - PLL DVFS will be configured under bypass directly to target, or
	 * - voltage is not changing, so DVFS detection settings are the same
	 */
	if (!allow_slide || !gpll_new->enabled ||
	    (gpll_old->dvfs.mv == gpll_new->dvfs.mv))
		return clk_program_gpc_pll(g, gpll_new, allow_slide);

	/*
	 * Interim step for changing DVFS detection settings: low enough
	 * frequency to be safe at DVFS coeff = 0.
	 *
	 * 1. If voltage is increasing:
	 * - safe frequency target matches the lowest - old - frequency
	 * - DVFS settings are still old
	 * - Voltage already increased to new level by tegra DVFS, but maximum
	 *    detection limit assures PLL output remains under F/V curve
	 *
	 * 2. If voltage is decreasing:
	 * - safe frequency target matches the lowest - new - frequency
	 * - DVFS settings are still old
	 * - Voltage is also old, it will be lowered by tegra DVFS afterwards
	 *
	 * Interim step can be skipped if old frequency is below safe minimum,
	 * i.e., it is low enough to be safe at any voltage in operating range
	 * with zero DVFS coefficient.
	 */
	if (gpll_old->freq > dvfs_safe_max_freq) {
		if (gpll_old->dvfs.mv < gpll_new->dvfs.mv) {
			/* voltage going up: keep old freq, new voltage level */
			gpll_safe = *gpll_old;
			gpll_safe.dvfs.mv = gpll_new->dvfs.mv;
		} else {
			/* voltage going down: new freq, old DVFS settings */
			gpll_safe = *gpll_new;
			gpll_safe.dvfs = gpll_old->dvfs;
		}
		clk_config_pll_safe_dvfs(g, &gpll_safe);

		ret = clk_program_gpc_pll(g, &gpll_safe, 1);
		if (ret) {
			gk20a_err(dev_from_gk20a(g), "Safe dvfs program fail\n");
			return ret;
		}
	}

	/*
	 * DVFS detection settings transition:
	 * - Set DVFS coefficient zero (safe, since already at frequency safe
	 *   at DVFS coeff = 0 for the lowest of the old/new end-points)
	 * - Set calibration level to new voltage (safe, since DVFS coeff = 0)
	 * - Set DVFS coefficient to match new voltage (safe, since already at
	 *   frequency safe at DVFS coeff = 0 for the lowest of the old/new
	 *   end-points.
	 */
	clk_set_dfs_coeff(g, 0);
	clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal);
	clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff);

	/* Finally set target rate (with DVFS detection settings already new) */
	return clk_program_gpc_pll(g, gpll_new, 1);
}
1001
/*
 * Disable GPCPLL: optionally slide down to NDIV_LO first (@allow_slide),
 * then switch gpc2clk output to bypass, clear SYNC_MODE, and power the
 * PLL off. Always returns 0.
 */
static int clk_disable_gpcpll(struct gk20a *g, int allow_slide)
{
	u32 cfg, coeff;
	struct clk_gk20a *clk = &g->clk;
	struct pll gpll = clk->gpc_pll;

	/* slide to VCO min */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	if (allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg)) {
		coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
		gpll.M = trim_sys_gpcpll_coeff_mdiv_v(coeff);
		gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
				      gpll.clk_in);
		if (gpll.mode == GPC_PLL_MODE_DVFS)
			clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
		clk_slide_gpc_pll(g, &gpll);
	}

	/* put PLL in bypass before disabling it */
	cfg = gk20a_readl(g, trim_sys_sel_vco_r());
	cfg = set_field(cfg, trim_sys_sel_vco_gpc2clk_out_m(),
			trim_sys_sel_vco_gpc2clk_out_bypass_f());
	gk20a_writel(g, trim_sys_sel_vco_r(), cfg);

	/* clear SYNC_MODE before disabling PLL */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
			trim_sys_gpcpll_cfg_sync_mode_disable_f());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);

	/* disable PLL */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
			trim_sys_gpcpll_cfg_enable_no_f());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
	/* read back to make sure the disable write reached the h/w */
	gk20a_readl(g, trim_sys_gpcpll_cfg_r());

	clk->gpc_pll.enabled = false;
	clk->gpc_pll_last.enabled = false;
	return 0;
}
1043
/* H/w reset/enable hook; GM20B needs no work here beyond trace logging. */
static int gm20b_init_clk_reset_enable_hw(struct gk20a *g)
{
	gk20a_dbg_fn("");
	return 0;
}
1049
1050 struct clk *gm20b_clk_get(struct gk20a *g)
1051 {
1052         if (!g->clk.tegra_clk) {
1053                 struct clk *clk;
1054
1055                 clk = clk_get_sys("tegra_gk20a", "gpu");
1056                 if (IS_ERR(clk)) {
1057                         gk20a_err(dev_from_gk20a(g),
1058                                 "fail to get tegra gpu clk tegra_gk20a/gpu");
1059                         return NULL;
1060                 }
1061                 g->clk.tegra_clk = clk;
1062         }
1063
1064         return g->clk.tegra_clk;
1065 }
1066
/*
 * One-time s/w initialization of GPCPLL state: reference clock rate,
 * thermally safe maximum frequency, initial (boot) M/N/PL settings, and
 * selection of DVFS (noise-aware) mode when supported. Idempotent: skips
 * work once clk->sw_ready is set.
 */
static int gm20b_init_clk_setup_sw(struct gk20a *g)
{
	struct clk_gk20a *clk = &g->clk;
	unsigned long safe_rate;
	struct clk *ref;
	bool calibrated;

	gk20a_dbg_fn("");

	if (clk->sw_ready) {
		gk20a_dbg_fn("skip init");
		return 0;
	}

	if (!gk20a_clk_get(g))
		return -EINVAL;

	/*
	 * On Tegra GPU clock exposed to frequency governor is a shared user on
	 * GPCPLL bus (gbus). The latter can be accessed as GPU clock parent.
	 * Respectively the grandparent is PLL reference clock.
	 */
	ref = clk_get_parent(clk_get_parent(clk->tegra_clk));
	if (IS_ERR(ref)) {
		gk20a_err(dev_from_gk20a(g),
			"failed to get GPCPLL reference clock");
		return -EINVAL;
	}

	/*
	 * Locking time in both legacy and DVFS mode is 40us. However, in legacy
	 * mode we rely on lock detection signal, and delay is just timeout
	 * limit, so we can afford set it longer. In DVFS mode each lock inserts
	 * specified delay, so it should be set as short as h/w allows.
	 */
	clk->pll_delay = 300; /* usec */
	clk->na_pll_delay = 40; /* usec*/

	clk->gpc_pll.id = GK20A_GPC_PLL;
	clk->gpc_pll.clk_in = clk_get_rate(ref) / KHZ;

	/*
	 * Thermally safe gpc2clk maximum with extra DVFS_SAFE_MARGIN derating;
	 * PL chosen so VCOmin output stays at/below the safe frequency.
	 * NOTE(review): assumes tegra_dvfs_get_fmax_at_vmin_safe_t returns a
	 * rate in the units rate_gpu_to_gpc2clk expects - confirm with tegra
	 * dvfs code.
	 */
	safe_rate = tegra_dvfs_get_fmax_at_vmin_safe_t(
		clk_get_parent(clk->tegra_clk));
	safe_rate = safe_rate * (100 - DVFS_SAFE_MARGIN) / 100;
	dvfs_safe_max_freq = rate_gpu_to_gpc2clk(safe_rate);
	clk->gpc_pll.PL = DIV_ROUND_UP(gpc_pll_params.min_vco,
				       dvfs_safe_max_freq);

	/* Initial frequency: 1/3 VCO min (low enough to be safe at Vmin) */
	clk->gpc_pll.M = 1;
	clk->gpc_pll.N = DIV_ROUND_UP(gpc_pll_params.min_vco,
				clk->gpc_pll.clk_in);
	clk->gpc_pll.PL = max(clk->gpc_pll.PL, 3U);
	clk->gpc_pll.freq = clk->gpc_pll.clk_in * clk->gpc_pll.N;
	clk->gpc_pll.freq /= pl_to_div(clk->gpc_pll.PL);

	calibrated = !clk_config_calibration_params(g);
#ifdef CONFIG_TEGRA_USE_NA_GPCPLL
	if (ALLOW_NON_CALIBRATED_NA_MODE || calibrated) {
		/* NA mode is supported only at max update rate 38.4 MHz */
		if (clk->gpc_pll.clk_in == gpc_pll_params.max_u) {
			clk->gpc_pll.mode = GPC_PLL_MODE_DVFS;
			gpc_pll_params.min_u = gpc_pll_params.max_u;
		}
	}
#endif

	mutex_init(&clk->clk_mutex);

	clk->sw_ready = true;

	gk20a_dbg_fn("done");
	pr_info("GM20b GPCPLL initial settings:%s M=%u, N=%u, P=%u\n",
		clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "",
		clk->gpc_pll.M, clk->gpc_pll.N, clk->gpc_pll.PL);
	return 0;
}
1144
/*
 * H/w side of clock bring-up: output dividers, global bypass control,
 * RAM SVOP fuse override, idle slowdown disable, and PLL DVFS enabling
 * when running in NA mode. Returns 0 or the DVFS-enable result.
 */
static int gm20b_init_clk_setup_hw(struct gk20a *g)
{
	u32 data;

	gk20a_dbg_fn("");

	/* LDIV: Div4 mode (required); both  bypass and vco ratios 1:1 */
	data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
	data = set_field(data,
			trim_sys_gpc2clk_out_sdiv14_m() |
			trim_sys_gpc2clk_out_vcodiv_m() |
			trim_sys_gpc2clk_out_bypdiv_m(),
			trim_sys_gpc2clk_out_sdiv14_indiv4_mode_f() |
			trim_sys_gpc2clk_out_vcodiv_by1_f() |
			trim_sys_gpc2clk_out_bypdiv_f(0));
	gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);

	/*
	 * Clear global bypass control; PLL is still under bypass, since SEL_VCO
	 * is cleared by default.
	 */
	data = gk20a_readl(g, trim_sys_bypassctrl_r());
	data = set_field(data, trim_sys_bypassctrl_gpcpll_m(),
			 trim_sys_bypassctrl_gpcpll_vco_f());
	gk20a_writel(g, trim_sys_bypassctrl_r(), data);

	/* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
	data = gk20a_readl(g, fuse_ctrl_opt_ram_svop_pdp_r());
	if (!fuse_ctrl_opt_ram_svop_pdp_data_v(data)) {
		data = set_field(data, fuse_ctrl_opt_ram_svop_pdp_data_m(),
			 fuse_ctrl_opt_ram_svop_pdp_data_f(0x2));
		gk20a_writel(g, fuse_ctrl_opt_ram_svop_pdp_r(), data);
		data = gk20a_readl(g, fuse_ctrl_opt_ram_svop_pdp_override_r());
		data = set_field(data,
			fuse_ctrl_opt_ram_svop_pdp_override_data_m(),
			fuse_ctrl_opt_ram_svop_pdp_override_data_yes_f());
		gk20a_writel(g, fuse_ctrl_opt_ram_svop_pdp_override_r(), data);
	}

	/* Disable idle slow down */
	data = gk20a_readl(g, therm_clk_slowdown_r(0));
	data = set_field(data, therm_clk_slowdown_idle_factor_m(),
			 therm_clk_slowdown_idle_factor_disabled_f());
	gk20a_writel(g, therm_clk_slowdown_r(0), data);
	/* read back to make sure the write reached the h/w */
	gk20a_readl(g, therm_clk_slowdown_r(0));

	/* (sic: "enbale" matches the helper's definition elsewhere in file) */
	if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS)
		return clk_enbale_pll_dvfs(g);

	return 0;
}
1196
1197 static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
1198 {
1199         struct clk_gk20a *clk = &g->clk;
1200
1201         if (freq > gpc_pll_params.max_freq)
1202                 freq = gpc_pll_params.max_freq;
1203         else if (freq < gpc_pll_params.min_freq)
1204                 freq = gpc_pll_params.min_freq;
1205
1206         if (freq != old_freq) {
1207                 /* gpc_pll.freq is changed to new value here */
1208                 if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
1209                                    &freq, true)) {
1210                         gk20a_err(dev_from_gk20a(g),
1211                                    "failed to set pll target for %d", freq);
1212                         return -EINVAL;
1213                 }
1214         }
1215         return 0;
1216 }
1217
1218 static int set_pll_freq(struct gk20a *g, int allow_slide)
1219 {
1220         struct clk_gk20a *clk = &g->clk;
1221         int err = 0;
1222
1223         gk20a_dbg_fn("last freq: %dMHz, target freq %dMHz",
1224                      clk->gpc_pll_last.freq, clk->gpc_pll.freq);
1225
1226         /* If programming with dynamic sliding failed, re-try under bypass */
1227         if (clk->gpc_pll.mode == GPC_PLL_MODE_DVFS) {
1228                 err = clk_program_na_gpc_pll(g, &clk->gpc_pll, allow_slide);
1229                 if (err && allow_slide)
1230                         err = clk_program_na_gpc_pll(g, &clk->gpc_pll, 0);
1231         } else {
1232                 err = clk_program_gpc_pll(g, &clk->gpc_pll, allow_slide);
1233                 if (err && allow_slide)
1234                         err = clk_program_gpc_pll(g, &clk->gpc_pll, 0);
1235         }
1236
1237         if (!err) {
1238                 clk->gpc_pll.enabled = true;
1239                 clk->gpc_pll_last = clk->gpc_pll;
1240                 return 0;
1241         }
1242
1243         /*
1244          * Just report error but not restore PLL since dvfs could already change
1245          * voltage even when programming failed.
1246          */
1247         gk20a_err(dev_from_gk20a(g), "failed to set pll to %d",
1248                   clk->gpc_pll.freq);
1249         return err;
1250 }
1251
1252 static int gm20b_clk_export_set_rate(void *data, unsigned long *rate)
1253 {
1254         u32 old_freq;
1255         int ret = -ENODATA;
1256         struct gk20a *g = data;
1257         struct clk_gk20a *clk = &g->clk;
1258
1259         if (rate) {
1260                 mutex_lock(&clk->clk_mutex);
1261                 old_freq = clk->gpc_pll.freq;
1262                 ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq);
1263                 if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on)
1264                         ret = set_pll_freq(g, 1);
1265                 if (!ret)
1266                         *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq);
1267                 mutex_unlock(&clk->clk_mutex);
1268         }
1269         return ret;
1270 }
1271
1272 static int gm20b_clk_export_enable(void *data)
1273 {
1274         int ret = 0;
1275         struct gk20a *g = data;
1276         struct clk_gk20a *clk = &g->clk;
1277
1278         mutex_lock(&clk->clk_mutex);
1279         if (!clk->gpc_pll.enabled && clk->clk_hw_on)
1280                 ret = set_pll_freq(g, 1);
1281         mutex_unlock(&clk->clk_mutex);
1282         return ret;
1283 }
1284
1285 static void gm20b_clk_export_disable(void *data)
1286 {
1287         struct gk20a *g = data;
1288         struct clk_gk20a *clk = &g->clk;
1289
1290         mutex_lock(&clk->clk_mutex);
1291         if (clk->gpc_pll.enabled && clk->clk_hw_on)
1292                 clk_disable_gpcpll(g, 1);
1293         mutex_unlock(&clk->clk_mutex);
1294 }
1295
1296 static void gm20b_clk_export_init(void *data, unsigned long *rate, bool *state)
1297 {
1298         struct gk20a *g = data;
1299         struct clk_gk20a *clk = &g->clk;
1300
1301         mutex_lock(&clk->clk_mutex);
1302         if (state)
1303                 *state = clk->gpc_pll.enabled;
1304         if (rate)
1305                 *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq);
1306         mutex_unlock(&clk->clk_mutex);
1307 }
1308
/* Callbacks exported to the Tegra clock framework for the GPU shared bus;
 * .data is filled in at registration time (see register_export_ops below) */
static struct tegra_clk_export_ops gm20b_clk_export_ops = {
	.init = gm20b_clk_export_init,
	.enable = gm20b_clk_export_enable,
	.disable = gm20b_clk_export_disable,
	.set_rate = gm20b_clk_export_set_rate,
};
1315
1316 static int gm20b_clk_register_export_ops(struct gk20a *g)
1317 {
1318         int ret;
1319         struct clk *c;
1320
1321         if (gm20b_clk_export_ops.data)
1322                 return 0;
1323
1324         gm20b_clk_export_ops.data = (void *)g;
1325         c = g->clk.tegra_clk;
1326         if (!c || !clk_get_parent(c))
1327                 return -ENOSYS;
1328
1329         ret = tegra_clk_register_export_ops(clk_get_parent(c),
1330                                             &gm20b_clk_export_ops);
1331
1332         return ret;
1333 }
1334
1335 static int gm20b_init_clk_support(struct gk20a *g)
1336 {
1337         struct clk_gk20a *clk = &g->clk;
1338         u32 err;
1339
1340         gk20a_dbg_fn("");
1341
1342         clk->g = g;
1343
1344         err = gm20b_init_clk_reset_enable_hw(g);
1345         if (err)
1346                 return err;
1347
1348         err = gm20b_init_clk_setup_sw(g);
1349         if (err)
1350                 return err;
1351
1352         mutex_lock(&clk->clk_mutex);
1353         clk->clk_hw_on = true;
1354
1355         err = gm20b_init_clk_setup_hw(g);
1356         mutex_unlock(&clk->clk_mutex);
1357         if (err)
1358                 return err;
1359
1360         err = gm20b_clk_register_export_ops(g);
1361         if (err)
1362                 return err;
1363
1364         /* FIXME: this effectively prevents host level clock gating */
1365         err = clk_enable(g->clk.tegra_clk);
1366         if (err)
1367                 return err;
1368
1369         /* The prev call may not enable PLL if gbus is unbalanced - force it */
1370         mutex_lock(&clk->clk_mutex);
1371         if (!clk->gpc_pll.enabled)
1372                 err = set_pll_freq(g, 1);
1373         mutex_unlock(&clk->clk_mutex);
1374         if (err)
1375                 return err;
1376
1377 #ifdef CONFIG_DEBUG_FS
1378         if (!clk->debugfs_set) {
1379                 if (!clk_gm20b_debugfs_init(g))
1380                         clk->debugfs_set = true;
1381         }
1382 #endif
1383         return err;
1384 }
1385
1386 static int gm20b_suspend_clk_support(struct gk20a *g)
1387 {
1388         int ret = 0;
1389
1390         clk_disable(g->clk.tegra_clk);
1391
1392         /* The prev call may not disable PLL if gbus is unbalanced - force it */
1393         mutex_lock(&g->clk.clk_mutex);
1394         if (g->clk.gpc_pll.enabled)
1395                 ret = clk_disable_gpcpll(g, 1);
1396         g->clk.clk_hw_on = false;
1397         mutex_unlock(&g->clk.clk_mutex);
1398         return ret;
1399 }
1400
1401 void gm20b_init_clk_ops(struct gpu_ops *gops)
1402 {
1403         gops->clk.init_clk_support = gm20b_init_clk_support;
1404         gops->clk.suspend_clk_support = gm20b_suspend_clk_support;
1405 }
1406
1407 #ifdef CONFIG_DEBUG_FS
1408
1409 static int rate_get(void *data, u64 *val)
1410 {
1411         struct gk20a *g = (struct gk20a *)data;
1412         *val = (u64)gk20a_clk_get_rate(g);
1413         return 0;
1414 }
1415 static int rate_set(void *data, u64 val)
1416 {
1417         struct gk20a *g = (struct gk20a *)data;
1418         return gk20a_clk_set_rate(g, (u32)val);
1419 }
1420 DEFINE_SIMPLE_ATTRIBUTE(rate_fops, rate_get, rate_set, "%llu\n");
1421
/*
 * debugfs "pll_reg" reader: decoded dump of GPCPLL bypass/select/config
 * state and coefficients, plus the frequency computed from them.
 */
static int pll_reg_show(struct seq_file *s, void *data)
{
	struct gk20a *g = s->private;
	u32 reg, m, n, pl, f;

	mutex_lock(&g->clk.clk_mutex);
	if (!g->clk.clk_hw_on) {
		seq_printf(s, "gk20a powered down - no access to registers\n");
		mutex_unlock(&g->clk.clk_mutex);
		return 0;
	}

	reg = gk20a_readl(g, trim_sys_bypassctrl_r());
	seq_printf(s, "bypassctrl = %s, ", reg ? "bypass" : "vco");
	reg = gk20a_readl(g, trim_sys_sel_vco_r());
	seq_printf(s, "sel_vco = %s, ", reg ? "vco" : "bypass");

	reg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	seq_printf(s, "cfg  = 0x%x : %s : %s : %s\n", reg,
		trim_sys_gpcpll_cfg_enable_v(reg) ? "enabled" : "disabled",
		trim_sys_gpcpll_cfg_pll_lock_v(reg) ? "locked" : "unlocked",
		trim_sys_gpcpll_cfg_sync_mode_v(reg) ? "sync_on" : "sync_off");

	reg = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
	m = trim_sys_gpcpll_coeff_mdiv_v(reg);
	n = trim_sys_gpcpll_coeff_ndiv_v(reg);
	pl = trim_sys_gpcpll_coeff_pldiv_v(reg);
	/* NOTE(review): divides by m * pl_to_div(pl); assumes the coefficient
	 * register never reads back with zero MDIV/div - confirm h/w reset
	 * values before relying on this node early in bring-up */
	f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div(pl));
	seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl);
	seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2);
	mutex_unlock(&g->clk.clk_mutex);
	return 0;
}
1455
/* seq_file open hook for the decoded PLL register dump */
static int pll_reg_open(struct inode *inode, struct file *file)
{
	return single_open(file, pll_reg_show, inode->i_private);
}

/* Read-only debugfs node backed by pll_reg_show */
static const struct file_operations pll_reg_fops = {
	.open		= pll_reg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1467
1468 static int pll_reg_raw_show(struct seq_file *s, void *data)
1469 {
1470         struct gk20a *g = s->private;
1471         u32 reg;
1472
1473         mutex_lock(&g->clk.clk_mutex);
1474         if (!g->clk.clk_hw_on) {
1475                 seq_puts(s, "gk20a powered down - no access to registers\n");
1476                 mutex_unlock(&g->clk.clk_mutex);
1477                 return 0;
1478         }
1479
1480         seq_puts(s, "GPCPLL REGISTERS:\n");
1481         for (reg = trim_sys_gpcpll_cfg_r(); reg <= trim_sys_gpcpll_dvfs2_r();
1482               reg += sizeof(u32))
1483                 seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));
1484
1485         seq_puts(s, "\nGPC CLK OUT REGISTERS:\n");
1486
1487         reg = trim_sys_sel_vco_r();
1488         seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));
1489         reg = trim_sys_gpc2clk_out_r();
1490         seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));
1491         reg = trim_sys_bypassctrl_r();
1492         seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));
1493
1494         mutex_unlock(&g->clk.clk_mutex);
1495         return 0;
1496 }
1497
/* seq_file open hook for the raw PLL register dump */
static int pll_reg_raw_open(struct inode *inode, struct file *file)
{
	return single_open(file, pll_reg_raw_show, inode->i_private);
}

/*
 * debugfs raw write handler: accepts "[0xADDR] = 0xVALUE" (the same
 * format the raw dump prints) and writes VALUE to ADDR, restricted to
 * the GPCPLL register window plus SEL_VCO, GPC2CLK_OUT and BYPASSCTRL.
 * Returns the consumed byte count, or a negative error code.
 */
static ssize_t pll_reg_raw_write(struct file *file,
	const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct gk20a *g = file->f_path.dentry->d_inode->i_private;
	char buf[80];	/* count < sizeof(buf) enforced below leaves room for '\0' */
	u32 reg, val;

	if (sizeof(buf) <= count)
		return -EINVAL;

	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	/* terminate buffer and trim - white spaces may be appended
	 *  at the end when invoked from shell command line */
	buf[count] = '\0';
	strim(buf);

	if (sscanf(buf, "[0x%x] = 0x%x", &reg, &val) != 2)
		return -EINVAL;

	/* refuse addresses outside the allowed clock register set */
	if (((reg < trim_sys_gpcpll_cfg_r()) ||
	    (reg > trim_sys_gpcpll_dvfs2_r())) &&
	    (reg != trim_sys_sel_vco_r()) &&
	    (reg != trim_sys_gpc2clk_out_r()) &&
	    (reg != trim_sys_bypassctrl_r()))
		return -EPERM;

	mutex_lock(&g->clk.clk_mutex);
	if (!g->clk.clk_hw_on) {
		/* GPU is powered down - registers are inaccessible */
		mutex_unlock(&g->clk.clk_mutex);
		return -EBUSY;
	}
	gk20a_writel(g, reg, val);
	mutex_unlock(&g->clk.clk_mutex);
	return count;
}

/* Read/write debugfs node: raw dump via show, register poke via write */
static const struct file_operations pll_reg_raw_fops = {
	.open		= pll_reg_raw_open,
	.read		= seq_read,
	.write		= pll_reg_raw_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};
1548
/*
 * Measure the current GPC clock frequency with the on-chip clock
 * counter and report it (in Hz) through *val.
 *
 * The hardware counter accumulates GPCCLK cycles over a fixed number
 * (ncycle) of reference clock (gpc_pll.clk_in) cycles, so:
 *
 *	freq = clk_in * counted_gpcclk_cycles / ncycle
 *
 * Returns 0 on success, -EBUSY if the counter value was still changing
 * when sampled (measurement not settled), or the gk20a_busy() error.
 */
static int monitor_get(void *data, u64 *val)
{
	struct gk20a *g = (struct gk20a *)data;
	struct clk_gk20a *clk = &g->clk;
	u32 clk_slowdown, clk_slowdown_save;
	int err;

	u32 ncycle = 800; /* count GPCCLK for ncycle of clkin */
	u64 freq = clk->gpc_pll.clk_in;
	u32 count1, count2;

	/* Keep the GPU powered on for the duration of the measurement. */
	err = gk20a_busy(g->dev);
	if (err)
		return err;

	mutex_lock(&g->clk.clk_mutex);

	/* Disable clock slowdown during measurements */
	clk_slowdown_save = gk20a_readl(g, therm_clk_slowdown_r(0));
	clk_slowdown = set_field(clk_slowdown_save,
				 therm_clk_slowdown_idle_factor_m(),
				 therm_clk_slowdown_idle_factor_disabled_f());
	gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown);
	/* Read back to make sure the write has posted before counting. */
	gk20a_readl(g, therm_clk_slowdown_r(0));

	/* Reset the counter, then arm it for a window of ncycle clkin ticks. */
	gk20a_writel(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0),
		     trim_gpc_clk_cntr_ncgpcclk_cfg_reset_asserted_f());
	gk20a_writel(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0),
		     trim_gpc_clk_cntr_ncgpcclk_cfg_enable_asserted_f() |
		     trim_gpc_clk_cntr_ncgpcclk_cfg_write_en_asserted_f() |
		     trim_gpc_clk_cntr_ncgpcclk_cfg_noofipclks_f(ncycle));
	/* start */

	/* It should take less than 25us to finish 800 cycle of 38.4MHz.
	   But longer than 100us delay is required here. */
	gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0));
	udelay(200);

	/* Sample twice: identical reads prove the count has latched. */
	count1 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0));
	udelay(100);
	count2 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0));
	freq *= trim_gpc_clk_cntr_ncgpcclk_cnt_value_v(count2);
	do_div(freq, ncycle);
	*val = freq;

	/* Restore clock slowdown */
	gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown_save);
	mutex_unlock(&g->clk.clk_mutex);

	gk20a_idle(g->dev);

	if (count1 != count2)
		return -EBUSY;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
1605
1606 static int clk_gm20b_debugfs_init(struct gk20a *g)
1607 {
1608         struct dentry *d;
1609         struct gk20a_platform *platform = platform_get_drvdata(g->dev);
1610
1611         d = debugfs_create_file(
1612                 "rate", S_IRUGO|S_IWUSR, platform->debugfs, g, &rate_fops);
1613         if (!d)
1614                 goto err_out;
1615
1616         d = debugfs_create_file(
1617                 "pll_reg", S_IRUGO, platform->debugfs, g, &pll_reg_fops);
1618         if (!d)
1619                 goto err_out;
1620
1621         d = debugfs_create_file("pll_reg_raw",
1622                 S_IRUGO, platform->debugfs, g, &pll_reg_raw_fops);
1623         if (!d)
1624                 goto err_out;
1625
1626         d = debugfs_create_file(
1627                 "monitor", S_IRUGO, platform->debugfs, g, &monitor_fops);
1628         if (!d)
1629                 goto err_out;
1630
1631         return 0;
1632
1633 err_out:
1634         pr_err("%s: Failed to make debugfs node\n", __func__);
1635         debugfs_remove_recursive(platform->debugfs);
1636         return -ENOMEM;
1637 }
1638
1639 #endif /* CONFIG_DEBUG_FS */