eb7703bdcd809b3be30f2668381a0c52a985e60a
[linux-3.10.git] / drivers / gpu / nvgpu / gm20b / clk_gm20b.c
1 /*
2  * GM20B Clocks
3  *
4  * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/clk.h>
20 #include <linux/delay.h>        /* for mdelay */
21 #include <linux/module.h>
22 #include <linux/debugfs.h>
23 #include <linux/uaccess.h>
24 #include <linux/clk/tegra.h>
25 #include <linux/tegra-fuse.h>
26
27 #include "gk20a/gk20a.h"
28 #include "hw_trim_gm20b.h"
29 #include "hw_timer_gm20b.h"
30 #include "hw_therm_gm20b.h"
31 #include "clk_gm20b.h"
32
33 #define ALLOW_NON_CALIBRATED_NA_MODE    1
34
35 #define gk20a_dbg_clk(fmt, arg...) \
36         gk20a_dbg(gpu_dbg_clk, fmt, ##arg)
37
38 #define DFS_DET_RANGE   6       /* -2^6 ... 2^6-1 */
39 #define SDM_DIN_RANGE   12      /* -2^12 ... 2^12-1 */
40 #define DFS_EXT_CAL_EN  BIT(9)
41 #define DFS_EXT_STROBE  BIT(16)
42
43 #define BOOT_GPU_UV     1000000 /* gpu rail boot voltage 1.0V */
44 #define ADC_SLOPE_UV    10000   /* default ADC detection slope 10mV */
45
46 #define DVFS_SAFE_MARGIN        10      /* 10% */
47 static unsigned long dvfs_safe_max_freq;
48
/*
 * GPCPLL synthesis limits and NA (noise-aware) mode characteristics.
 * Frequencies are in kHz; voltages handled elsewhere in uV/mV.
 * Initializers are positional - keep the order in sync with struct pll_parms.
 */
static struct pll_parms gpc_pll_params = {
	128000,  2600000,	/* freq: min/max PLL output, kHz */
	1300000, 2600000,	/* vco: min/max VCO frequency, kHz */
	12000,   38400,		/* u: min/max input comparison rate, kHz */
	1, 255,			/* M: input divider range */
	8, 255,			/* N: feedback multiplier range */
	1, 31,			/* PL: post divider range */
	-165230, 214007,	/* DFS_COEFF: NA-mode slope/offset coefficients */
	0, 0,			/* ADC char coeff - to be read from fuses */
	0x7 << 3,		/* vco control in NA mode */
};
60
61 #ifdef CONFIG_DEBUG_FS
62 static int clk_gm20b_debugfs_init(struct gk20a *g);
63 #endif
64 static void clk_setup_slide(struct gk20a *g, u32 clk_u);
65
/*
 * Read and print one trim_sys register by its accessor-name suffix.
 * Expects locals "addr" and "data" (u32) in the calling scope.
 */
#define DUMP_REG(addr_func) \
do {									\
	addr = trim_sys_##addr_func##_r();				\
	data = gk20a_readl(g, addr);					\
	pr_info(#addr_func "[0x%x] = 0x%x\n", addr, data);		\
} while (0)
72
73 static void dump_gpc_pll(struct gk20a *g, struct pll *gpll, u32 last_cfg)
74 {
75         u32 addr, data;
76
77         pr_info("**** GPCPLL DUMP ****");
78         pr_info("gpcpll s/w M=%u N=%u P=%u\n", gpll->M, gpll->N, gpll->PL);
79         pr_info("gpcpll_cfg_last = 0x%x\n", last_cfg);
80         DUMP_REG(gpcpll_cfg);
81         DUMP_REG(gpcpll_coeff);
82         DUMP_REG(sel_vco);
83         pr_info("\n");
84 }
85
86 /* 1:1 match between post divider settings and divisor value */
87 static inline u32 pl_to_div(u32 pl)
88 {
89         return pl;
90 }
91
92 static inline u32 div_to_pl(u32 div)
93 {
94         return div;
95 }
96
97 #define PLDIV_GLITCHLESS 1
98
99 #if PLDIV_GLITCHLESS
100 /*
101  * Post divider tarnsition is glitchless only if there is common "1" in binary
102  * representation of old and new settings.
103  */
104 static u32 get_interim_pldiv(u32 old_pl, u32 new_pl)
105 {
106         u32 pl;
107
108         if (old_pl & new_pl)
109                 return 0;
110
111         pl = old_pl | BIT(ffs(new_pl) - 1);     /* pl never 0 */
112         new_pl |= BIT(ffs(old_pl) - 1);
113
114         return min(pl, new_pl);
115 }
116 #endif
117
/* Calculate and update M/N/PL as well as pll->freq
    ref_clk_f = clk_in_f;
    u_f = ref_clk_f / M;
    vco_f = u_f * N = ref_clk_f * N / M;
    PLL output = gpc2clk = target clock frequency = vco_f / pl_to_pdiv(PL);
    gpcclk = gpc2clk / 2;
   Exhaustively searches PL/M/N combinations within pll_params limits for the
   setting closest to *target_freq (kHz). On return pll->M/N/PL/freq hold the
   best match and *target_freq is updated to the achievable frequency.
   Always returns 0; BUG()s if no valid combination exists at all. */
static int clk_config_pll(struct clk_gk20a *clk, struct pll *pll,
	struct pll_parms *pll_params, u32 *target_freq, bool best_fit)
{
	u32 min_vco_f, max_vco_f;
	u32 best_M, best_N;
	u32 low_PL, high_PL, best_PL;
	u32 m, n, n2;
	u32 target_vco_f, vco_f;
	u32 ref_clk_f, target_clk_f, u_f;
	u32 delta, lwv, best_delta = ~0;	/* ~0 marks "no match yet" */
	u32 pl;

	BUG_ON(target_freq == NULL);

	gk20a_dbg_fn("request target freq %d MHz", *target_freq);

	ref_clk_f = pll->clk_in;
	target_clk_f = *target_freq;
	max_vco_f = pll_params->max_vco;
	min_vco_f = pll_params->min_vco;
	/* Fallback coefficients in case the search finds nothing better */
	best_M = pll_params->max_M;
	best_N = pll_params->min_N;
	best_PL = pll_params->min_PL;

	/* Aim the VCO 2% above target to bias the search upward */
	target_vco_f = target_clk_f + target_clk_f / 50;
	if (max_vco_f < target_vco_f)
		max_vco_f = target_vco_f;

	/* Set PL search boundaries. */
	high_PL = div_to_pl((max_vco_f + target_vco_f - 1) / target_vco_f);
	high_PL = min(high_PL, pll_params->max_PL);
	high_PL = max(high_PL, pll_params->min_PL);

	low_PL = div_to_pl(min_vco_f / target_vco_f);
	low_PL = min(low_PL, pll_params->max_PL);
	low_PL = max(low_PL, pll_params->min_PL);

	gk20a_dbg_info("low_PL %d(div%d), high_PL %d(div%d)",
			low_PL, pl_to_div(low_PL), high_PL, pl_to_div(high_PL));

	for (pl = low_PL; pl <= high_PL; pl++) {
		/* VCO frequency needed to hit target at this post divider */
		target_vco_f = target_clk_f * pl_to_div(pl);

		for (m = pll_params->min_M; m <= pll_params->max_M; m++) {
			u_f = ref_clk_f / m;

			/* u_f only decreases with m: break, don't continue */
			if (u_f < pll_params->min_u)
				break;
			if (u_f > pll_params->max_u)
				continue;

			/* n..n2 brackets target VCO: floor and ceiling */
			n = (target_vco_f * m) / ref_clk_f;
			n2 = ((target_vco_f * m) + (ref_clk_f - 1)) / ref_clk_f;

			if (n > pll_params->max_N)
				break;

			for (; n <= n2; n++) {
				if (n < pll_params->min_N)
					continue;
				if (n > pll_params->max_N)
					break;

				vco_f = ref_clk_f * n / m;

				if (vco_f >= min_vco_f && vco_f <= max_vco_f) {
					/* Rounded output frequency */
					lwv = (vco_f + (pl_to_div(pl) / 2))
						/ pl_to_div(pl);
					delta = abs(lwv - target_clk_f);

					if (delta < best_delta) {
						best_delta = delta;
						best_M = m;
						best_N = n;
						best_PL = pl;

						if (best_delta == 0 ||
						    /* 0.45% for non best fit */
						    (!best_fit && (vco_f / best_delta > 218))) {
							goto found_match;
						}

						gk20a_dbg_info("delta %d @ M %d, N %d, PL %d",
							delta, m, n, pl);
					}
				}
			}
		}
	}

found_match:
	/* best_delta still ~0 means no M/N/PL combination was valid at all */
	BUG_ON(best_delta == ~0);

	if (best_fit && best_delta != 0)
		gk20a_dbg_clk("no best match for target @ %dMHz on gpc_pll",
			target_clk_f);

	pll->M = best_M;
	pll->N = best_N;
	pll->PL = best_PL;

	/* save current frequency */
	pll->freq = ref_clk_f * pll->N / (pll->M * pl_to_div(pll->PL));

	*target_freq = pll->freq;

	gk20a_dbg_clk("actual target freq %d kHz, M %d, N %d, PL %d(div%d)",
		*target_freq, pll->M, pll->N, pll->PL, pl_to_div(pll->PL));

	gk20a_dbg_fn("done");

	return 0;
}
237
238 /* GPCPLL NA/DVFS mode methods */
239
240 /*
241  * Read ADC characteristic parmeters from fuses.
242  * Determine clibration settings.
243  */
244 static int clk_config_calibration_params(struct gk20a *g)
245 {
246         int slope, offs;
247         struct pll_parms *p = &gpc_pll_params;
248
249         if (!tegra_fuse_calib_gpcpll_get_adc(&slope, &offs)) {
250                 p->uvdet_slope = slope;
251                 p->uvdet_offs = offs;
252         }
253
254         if (!p->uvdet_slope || !p->uvdet_offs) {
255                 /*
256                  * If ADC conversion slope/offset parameters are not fused
257                  * (non-production config), report error, but allow to use
258                  * boot internal calibration with default slope.
259                  */
260                 gk20a_err(dev_from_gk20a(g), "ADC coeff are not fused\n");
261                 return -EINVAL;
262         }
263         return 0;
264 }
265
266 /*
267  * Determine DFS_COEFF for the requested voltage. Always select external
268  * calibration override equal to the voltage, and set maximum detection
269  * limit "0" (to make sure that PLL output remains under F/V curve when
270  * voltage increases).
271  */
272 static void clk_config_dvfs_detection(int mv, struct na_dvfs *d)
273 {
274         u32 coeff, coeff_max;
275         struct pll_parms *p = &gpc_pll_params;
276
277         coeff_max = trim_sys_gpcpll_dvfs0_dfs_coeff_v(
278                 trim_sys_gpcpll_dvfs0_dfs_coeff_m());
279         coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
280         coeff = DIV_ROUND_CLOSEST(coeff, 1000);
281         coeff = min(coeff, coeff_max);
282         d->dfs_coeff = coeff;
283
284         d->dfs_ext_cal = DIV_ROUND_CLOSEST(mv * 1000 - p->uvdet_offs,
285                                            p->uvdet_slope);
286         BUG_ON(abs(d->dfs_ext_cal) >= (1 << DFS_DET_RANGE));
287         d->uv_cal = p->uvdet_offs + d->dfs_ext_cal * p->uvdet_slope;
288         d->dfs_det_max = 0;
289 }
290
291 /*
292  * Solve equation for integer and fractional part of the effective NDIV:
293  *
294  * n_eff = n_int + 1/2 + SDM_DIN / 2^(SDM_DIN_RANGE + 1) +
295  * DVFS_COEFF * DVFS_DET_DELTA / 2^DFS_DET_RANGE
296  *
297  * The SDM_DIN LSB is finally shifted out, since it is not accessible by s/w.
298  */
299 static void clk_config_dvfs_ndiv(int mv, u32 n_eff, struct na_dvfs *d)
300 {
301         int n, det_delta;
302         u32 rem, rem_range;
303         struct pll_parms *p = &gpc_pll_params;
304
305         det_delta = DIV_ROUND_CLOSEST(mv * 1000 - p->uvdet_offs,
306                                       p->uvdet_slope);
307         det_delta -= d->dfs_ext_cal;
308         det_delta = min(det_delta, d->dfs_det_max);
309         det_delta = det_delta * d->dfs_coeff;
310
311         n = (int)(n_eff << DFS_DET_RANGE) - det_delta;
312         BUG_ON((n < 0) || (n > (p->max_N << DFS_DET_RANGE)));
313         d->n_int = ((u32)n) >> DFS_DET_RANGE;
314
315         rem = ((u32)n) & ((1 << DFS_DET_RANGE) - 1);
316         rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
317         d->sdm_din = (rem << rem_range) - (1 << SDM_DIN_RANGE);
318         d->sdm_din = (d->sdm_din >> BITS_PER_BYTE) & 0xff;
319 }
320
321 /* Voltage dependent configuration */
322 static void clk_config_dvfs(struct gk20a *g, struct pll *gpll)
323 {
324         struct na_dvfs *d = &gpll->dvfs;
325
326         d->mv = tegra_dvfs_predict_millivolts_t(
327                         clk_get_parent(g->clk.tegra_clk),
328                         rate_gpc2clk_to_gpu(gpll->freq));
329         clk_config_dvfs_detection(d->mv, d);
330         clk_config_dvfs_ndiv(d->mv, gpll->N, d);
331 }
332
/* Update DVFS detection settings in flight */
/*
 * Program a new DFS coefficient while the PLL is running:
 * assert DFS_EXT_STROBE, write the coefficient, then release the strobe.
 */
static void clk_set_dfs_coeff(struct gk20a *g, u32 dfs_coeff)
{
	/* Assert external strobe around the update */
	u32 data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
	data |= DFS_EXT_STROBE;
	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

	/* Write the new coefficient into the dvfs0 field */
	data = gk20a_readl(g, trim_sys_gpcpll_dvfs0_r());
	data = set_field(data, trim_sys_gpcpll_dvfs0_dfs_coeff_m(),
		trim_sys_gpcpll_dvfs0_dfs_coeff_f(dfs_coeff));
	gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);

	/* Read back to flush the write, brief settle, release strobe */
	data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
	udelay(1);
	data &= ~DFS_EXT_STROBE;
	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
}
350
/*
 * Program a new maximum DVFS detection limit while the PLL is running.
 * Same strobe-write-release sequence as clk_set_dfs_coeff().
 */
static void __maybe_unused clk_set_dfs_det_max(struct gk20a *g, u32 dfs_det_max)
{
	/* Assert external strobe around the update */
	u32 data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
	data |= DFS_EXT_STROBE;
	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

	/* Write the new detection maximum into the dvfs0 field */
	data = gk20a_readl(g, trim_sys_gpcpll_dvfs0_r());
	data = set_field(data, trim_sys_gpcpll_dvfs0_dfs_det_max_m(),
		trim_sys_gpcpll_dvfs0_dfs_det_max_f(dfs_det_max));
	gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);

	/* Read back to flush the write, brief settle, release strobe */
	data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
	udelay(1);
	data &= ~DFS_EXT_STROBE;
	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);
}
367
/*
 * Program the external DVFS calibration value and make sure external
 * calibration is enabled in the dvfs1 control field.
 */
static void clk_set_dfs_ext_cal(struct gk20a *g, u32 dfs_det_cal)
{
	u32 data;

	/* Replace the low (DFS_DET_RANGE + 1) bits with the new cal value */
	data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
	data &= ~(BIT(DFS_DET_RANGE + 1) - 1);
	data |= dfs_det_cal;
	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

	data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
	udelay(1);
	/* Enable external calibration if the EN bit is not set yet */
	if (~trim_sys_gpcpll_dvfs1_dfs_ctrl_v(data) & DFS_EXT_CAL_EN) {
		data = set_field(data, trim_sys_gpcpll_dvfs1_dfs_ctrl_m(),
			trim_sys_gpcpll_dvfs1_dfs_ctrl_f(DFS_EXT_CAL_EN));
		gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);
	}
}
385
/*
 * Apply the full DVFS detection configuration from gpll->dvfs:
 * DFS coefficient and detection maximum (under external strobe),
 * followed by the external calibration value.
 */
static void clk_setup_dvfs_detection(struct gk20a *g, struct pll *gpll)
{
	struct na_dvfs *d = &gpll->dvfs;

	/* Assert external strobe around the coeff/det_max update */
	u32 data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
	data |= DFS_EXT_STROBE;
	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

	/* Write coefficient and detection maximum in a single dvfs0 update */
	data = gk20a_readl(g, trim_sys_gpcpll_dvfs0_r());
	data = set_field(data, trim_sys_gpcpll_dvfs0_dfs_coeff_m(),
		trim_sys_gpcpll_dvfs0_dfs_coeff_f(d->dfs_coeff));
	data = set_field(data, trim_sys_gpcpll_dvfs0_dfs_det_max_m(),
		trim_sys_gpcpll_dvfs0_dfs_det_max_f(d->dfs_det_max));
	gk20a_writel(g, trim_sys_gpcpll_dvfs0_r(), data);

	/* Read back to flush the write, brief settle, release strobe */
	data = gk20a_readl(g, trim_gpc_bcast_gpcpll_dvfs2_r());
	udelay(1);
	data &= ~DFS_EXT_STROBE;
	gk20a_writel(g, trim_gpc_bcast_gpcpll_dvfs2_r(), data);

	clk_set_dfs_ext_cal(g, d->dfs_ext_cal);
}
408
/* Enable NA/DVFS mode */
/*
 * NOTE(review): identifier is misspelled ("enbale"); kept as-is since
 * callers outside this chunk reference it.
 *
 * Enables NA DVFS, programs VCO control, takes the PLL out of IDDQ, and
 * sets up the dynamic ramp. If ADC calibration parameters are known (from
 * fuses or earlier boot calibration) they are used directly; otherwise
 * internal calibration is run and its result, combined with the default
 * ADC slope, yields the offset.
 *
 * Returns 0 on success, -ETIMEDOUT if internal calibration never completes.
 */
static int clk_enbale_pll_dvfs(struct gk20a *g)
{
	u32 data;
	int delay = 5;	/* use for iddq exit delay & calib timeout */
	struct pll_parms *p = &gpc_pll_params;
	bool calibrated = p->uvdet_slope && p->uvdet_offs;

	/* Enable NA DVFS */
	data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
	data |= trim_sys_gpcpll_dvfs1_en_dfs_m();
	gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);

	/* Set VCO_CTRL */
	if (p->vco_ctrl) {
		data = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
		data = set_field(data, trim_sys_gpcpll_cfg3_vco_ctrl_m(),
				 trim_sys_gpcpll_cfg3_vco_ctrl_f(p->vco_ctrl));
		gk20a_writel(g, trim_sys_gpcpll_cfg3_r(), data);
	}

	/*
	 * If calibration parameters are known (either from fuses, or from
	 * internal calibration on boot) - use them. Internal calibration is
	 * started anyway; it will complete, but results will not be used.
	 */
	if (calibrated) {
		data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
		data |= trim_sys_gpcpll_dvfs1_en_dfs_cal_m();
		gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);
	}

	/* Exit IDDQ mode */
	data = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	data = set_field(data, trim_sys_gpcpll_cfg_iddq_m(),
			 trim_sys_gpcpll_cfg_iddq_power_on_v());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), data);
	/* Read back to flush the write before the exit delay */
	gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	udelay(delay);

	/*
	 * Dynamic ramp setup based on update rate, which in DVFS mode on GM20b
	 * is always 38.4 MHz, the same as reference clock rate.
	 */
	clk_setup_slide(g, g->clk.gpc_pll.clk_in);

	if (calibrated)
		return 0;

	/*
	 * If calibration parameters are not fused, start internal calibration,
	 * wait for completion, and use results along with default slope to
	 * calculate ADC offset during boot.
	 */
	data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
	data |= trim_sys_gpcpll_dvfs1_en_dfs_cal_m();
	gk20a_writel(g, trim_sys_gpcpll_dvfs1_r(), data);

	/* Wait for internal calibration done (spec < 2us). */
	do {
		data = gk20a_readl(g, trim_sys_gpcpll_dvfs1_r());
		if (trim_sys_gpcpll_dvfs1_dfs_cal_done_v(data))
			break;
		udelay(1);
		delay--;
	} while (delay > 0);

	if (delay <= 0) {
		gk20a_err(dev_from_gk20a(g), "GPCPLL calibration timeout");
		return -ETIMEDOUT;
	}

	/* Derive ADC offset from calibration readout and default slope */
	data = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
	data = trim_sys_gpcpll_cfg3_dfs_testout_v(data);
	p->uvdet_offs = BOOT_GPU_UV - data * ADC_SLOPE_UV;
	p->uvdet_slope = ADC_SLOPE_UV;
	return 0;
}
487
488 /* GPCPLL slide methods */
489 static void clk_setup_slide(struct gk20a *g, u32 clk_u)
490 {
491         u32 data, step_a, step_b;
492
493         switch (clk_u) {
494         case 12000:
495         case 12800:
496         case 13000:                     /* only on FPGA */
497                 step_a = 0x2B;
498                 step_b = 0x0B;
499                 break;
500         case 19200:
501                 step_a = 0x12;
502                 step_b = 0x08;
503                 break;
504         case 38400:
505                 step_a = 0x04;
506                 step_b = 0x05;
507                 break;
508         default:
509                 gk20a_err(dev_from_gk20a(g), "Unexpected reference rate %u kHz",
510                           clk_u);
511                 BUG();
512         }
513
514         /* setup */
515         data = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
516         data = set_field(data, trim_sys_gpcpll_cfg2_pll_stepa_m(),
517                         trim_sys_gpcpll_cfg2_pll_stepa_f(step_a));
518         gk20a_writel(g, trim_sys_gpcpll_cfg2_r(), data);
519         data = gk20a_readl(g, trim_sys_gpcpll_cfg3_r());
520         data = set_field(data, trim_sys_gpcpll_cfg3_pll_stepb_m(),
521                         trim_sys_gpcpll_cfg3_pll_stepb_f(step_b));
522         gk20a_writel(g, trim_sys_gpcpll_cfg3_r(), data);
523 }
524
/*
 * Dynamically ramp the running GPCPLL to a new NDIV (and, in DVFS mode,
 * a new SDM_DIN) without relocking: enter PLL slowdown, stage the new
 * dividers, trigger the dynamic ramp, wait for h/w completion, then exit
 * slowdown. Returns 0 on success or -ETIMEDOUT if the ramp never
 * completes (slowdown is still exited in that case).
 */
static int clk_slide_gpc_pll(struct gk20a *g, struct pll *gpll)
{
	u32 data, coeff;
	u32 nold, sdm_old;
	int ramp_timeout = 500;

	/* get old coefficients */
	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
	nold = trim_sys_gpcpll_coeff_ndiv_v(coeff);

	/* do nothing if NDIV is same */
	if (gpll->mode == GPC_PLL_MODE_DVFS) {
		/* in DVFS mode check both integer and fraction */
		coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
		sdm_old = trim_sys_gpcpll_cfg2_sdm_din_v(coeff);
		if ((gpll->dvfs.n_int == nold) &&
		    (gpll->dvfs.sdm_din == sdm_old))
			return 0;
	} else {
		if (gpll->N == nold)
			return 0;

		/* dynamic ramp setup based on update rate */
		clk_setup_slide(g, gpll->clk_in / gpll->M);
	}

	/* pll slowdown mode */
	data = gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
	data = set_field(data,
			trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_m(),
			trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_yes_f());
	gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);

	/* new ndiv ready for ramp */
	if (gpll->mode == GPC_PLL_MODE_DVFS) {
		/* in DVFS mode SDM is updated via "new" field */
		coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
		coeff = set_field(coeff, trim_sys_gpcpll_cfg2_sdm_din_new_m(),
			trim_sys_gpcpll_cfg2_sdm_din_new_f(gpll->dvfs.sdm_din));
		gk20a_writel(g, trim_sys_gpcpll_cfg2_r(), coeff);

		coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
		coeff = set_field(coeff, trim_sys_gpcpll_coeff_ndiv_m(),
			trim_sys_gpcpll_coeff_ndiv_f(gpll->dvfs.n_int));
		udelay(1);
		gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
	} else {
		coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
		coeff = set_field(coeff, trim_sys_gpcpll_coeff_ndiv_m(),
				trim_sys_gpcpll_coeff_ndiv_f(gpll->N));
		udelay(1);
		gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
	}

	/* dynamic ramp to new ndiv */
	data = gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
	data = set_field(data,
			trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(),
			trim_sys_gpcpll_ndiv_slowdown_en_dynramp_yes_f());
	udelay(1);
	gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);

	/* poll the broadcast debug register for ramp completion */
	do {
		udelay(1);
		ramp_timeout--;
		data = gk20a_readl(
			g, trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_r());
		if (trim_gpc_bcast_gpcpll_ndiv_slowdown_debug_pll_dynramp_done_synced_v(data))
			break;
	} while (ramp_timeout > 0);

	if ((gpll->mode == GPC_PLL_MODE_DVFS) && (ramp_timeout > 0)) {
		/* in DVFS mode complete SDM update */
		coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
		coeff = set_field(coeff, trim_sys_gpcpll_cfg2_sdm_din_m(),
			trim_sys_gpcpll_cfg2_sdm_din_f(gpll->dvfs.sdm_din));
		gk20a_writel(g, trim_sys_gpcpll_cfg2_r(), coeff);
	}

	/* exit slowdown mode */
	data = gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());
	data = set_field(data,
			trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_m(),
			trim_sys_gpcpll_ndiv_slowdown_slowdown_using_pll_no_f());
	data = set_field(data,
			trim_sys_gpcpll_ndiv_slowdown_en_dynramp_m(),
			trim_sys_gpcpll_ndiv_slowdown_en_dynramp_no_f());
	gk20a_writel(g, trim_sys_gpcpll_ndiv_slowdown_r(), data);
	/* read back to flush the write */
	gk20a_readl(g, trim_sys_gpcpll_ndiv_slowdown_r());

	if (ramp_timeout <= 0) {
		gk20a_err(dev_from_gk20a(g), "gpcpll dynamic ramp timeout");
		return -ETIMEDOUT;
	}
	return 0;
}
621
622 /* GPCPLL bypass methods */
/*
 * Change only the post divider (PL) while the GPC clock is temporarily
 * switched to bypass: bypass -> write PLDIV -> back on VCO.
 * Always returns 0.
 */
static int clk_change_pldiv_under_bypass(struct gk20a *g, struct pll *gpll)
{
	u32 data, coeff;

	/* put PLL in bypass before programming it */
	data = gk20a_readl(g, trim_sys_sel_vco_r());
	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
		trim_sys_sel_vco_gpc2clk_out_bypass_f());
	gk20a_writel(g, trim_sys_sel_vco_r(), data);

	/* change PLDIV */
	coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
	udelay(1);
	coeff = set_field(coeff, trim_sys_gpcpll_coeff_pldiv_m(),
			  trim_sys_gpcpll_coeff_pldiv_f(gpll->PL));
	gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);

	/* put PLL back on vco */
	data = gk20a_readl(g, trim_sys_sel_vco_r());
	udelay(1);
	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
		trim_sys_sel_vco_gpc2clk_out_vco_f());
	gk20a_writel(g, trim_sys_sel_vco_r(), data);

	return 0;
}
649
/*
 * Fully reprogram and (re)lock the GPCPLL with the output in bypass:
 * bypass -> disable/IDDQ-exit -> write M/N/PL (and DVFS settings in NA
 * mode) -> enable -> wait for lock (or fixed delay in DVFS mode, where
 * the lock detector cannot be used) -> enable SYNC_MODE -> back on VCO.
 * Returns 0 on success; BUG()s if lock is never achieved.
 */
static int clk_lock_gpc_pll_under_bypass(struct gk20a *g, struct pll *gpll)
{
	u32 data, cfg, coeff, timeout;

	/* put PLL in bypass before programming it */
	data = gk20a_readl(g, trim_sys_sel_vco_r());
	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
		trim_sys_sel_vco_gpc2clk_out_bypass_f());
	gk20a_writel(g, trim_sys_sel_vco_r(), data);

	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	udelay(1);
	if (trim_sys_gpcpll_cfg_iddq_v(cfg)) {
		/* get out from IDDQ (1st power up) */
		cfg = set_field(cfg, trim_sys_gpcpll_cfg_iddq_m(),
				trim_sys_gpcpll_cfg_iddq_power_on_v());
		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
		udelay(5);
	} else {
		/* clear SYNC_MODE before disabling PLL */
		cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
				trim_sys_gpcpll_cfg_sync_mode_disable_f());
		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
		gk20a_readl(g, trim_sys_gpcpll_cfg_r());

		/* disable running PLL before changing coefficients */
		cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
				trim_sys_gpcpll_cfg_enable_no_f());
		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	}

	/* change coefficients */
	if (gpll->mode == GPC_PLL_MODE_DVFS) {
		clk_setup_dvfs_detection(g, gpll);

		/* in DVFS mode program SDM_DIN, then n_int as NDIV */
		coeff = gk20a_readl(g, trim_sys_gpcpll_cfg2_r());
		coeff = set_field(coeff, trim_sys_gpcpll_cfg2_sdm_din_m(),
			trim_sys_gpcpll_cfg2_sdm_din_f(gpll->dvfs.sdm_din));
		gk20a_writel(g, trim_sys_gpcpll_cfg2_r(), coeff);

		coeff = trim_sys_gpcpll_coeff_mdiv_f(gpll->M) |
			trim_sys_gpcpll_coeff_ndiv_f(gpll->dvfs.n_int) |
			trim_sys_gpcpll_coeff_pldiv_f(gpll->PL);
		gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
	} else {
		coeff = trim_sys_gpcpll_coeff_mdiv_f(gpll->M) |
			trim_sys_gpcpll_coeff_ndiv_f(gpll->N) |
			trim_sys_gpcpll_coeff_pldiv_f(gpll->PL);
		gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
	}

	/* enable PLL after changing coefficients */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
			trim_sys_gpcpll_cfg_enable_yes_f());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);

	/* just delay in DVFS mode (lock cannot be used) */
	if (gpll->mode == GPC_PLL_MODE_DVFS) {
		gk20a_readl(g, trim_sys_gpcpll_cfg_r());
		udelay(g->clk.na_pll_delay);
		goto pll_locked;
	}

	/* lock pll */
	cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	/* power on the lock detector if it is off */
	if (cfg & trim_sys_gpcpll_cfg_enb_lckdet_power_off_f()){
		cfg = set_field(cfg, trim_sys_gpcpll_cfg_enb_lckdet_m(),
			trim_sys_gpcpll_cfg_enb_lckdet_power_on_f());
		gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
		cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
	}

	/* wait pll lock */
	timeout = g->clk.pll_delay + 1;
	do {
		udelay(1);
		cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
		if (cfg & trim_sys_gpcpll_cfg_pll_lock_true_f())
			goto pll_locked;
	} while (--timeout > 0);

	/* PLL is messed up. What can we do here? */
	dump_gpc_pll(g, gpll, cfg);
	BUG();
	return -EBUSY;

pll_locked:
	gk20a_dbg_clk("locked config_pll under bypass r=0x%x v=0x%x",
		trim_sys_gpcpll_cfg_r(), cfg);

	/* set SYNC_MODE for glitchless switch out of bypass */
	cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
			trim_sys_gpcpll_cfg_sync_mode_enable_f());
	gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
	gk20a_readl(g, trim_sys_gpcpll_cfg_r());

	/* put PLL back on vco */
	data = gk20a_readl(g, trim_sys_sel_vco_r());
	data = set_field(data, trim_sys_sel_vco_gpc2clk_out_m(),
		trim_sys_sel_vco_gpc2clk_out_vco_f());
	gk20a_writel(g, trim_sys_sel_vco_r(), data);

	return 0;
}
757
/*
 *  Change GPCPLL frequency:
 *  - in legacy (non-DVFS) mode
 *  - in DVFS mode at constant DVFS detection settings, matching current/lower
 *    voltage; the same procedure can be used in this case, since maximum DVFS
 *    detection limit makes sure that PLL output remains under F/V curve when
 *    voltage increases arbitrary.
 *
 *  @g          gk20a device
 *  @gpll_new   target PLL configuration (M, N, PL, clk_in, dvfs, mode)
 *  @allow_slide  non-zero to permit dynamic NDIV sliding; when zero the PLL
 *                is always re-locked under bypass
 *
 *  Returns 0 on success, or the error from clk_slide_gpc_pll().
 */
static int clk_program_gpc_pll(struct gk20a *g, struct pll *gpll_new,
                        int allow_slide)
{
        u32 cfg, coeff, data;
        bool can_slide, pldiv_only;
        struct pll gpll;        /* working copy: current h/w coefficients */

        gk20a_dbg_fn("");

        /* No real PLL h/w on simulation/emulation platforms */
        if (!tegra_platform_is_silicon())
                return 0;

        /* get old coefficients */
        coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
        gpll.M = trim_sys_gpcpll_coeff_mdiv_v(coeff);
        gpll.N = trim_sys_gpcpll_coeff_ndiv_v(coeff);
        gpll.PL = trim_sys_gpcpll_coeff_pldiv_v(coeff);
        gpll.clk_in = gpll_new->clk_in;

        /* combine target dvfs with old coefficients */
        gpll.dvfs = gpll_new->dvfs;
        gpll.mode = gpll_new->mode;

        /* do NDIV slide if there is no change in M and PL */
        cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        can_slide = allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg);

        if (can_slide && (gpll_new->M == gpll.M) && (gpll_new->PL == gpll.PL))
                return clk_slide_gpc_pll(g, gpll_new);

        /* slide down to NDIV_LO (lowest N that keeps VCO above min_vco) */
        if (can_slide) {
                int ret;
                gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
                                      gpll.clk_in);
                if (gpll.mode == GPC_PLL_MODE_DVFS)
                        clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
                ret = clk_slide_gpc_pll(g, &gpll);
                if (ret)
                        return ret;
        }
        /* with unchanged M only the post divider (and N) must be reprogrammed */
        pldiv_only = can_slide && (gpll_new->M == gpll.M);

        /*
         *  Split FO-to-bypass jump in halfs by setting out divider 1:2.
         *  (needed even if PLDIV_GLITCHLESS is set, since 1:1 <=> 1:2 direct
         *  transition is not really glitch-less - see get_interim_pldiv
         *  function header).
         */
        if ((gpll_new->PL < 2) || (gpll.PL < 2)) {
                data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
                data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
                        trim_sys_gpc2clk_out_vcodiv_f(2));
                gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
                /* Intentional 2nd write to assure linear divider operation */
                gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
                gk20a_readl(g, trim_sys_gpc2clk_out_r());
                udelay(2);
        }

#if PLDIV_GLITCHLESS
        coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
        if (pldiv_only) {
                /* Insert interim PLDIV state if necessary */
                u32 interim_pl = get_interim_pldiv(gpll_new->PL, gpll.PL);
                if (interim_pl) {
                        coeff = set_field(coeff,
                                trim_sys_gpcpll_coeff_pldiv_m(),
                                trim_sys_gpcpll_coeff_pldiv_f(interim_pl));
                        gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
                        coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
                }
                goto set_pldiv; /* path A: no need to bypass */
        }

        /* path B: bypass if either M changes or PLL is disabled */
#endif
        /*
         * Program and lock pll under bypass. On exit PLL is out of bypass,
         * enabled, and locked. VCO is at vco_min if sliding is allowed.
         * Otherwise it is at VCO target (and therefore last slide call below
         * is effectively NOP). PL is set to target. Output divider is engaged
         * at 1:2 if either entry, or exit PL setting is 1:1.
         */
        gpll = *gpll_new;
        if (allow_slide) {
                gpll.N = DIV_ROUND_UP(gpll_new->M * gpc_pll_params.min_vco,
                                      gpll_new->clk_in);
                if (gpll.mode == GPC_PLL_MODE_DVFS)
                        clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
        }
        if (pldiv_only)
                clk_change_pldiv_under_bypass(g, &gpll);
        else
                clk_lock_gpc_pll_under_bypass(g, &gpll);

#if PLDIV_GLITCHLESS
        coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());

set_pldiv:
        /* coeff must be current from either path A or B */
        if (trim_sys_gpcpll_coeff_pldiv_v(coeff) != gpll_new->PL) {
                coeff = set_field(coeff, trim_sys_gpcpll_coeff_pldiv_m(),
                        trim_sys_gpcpll_coeff_pldiv_f(gpll_new->PL));
                gk20a_writel(g, trim_sys_gpcpll_coeff_r(), coeff);
        }
#endif
        /* restore out divider 1:1 */
        data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
        if ((data & trim_sys_gpc2clk_out_vcodiv_m()) !=
            trim_sys_gpc2clk_out_vcodiv_by1_f()) {
                data = set_field(data, trim_sys_gpc2clk_out_vcodiv_m(),
                                 trim_sys_gpc2clk_out_vcodiv_by1_f());
                udelay(2);
                gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
                /* Intentional 2nd write to assure linear divider operation */
                gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);
                gk20a_readl(g, trim_sys_gpc2clk_out_r());
        }

        /* slide up to target NDIV */
        return clk_slide_gpc_pll(g, gpll_new);
}
889
/*
 * Find GPCPLL config safe at DVFS coefficient = 0, matching target frequency.
 *
 * On entry gpll->freq/M/clk_in describe the requested configuration; on exit
 * gpll->freq (possibly derated by DVFS_SAFE_MARGIN), N, PL, and dvfs are
 * updated in place to a configuration safe with zero DVFS coefficient.
 */
static void clk_config_pll_safe_dvfs(struct gk20a *g, struct pll *gpll)
{
        u32 nsafe, nmin;

        /* Derate the target by the safety margin if it exceeds the safe max */
        if (gpll->freq > dvfs_safe_max_freq)
                gpll->freq = gpll->freq * (100 - DVFS_SAFE_MARGIN) / 100;

        /* nmin: lowest NDIV keeping VCO at/above min; nsafe: NDIV at target */
        nmin = DIV_ROUND_UP(gpll->M * gpc_pll_params.min_vco, gpll->clk_in);
        nsafe = gpll->M * gpll->freq / gpll->clk_in;

        /*
         * If safe frequency is above VCOmin, it can be used in safe PLL config
         * as is. Since safe frequency is below both old and new frequencies,
         * in this case all three configurations have same post divider 1:1, and
         * direct old=>safe=>new n-sliding will be used for transitions.
         *
         * Otherwise, if safe frequency is below VCO min, post-divider in safe
         * configuration (and possibly in old and/or new configurations) is
         * above 1:1, and each old=>safe and safe=>new transitions includes
         * sliding to/from VCOmin, as well as divider changes. To avoid extra
         * dynamic ramps from VCOmin during old=>safe transition and to VCOmin
         * during safe=>new transition, select nmin as safe NDIV, and set safe
         * post divider to assure PLL output is below safe frequency
         */
        if (nsafe < nmin) {
                gpll->PL = DIV_ROUND_UP(nmin * gpll->clk_in,
                                        gpll->M * gpll->freq);
                nsafe = nmin;
        }
        gpll->N = nsafe;
        /* Re-derive DVFS detection settings for the chosen NDIV */
        clk_config_dvfs_ndiv(gpll->dvfs.mv, gpll->N, &gpll->dvfs);

        gk20a_dbg_clk("safe freq %d kHz, M %d, N %d, PL %d(div%d)",
                gpll->freq, gpll->M, gpll->N, gpll->PL, pl_to_div(gpll->PL));
}
926
/*
 * Change GPCPLL frequency and DVFS detection settings in DVFS (noise-aware)
 * mode. Inserts an interim "safe" frequency step when the voltage level is
 * changing, so that DVFS detection coefficients can be retargeted without
 * ever driving the PLL output above the F/V curve.
 *
 * Returns 0 on success or the error from clk_program_gpc_pll().
 */
static int clk_program_na_gpc_pll(struct gk20a *g, struct pll *gpll_new,
                                  int allow_slide)
{
        int ret;
        struct pll gpll_safe;
        struct pll *gpll_old = &g->clk.gpc_pll_last;

        BUG_ON(gpll_new->M != 1);       /* the only MDIV in NA mode  */
        clk_config_dvfs(g, gpll_new);

        /*
         * In cases below no intermediate steps in PLL DVFS configuration are
         * necessary because either
         * - PLL DVFS will be configured under bypass directly to target, or
         * - voltage is not changing, so DVFS detection settings are the same
         */
        if (!allow_slide || !gpll_new->enabled ||
            (gpll_old->dvfs.mv == gpll_new->dvfs.mv))
                return clk_program_gpc_pll(g, gpll_new, allow_slide);

        /*
         * Interim step for changing DVFS detection settings: low enough
         * frequency to be safe at DVFS coeff = 0.
         *
         * 1. If voltage is increasing:
         * - safe frequency target matches the lowest - old - frequency
         * - DVFS settings are still old
         * - Voltage already increased to new level by tegra DVFS, but maximum
         *    detection limit assures PLL output remains under F/V curve
         *
         * 2. If voltage is decreasing:
         * - safe frequency target matches the lowest - new - frequency
         * - DVFS settings are still old
         * - Voltage is also old, it will be lowered by tegra DVFS afterwards
         *
         * Interim step can be skipped if old frequency is below safe minimum,
         * i.e., it is low enough to be safe at any voltage in operating range
         * with zero DVFS coefficient.
         */
        if (gpll_old->freq > dvfs_safe_max_freq) {
                if (gpll_old->dvfs.mv < gpll_new->dvfs.mv) {
                        /* voltage up: keep old frequency, adopt new voltage */
                        gpll_safe = *gpll_old;
                        gpll_safe.dvfs.mv = gpll_new->dvfs.mv;
                } else {
                        /* voltage down: new frequency with old DVFS settings */
                        gpll_safe = *gpll_new;
                        gpll_safe.dvfs = gpll_old->dvfs;
                }
                clk_config_pll_safe_dvfs(g, &gpll_safe);

                ret = clk_program_gpc_pll(g, &gpll_safe, 1);
                if (ret) {
                        gk20a_err(dev_from_gk20a(g), "Safe dvfs program fail\n");
                        return ret;
                }
        }

        /*
         * DVFS detection settings transition:
         * - Set DVFS coefficient zero (safe, since already at frequency safe
         *   at DVFS coeff = 0 for the lowest of the old/new end-points)
         * - Set calibration level to new voltage (safe, since DVFS coeff = 0)
         * - Set DVFS coefficient to match new voltage (safe, since already at
         *   frequency safe at DVFS coeff = 0 for the lowest of the old/new
         *   end-points.
         */
        clk_set_dfs_coeff(g, 0);
        clk_set_dfs_ext_cal(g, gpll_new->dvfs.dfs_ext_cal);
        clk_set_dfs_coeff(g, gpll_new->dvfs.dfs_coeff);

        /* Finally set target rate (with DVFS detection settings already new) */
        return clk_program_gpc_pll(g, gpll_new, 1);
}
1000
/*
 * Disable GPCPLL: optionally slide down to VCO min first (when @allow_slide
 * and the PLL is currently enabled), then switch output to bypass, clear
 * SYNC_MODE, and power the PLL down. Marks both current and last s/w PLL
 * state as disabled. Always returns 0.
 *
 * Caller is expected to hold clk_mutex (all callers in this file do).
 */
static int clk_disable_gpcpll(struct gk20a *g, int allow_slide)
{
        u32 cfg, coeff;
        struct clk_gk20a *clk = &g->clk;
        struct pll gpll = clk->gpc_pll;

        /* slide to VCO min */
        cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        if (allow_slide && trim_sys_gpcpll_cfg_enable_v(cfg)) {
                coeff = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
                gpll.M = trim_sys_gpcpll_coeff_mdiv_v(coeff);
                gpll.N = DIV_ROUND_UP(gpll.M * gpc_pll_params.min_vco,
                                      gpll.clk_in);
                if (gpll.mode == GPC_PLL_MODE_DVFS)
                        clk_config_dvfs_ndiv(gpll.dvfs.mv, gpll.N, &gpll.dvfs);
                clk_slide_gpc_pll(g, &gpll);
        }

        /* put PLL in bypass before disabling it */
        cfg = gk20a_readl(g, trim_sys_sel_vco_r());
        cfg = set_field(cfg, trim_sys_sel_vco_gpc2clk_out_m(),
                        trim_sys_sel_vco_gpc2clk_out_bypass_f());
        gk20a_writel(g, trim_sys_sel_vco_r(), cfg);

        /* clear SYNC_MODE before disabling PLL */
        cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        cfg = set_field(cfg, trim_sys_gpcpll_cfg_sync_mode_m(),
                        trim_sys_gpcpll_cfg_sync_mode_disable_f());
        gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);

        /* disable PLL */
        cfg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        cfg = set_field(cfg, trim_sys_gpcpll_cfg_enable_m(),
                        trim_sys_gpcpll_cfg_enable_no_f());
        gk20a_writel(g, trim_sys_gpcpll_cfg_r(), cfg);
        /* read back to post the final write before returning */
        gk20a_readl(g, trim_sys_gpcpll_cfg_r());

        clk->gpc_pll.enabled = false;
        clk->gpc_pll_last.enabled = false;
        return 0;
}
1042
/*
 * Clock reset/enable hook; currently does nothing beyond the debug trace
 * on gm20b. Always returns 0.
 */
static int gm20b_init_clk_reset_enable_hw(struct gk20a *g)
{
        int ret = 0;

        gk20a_dbg_fn("");
        return ret;
}
1048
1049 struct clk *gm20b_clk_get(struct gk20a *g)
1050 {
1051         if (!g->clk.tegra_clk) {
1052                 struct clk *clk;
1053
1054                 clk = clk_get_sys("tegra_gk20a", "gpu");
1055                 if (IS_ERR(clk)) {
1056                         gk20a_err(dev_from_gk20a(g),
1057                                 "fail to get tegra gpu clk tegra_gk20a/gpu");
1058                         return NULL;
1059                 }
1060                 g->clk.tegra_clk = clk;
1061         }
1062
1063         return g->clk.tegra_clk;
1064 }
1065
/*
 * One-time s/w initialization of GPCPLL state: resolves the PLL reference
 * clock, computes the DVFS-safe maximum frequency and initial M/N/PL
 * coefficients, and (when CONFIG_TEGRA_USE_NA_GPCPLL is set and the update
 * rate allows) selects noise-aware DVFS mode. Idempotent via clk->sw_ready.
 *
 * Returns 0 on success, -EINVAL if GPU or reference clocks cannot be found.
 */
static int gm20b_init_clk_setup_sw(struct gk20a *g)
{
        struct clk_gk20a *clk = &g->clk;
        unsigned long safe_rate;
        struct clk *ref;
        bool calibrated;

        gk20a_dbg_fn("");

        if (clk->sw_ready) {
                gk20a_dbg_fn("skip init");
                return 0;
        }

        if (!gk20a_clk_get(g))
                return -EINVAL;

        /*
         * On Tegra GPU clock exposed to frequency governor is a shared user on
         * GPCPLL bus (gbus). The latter can be accessed as GPU clock parent.
         * Respectively the grandparent is PLL reference clock.
         */
        ref = clk_get_parent(clk_get_parent(clk->tegra_clk));
        if (IS_ERR(ref)) {
                gk20a_err(dev_from_gk20a(g),
                        "failed to get GPCPLL reference clock");
                return -EINVAL;
        }

        /*
         * Locking time in both legacy and DVFS mode is 40us. However, in legacy
         * mode we rely on lock detection signal, and delay is just timeout
         * limit, so we can afford set it longer. In DVFS mode each lock inserts
         * specified delay, so it should be set as short as h/w allows.
         */
        clk->pll_delay = 300; /* usec */
        clk->na_pll_delay = 40; /* usec*/

        clk->gpc_pll.id = GK20A_GPC_PLL;
        clk->gpc_pll.clk_in = clk_get_rate(ref) / KHZ;

        /* Derive DVFS-safe frequency limit from thermal-safe gbus rate */
        safe_rate = tegra_dvfs_get_therm_safe_fmax(
                clk_get_parent(clk->tegra_clk));
        safe_rate = safe_rate * (100 - DVFS_SAFE_MARGIN) / 100;
        dvfs_safe_max_freq = rate_gpu_to_gpc2clk(safe_rate);
        clk->gpc_pll.PL = DIV_ROUND_UP(gpc_pll_params.min_vco,
                                       dvfs_safe_max_freq);

        /* Initial frequency: 1/3 VCO min (low enough to be safe at Vmin) */
        clk->gpc_pll.M = 1;
        clk->gpc_pll.N = DIV_ROUND_UP(gpc_pll_params.min_vco,
                                clk->gpc_pll.clk_in);
        clk->gpc_pll.PL = max(clk->gpc_pll.PL, 3U);
        clk->gpc_pll.freq = clk->gpc_pll.clk_in * clk->gpc_pll.N;
        clk->gpc_pll.freq /= pl_to_div(clk->gpc_pll.PL);

        calibrated = !clk_config_calibration_params(g);
#ifdef CONFIG_TEGRA_USE_NA_GPCPLL
        if (ALLOW_NON_CALIBRATED_NA_MODE || calibrated) {
                /* NA mode is supported only at max update rate 38.4 MHz */
                if (clk->gpc_pll.clk_in == gpc_pll_params.max_u) {
                        clk->gpc_pll.mode = GPC_PLL_MODE_DVFS;
                        gpc_pll_params.min_u = gpc_pll_params.max_u;
                }
        }
#endif

        mutex_init(&clk->clk_mutex);

        clk->sw_ready = true;

        gk20a_dbg_fn("done");
        pr_info("GM20b GPCPLL initial settings:%s M=%u, N=%u, P=%u\n",
                clk->gpc_pll.mode == GPC_PLL_MODE_DVFS ? " NA mode," : "",
                clk->gpc_pll.M, clk->gpc_pll.N, clk->gpc_pll.PL);
        return 0;
}
1143
/*
 * Program static clock h/w state: linear divider in Div4 mode with both
 * bypass and VCO ratios 1:1, clear the global GPCPLL bypass control, and
 * disable thermal idle slowdown. In DVFS mode additionally enables the PLL
 * DVFS h/w via clk_enbale_pll_dvfs() (sic - the typo is in that function's
 * definition elsewhere in this file).
 *
 * Returns 0, or the error from the DVFS-enable path.
 */
static int gm20b_init_clk_setup_hw(struct gk20a *g)
{
        u32 data;

        gk20a_dbg_fn("");

        /* LDIV: Div4 mode (required); both  bypass and vco ratios 1:1 */
        data = gk20a_readl(g, trim_sys_gpc2clk_out_r());
        data = set_field(data,
                        trim_sys_gpc2clk_out_sdiv14_m() |
                        trim_sys_gpc2clk_out_vcodiv_m() |
                        trim_sys_gpc2clk_out_bypdiv_m(),
                        trim_sys_gpc2clk_out_sdiv14_indiv4_mode_f() |
                        trim_sys_gpc2clk_out_vcodiv_by1_f() |
                        trim_sys_gpc2clk_out_bypdiv_f(0));
        gk20a_writel(g, trim_sys_gpc2clk_out_r(), data);

        /*
         * Clear global bypass control; PLL is still under bypass, since SEL_VCO
         * is cleared by default.
         */
        data = gk20a_readl(g, trim_sys_bypassctrl_r());
        data = set_field(data, trim_sys_bypassctrl_gpcpll_m(),
                         trim_sys_bypassctrl_gpcpll_vco_f());
        gk20a_writel(g, trim_sys_bypassctrl_r(), data);

        /* Disable idle slow down */
        data = gk20a_readl(g, therm_clk_slowdown_r(0));
        data = set_field(data, therm_clk_slowdown_idle_factor_m(),
                         therm_clk_slowdown_idle_factor_disabled_f());
        gk20a_writel(g, therm_clk_slowdown_r(0), data);
        /* read back to post the write */
        gk20a_readl(g, therm_clk_slowdown_r(0));

        if (g->clk.gpc_pll.mode == GPC_PLL_MODE_DVFS)
                return clk_enbale_pll_dvfs(g);

        return 0;
}
1182
1183 static int set_pll_target(struct gk20a *g, u32 freq, u32 old_freq)
1184 {
1185         struct clk_gk20a *clk = &g->clk;
1186
1187         if (freq > gpc_pll_params.max_freq)
1188                 freq = gpc_pll_params.max_freq;
1189         else if (freq < gpc_pll_params.min_freq)
1190                 freq = gpc_pll_params.min_freq;
1191
1192         if (freq != old_freq) {
1193                 /* gpc_pll.freq is changed to new value here */
1194                 if (clk_config_pll(clk, &clk->gpc_pll, &gpc_pll_params,
1195                                    &freq, true)) {
1196                         gk20a_err(dev_from_gk20a(g),
1197                                    "failed to set pll target for %d", freq);
1198                         return -EINVAL;
1199                 }
1200         }
1201         return 0;
1202 }
1203
1204 static int set_pll_freq(struct gk20a *g, int allow_slide)
1205 {
1206         struct clk_gk20a *clk = &g->clk;
1207         int err = 0;
1208
1209         gk20a_dbg_fn("last freq: %dMHz, target freq %dMHz",
1210                      clk->gpc_pll_last.freq, clk->gpc_pll.freq);
1211
1212         /* If programming with dynamic sliding failed, re-try under bypass */
1213         if (clk->gpc_pll.mode == GPC_PLL_MODE_DVFS) {
1214                 err = clk_program_na_gpc_pll(g, &clk->gpc_pll, allow_slide);
1215                 if (err && allow_slide)
1216                         err = clk_program_na_gpc_pll(g, &clk->gpc_pll, 0);
1217         } else {
1218                 err = clk_program_gpc_pll(g, &clk->gpc_pll, allow_slide);
1219                 if (err && allow_slide)
1220                         err = clk_program_gpc_pll(g, &clk->gpc_pll, 0);
1221         }
1222
1223         if (!err) {
1224                 clk->gpc_pll.enabled = true;
1225                 clk->gpc_pll_last = clk->gpc_pll;
1226                 return 0;
1227         }
1228
1229         /*
1230          * Just report error but not restore PLL since dvfs could already change
1231          * voltage even when programming failed.
1232          */
1233         gk20a_err(dev_from_gk20a(g), "failed to set pll to %d",
1234                   clk->gpc_pll.freq);
1235         return err;
1236 }
1237
1238 static int gm20b_clk_export_set_rate(void *data, unsigned long *rate)
1239 {
1240         u32 old_freq;
1241         int ret = -ENODATA;
1242         struct gk20a *g = data;
1243         struct clk_gk20a *clk = &g->clk;
1244
1245         if (rate) {
1246                 mutex_lock(&clk->clk_mutex);
1247                 old_freq = clk->gpc_pll.freq;
1248                 ret = set_pll_target(g, rate_gpu_to_gpc2clk(*rate), old_freq);
1249                 if (!ret && clk->gpc_pll.enabled && clk->clk_hw_on)
1250                         ret = set_pll_freq(g, 1);
1251                 if (!ret)
1252                         *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq);
1253                 mutex_unlock(&clk->clk_mutex);
1254         }
1255         return ret;
1256 }
1257
1258 static int gm20b_clk_export_enable(void *data)
1259 {
1260         int ret = 0;
1261         struct gk20a *g = data;
1262         struct clk_gk20a *clk = &g->clk;
1263
1264         mutex_lock(&clk->clk_mutex);
1265         if (!clk->gpc_pll.enabled && clk->clk_hw_on)
1266                 ret = set_pll_freq(g, 1);
1267         mutex_unlock(&clk->clk_mutex);
1268         return ret;
1269 }
1270
1271 static void gm20b_clk_export_disable(void *data)
1272 {
1273         struct gk20a *g = data;
1274         struct clk_gk20a *clk = &g->clk;
1275
1276         mutex_lock(&clk->clk_mutex);
1277         if (clk->gpc_pll.enabled && clk->clk_hw_on)
1278                 clk_disable_gpcpll(g, 1);
1279         mutex_unlock(&clk->clk_mutex);
1280 }
1281
1282 static void gm20b_clk_export_init(void *data, unsigned long *rate, bool *state)
1283 {
1284         struct gk20a *g = data;
1285         struct clk_gk20a *clk = &g->clk;
1286
1287         mutex_lock(&clk->clk_mutex);
1288         if (state)
1289                 *state = clk->gpc_pll.enabled;
1290         if (rate)
1291                 *rate = rate_gpc2clk_to_gpu(clk->gpc_pll.freq);
1292         mutex_unlock(&clk->clk_mutex);
1293 }
1294
/*
 * Callbacks exported to the Tegra clock framework; .data is filled in by
 * gm20b_clk_register_export_ops() with the gk20a device pointer.
 */
static struct tegra_clk_export_ops gm20b_clk_export_ops = {
        .init = gm20b_clk_export_init,
        .enable = gm20b_clk_export_enable,
        .disable = gm20b_clk_export_disable,
        .set_rate = gm20b_clk_export_set_rate,
};
1301
1302 static int gm20b_clk_register_export_ops(struct gk20a *g)
1303 {
1304         int ret;
1305         struct clk *c;
1306
1307         if (gm20b_clk_export_ops.data)
1308                 return 0;
1309
1310         gm20b_clk_export_ops.data = (void *)g;
1311         c = g->clk.tegra_clk;
1312         if (!c || !clk_get_parent(c))
1313                 return -ENOSYS;
1314
1315         ret = tegra_clk_register_export_ops(clk_get_parent(c),
1316                                             &gm20b_clk_export_ops);
1317
1318         return ret;
1319 }
1320
1321 static int gm20b_init_clk_support(struct gk20a *g)
1322 {
1323         struct clk_gk20a *clk = &g->clk;
1324         u32 err;
1325
1326         gk20a_dbg_fn("");
1327
1328         clk->g = g;
1329
1330         err = gm20b_init_clk_reset_enable_hw(g);
1331         if (err)
1332                 return err;
1333
1334         err = gm20b_init_clk_setup_sw(g);
1335         if (err)
1336                 return err;
1337
1338         mutex_lock(&clk->clk_mutex);
1339         clk->clk_hw_on = true;
1340
1341         err = gm20b_init_clk_setup_hw(g);
1342         mutex_unlock(&clk->clk_mutex);
1343         if (err)
1344                 return err;
1345
1346         err = gm20b_clk_register_export_ops(g);
1347         if (err)
1348                 return err;
1349
1350         /* FIXME: this effectively prevents host level clock gating */
1351         err = clk_enable(g->clk.tegra_clk);
1352         if (err)
1353                 return err;
1354
1355         /* The prev call may not enable PLL if gbus is unbalanced - force it */
1356         mutex_lock(&clk->clk_mutex);
1357         if (!clk->gpc_pll.enabled)
1358                 err = set_pll_freq(g, 1);
1359         mutex_unlock(&clk->clk_mutex);
1360         if (err)
1361                 return err;
1362
1363 #ifdef CONFIG_DEBUG_FS
1364         if (!clk->debugfs_set) {
1365                 if (!clk_gm20b_debugfs_init(g))
1366                         clk->debugfs_set = true;
1367         }
1368 #endif
1369         return err;
1370 }
1371
1372 static int gm20b_suspend_clk_support(struct gk20a *g)
1373 {
1374         int ret = 0;
1375
1376         clk_disable(g->clk.tegra_clk);
1377
1378         /* The prev call may not disable PLL if gbus is unbalanced - force it */
1379         mutex_lock(&g->clk.clk_mutex);
1380         if (g->clk.gpc_pll.enabled)
1381                 ret = clk_disable_gpcpll(g, 1);
1382         g->clk.clk_hw_on = false;
1383         mutex_unlock(&g->clk.clk_mutex);
1384         return ret;
1385 }
1386
1387 void gm20b_init_clk_ops(struct gpu_ops *gops)
1388 {
1389         gops->clk.init_clk_support = gm20b_init_clk_support;
1390         gops->clk.suspend_clk_support = gm20b_suspend_clk_support;
1391 }
1392
1393 #ifdef CONFIG_DEBUG_FS
1394
1395 static int rate_get(void *data, u64 *val)
1396 {
1397         struct gk20a *g = (struct gk20a *)data;
1398         *val = (u64)gk20a_clk_get_rate(g);
1399         return 0;
1400 }
1401 static int rate_set(void *data, u64 val)
1402 {
1403         struct gk20a *g = (struct gk20a *)data;
1404         return gk20a_clk_set_rate(g, (u32)val);
1405 }
1406 DEFINE_SIMPLE_ATTRIBUTE(rate_fops, rate_get, rate_set, "%llu\n");
1407
/*
 * debugfs "pll_reg" reader: decode and print bypass/sel_vco controls, the
 * PLL cfg status bits, and the M/N/PL coefficients with derived frequency.
 * Note: the frequency computation divides by m * pl_to_div(pl) read from
 * h/w; presumably these are non-zero whenever clk_hw_on is set - not
 * guaranteed by this function itself.
 */
static int pll_reg_show(struct seq_file *s, void *data)
{
        struct gk20a *g = s->private;
        u32 reg, m, n, pl, f;

        mutex_lock(&g->clk.clk_mutex);
        if (!g->clk.clk_hw_on) {
                seq_printf(s, "gk20a powered down - no access to registers\n");
                mutex_unlock(&g->clk.clk_mutex);
                return 0;
        }

        reg = gk20a_readl(g, trim_sys_bypassctrl_r());
        seq_printf(s, "bypassctrl = %s, ", reg ? "bypass" : "vco");
        reg = gk20a_readl(g, trim_sys_sel_vco_r());
        seq_printf(s, "sel_vco = %s, ", reg ? "vco" : "bypass");

        reg = gk20a_readl(g, trim_sys_gpcpll_cfg_r());
        seq_printf(s, "cfg  = 0x%x : %s : %s : %s\n", reg,
                trim_sys_gpcpll_cfg_enable_v(reg) ? "enabled" : "disabled",
                trim_sys_gpcpll_cfg_pll_lock_v(reg) ? "locked" : "unlocked",
                trim_sys_gpcpll_cfg_sync_mode_v(reg) ? "sync_on" : "sync_off");

        reg = gk20a_readl(g, trim_sys_gpcpll_coeff_r());
        m = trim_sys_gpcpll_coeff_mdiv_v(reg);
        n = trim_sys_gpcpll_coeff_ndiv_v(reg);
        pl = trim_sys_gpcpll_coeff_pldiv_v(reg);
        /* gpc2clk = clk_in * N / (M * div); gpu clock is gpc2clk / 2 */
        f = g->clk.gpc_pll.clk_in * n / (m * pl_to_div(pl));
        seq_printf(s, "coef = 0x%x : m = %u : n = %u : pl = %u", reg, m, n, pl);
        seq_printf(s, " : pll_f(gpu_f) = %u(%u) kHz\n", f, f/2);
        mutex_unlock(&g->clk.clk_mutex);
        return 0;
}
1441
/* debugfs open: bind pll_reg_show to this seq_file. */
static int pll_reg_open(struct inode *inode, struct file *file)
{
        return single_open(file, pll_reg_show, inode->i_private);
}
1446
/* Read-only decoded PLL register view (debugfs "pll_reg"). */
static const struct file_operations pll_reg_fops = {
        .open           = pll_reg_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
1453
/*
 * debugfs "pll_reg_raw" reader: hex dump of the whole GPCPLL register range
 * (cfg through dvfs2) plus the three clock-output control registers.
 */
static int pll_reg_raw_show(struct seq_file *s, void *data)
{
        struct gk20a *g = s->private;
        u32 reg;

        mutex_lock(&g->clk.clk_mutex);
        if (!g->clk.clk_hw_on) {
                seq_puts(s, "gk20a powered down - no access to registers\n");
                mutex_unlock(&g->clk.clk_mutex);
                return 0;
        }

        seq_puts(s, "GPCPLL REGISTERS:\n");
        for (reg = trim_sys_gpcpll_cfg_r(); reg <= trim_sys_gpcpll_dvfs2_r();
              reg += sizeof(u32))
                seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));

        seq_puts(s, "\nGPC CLK OUT REGISTERS:\n");

        reg = trim_sys_sel_vco_r();
        seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));
        reg = trim_sys_gpc2clk_out_r();
        seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));
        reg = trim_sys_bypassctrl_r();
        seq_printf(s, "[0x%02x] = 0x%08x\n", reg, gk20a_readl(g, reg));

        mutex_unlock(&g->clk.clk_mutex);
        return 0;
}
1483
/* debugfs open: bind pll_reg_raw_show to this seq_file. */
static int pll_reg_raw_open(struct inode *inode, struct file *file)
{
        return single_open(file, pll_reg_raw_show, inode->i_private);
}
1488
/*
 * debugfs "pll_reg_raw" writer: accepts a single "[0xREG] = 0xVAL" line
 * (the same format the reader prints) and writes VAL to REG. Only registers
 * within the GPCPLL range or the three clock-output control registers are
 * accepted (-EPERM otherwise); -EBUSY when h/w is powered down.
 */
static ssize_t pll_reg_raw_write(struct file *file,
        const char __user *userbuf, size_t count, loff_t *ppos)
{
        struct gk20a *g = file->f_path.dentry->d_inode->i_private;
        char buf[80];
        u32 reg, val;

        /* need room for the terminating NUL appended below */
        if (sizeof(buf) <= count)
                return -EINVAL;

        if (copy_from_user(buf, userbuf, count))
                return -EFAULT;

        /* terminate buffer and trim - white spaces may be appended
         *  at the end when invoked from shell command line */
        buf[count] = '\0';
        strim(buf);

        if (sscanf(buf, "[0x%x] = 0x%x", &reg, &val) != 2)
                return -EINVAL;

        /* whitelist: GPCPLL block plus the three clk-out control registers */
        if (((reg < trim_sys_gpcpll_cfg_r()) ||
            (reg > trim_sys_gpcpll_dvfs2_r())) &&
            (reg != trim_sys_sel_vco_r()) &&
            (reg != trim_sys_gpc2clk_out_r()) &&
            (reg != trim_sys_bypassctrl_r()))
                return -EPERM;

        mutex_lock(&g->clk.clk_mutex);
        if (!g->clk.clk_hw_on) {
                mutex_unlock(&g->clk.clk_mutex);
                return -EBUSY;
        }
        gk20a_writel(g, reg, val);
        mutex_unlock(&g->clk.clk_mutex);
        return count;
}
1526
/* Raw PLL register dump/poke interface (debugfs "pll_reg_raw"). */
static const struct file_operations pll_reg_raw_fops = {
        .open           = pll_reg_raw_open,
        .read           = seq_read,
        .write          = pll_reg_raw_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
1534
/*
 * debugfs "monitor" read: measure the actual GPCCLK frequency with the h/w
 * clock counter. Counts GPCCLK cycles over ncycle reference-clock periods,
 * then scales the reference rate by the counted ratio. Clock slowdown is
 * disabled for the duration of the measurement and restored afterwards.
 * Returns -EBUSY if the counter was still changing between the two reads
 * (measurement did not settle).
 */
static int monitor_get(void *data, u64 *val)
{
        struct gk20a *g = (struct gk20a *)data;
        struct clk_gk20a *clk = &g->clk;
        u32 clk_slowdown, clk_slowdown_save;
        int err;

        u32 ncycle = 800; /* count GPCCLK for ncycle of clkin */
        u64 freq = clk->gpc_pll.clk_in;
        u32 count1, count2;

        err = gk20a_busy(g->dev);
        if (err)
                return err;

        mutex_lock(&g->clk.clk_mutex);

        /* Disable clock slowdown during measurements */
        clk_slowdown_save = gk20a_readl(g, therm_clk_slowdown_r(0));
        clk_slowdown = set_field(clk_slowdown_save,
                                 therm_clk_slowdown_idle_factor_m(),
                                 therm_clk_slowdown_idle_factor_disabled_f());
        gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown);
        gk20a_readl(g, therm_clk_slowdown_r(0));

        /* reset the counter, then arm it for ncycle reference clocks */
        gk20a_writel(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0),
                     trim_gpc_clk_cntr_ncgpcclk_cfg_reset_asserted_f());
        gk20a_writel(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0),
                     trim_gpc_clk_cntr_ncgpcclk_cfg_enable_asserted_f() |
                     trim_gpc_clk_cntr_ncgpcclk_cfg_write_en_asserted_f() |
                     trim_gpc_clk_cntr_ncgpcclk_cfg_noofipclks_f(ncycle));
        /* start */

        /* It should take less than 25us to finish 800 cycle of 38.4MHz.
           But longer than 100us delay is required here. */
        gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cfg_r(0));
        udelay(200);

        /* two reads 100us apart: equal values mean the count has settled */
        count1 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0));
        udelay(100);
        count2 = gk20a_readl(g, trim_gpc_clk_cntr_ncgpcclk_cnt_r(0));
        freq *= trim_gpc_clk_cntr_ncgpcclk_cnt_value_v(count2);
        do_div(freq, ncycle);
        *val = freq;

        /* Restore clock slowdown */
        gk20a_writel(g, therm_clk_slowdown_r(0), clk_slowdown_save);
        mutex_unlock(&g->clk.clk_mutex);

        gk20a_idle(g->dev);

        if (count1 != count2)
                return -EBUSY;
        return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(monitor_fops, monitor_get, NULL, "%llu\n");
1591
1592 static int clk_gm20b_debugfs_init(struct gk20a *g)
1593 {
1594         struct dentry *d;
1595         struct gk20a_platform *platform = platform_get_drvdata(g->dev);
1596
1597         d = debugfs_create_file(
1598                 "rate", S_IRUGO|S_IWUSR, platform->debugfs, g, &rate_fops);
1599         if (!d)
1600                 goto err_out;
1601
1602         d = debugfs_create_file(
1603                 "pll_reg", S_IRUGO, platform->debugfs, g, &pll_reg_fops);
1604         if (!d)
1605                 goto err_out;
1606
1607         d = debugfs_create_file("pll_reg_raw",
1608                 S_IRUGO, platform->debugfs, g, &pll_reg_raw_fops);
1609         if (!d)
1610                 goto err_out;
1611
1612         d = debugfs_create_file(
1613                 "monitor", S_IRUGO, platform->debugfs, g, &monitor_fops);
1614         if (!d)
1615                 goto err_out;
1616
1617         return 0;
1618
1619 err_out:
1620         pr_err("%s: Failed to make debugfs node\n", __func__);
1621         debugfs_remove_recursive(platform->debugfs);
1622         return -ENOMEM;
1623 }
1624
1625 #endif /* CONFIG_DEBUG_FS */