Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Ingenic SoC CGU driver |
| 3 | * |
| 4 | * Copyright (c) 2013-2015 Imagination Technologies |
Paul Burton | fb615d6 | 2017-10-25 17:04:33 -0700 | [diff] [blame] | 5 | * Author: Paul Burton <paul.burton@mips.com> |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 6 | * |
| 7 | * This program is free software; you can redistribute it and/or |
| 8 | * modify it under the terms of the GNU General Public License as |
| 9 | * published by the Free Software Foundation; either version 2 of |
| 10 | * the License, or (at your option) any later version. |
| 11 | * |
| 12 | * This program is distributed in the hope that it will be useful, |
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 15 | * GNU General Public License for more details. |
| 16 | */ |
| 17 | |
| 18 | #include <linux/bitops.h> |
Stephen Boyd | e2a6570 | 2015-05-01 16:09:33 -0700 | [diff] [blame] | 19 | #include <linux/clk.h> |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 20 | #include <linux/clk-provider.h> |
| 21 | #include <linux/clkdev.h> |
| 22 | #include <linux/delay.h> |
Stephen Boyd | 62e59c4 | 2019-04-18 15:20:22 -0700 | [diff] [blame^] | 23 | #include <linux/io.h> |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 24 | #include <linux/math64.h> |
| 25 | #include <linux/of.h> |
| 26 | #include <linux/of_address.h> |
| 27 | #include <linux/slab.h> |
| 28 | #include <linux/spinlock.h> |
| 29 | #include "cgu.h" |
| 30 | |
/* Hertz per megahertz, used when computing PLL m/n parameters */
#define MHZ (1000 * 1000)
| 32 | |
| 33 | /** |
| 34 | * ingenic_cgu_gate_get() - get the value of clock gate register bit |
| 35 | * @cgu: reference to the CGU whose registers should be read |
| 36 | * @info: info struct describing the gate bit |
| 37 | * |
| 38 | * Retrieves the state of the clock gate bit described by info. The |
| 39 | * caller must hold cgu->lock. |
| 40 | * |
| 41 | * Return: true if the gate bit is set, else false. |
| 42 | */ |
| 43 | static inline bool |
| 44 | ingenic_cgu_gate_get(struct ingenic_cgu *cgu, |
| 45 | const struct ingenic_cgu_gate_info *info) |
| 46 | { |
Paul Cercueil | 7ef3844 | 2018-05-20 16:31:12 +0000 | [diff] [blame] | 47 | return !!(readl(cgu->base + info->reg) & BIT(info->bit)) |
| 48 | ^ info->clear_to_gate; |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 49 | } |
| 50 | |
| 51 | /** |
| 52 | * ingenic_cgu_gate_set() - set the value of clock gate register bit |
| 53 | * @cgu: reference to the CGU whose registers should be modified |
| 54 | * @info: info struct describing the gate bit |
| 55 | * @val: non-zero to gate a clock, otherwise zero |
| 56 | * |
| 57 | * Sets the given gate bit in order to gate or ungate a clock. |
| 58 | * |
| 59 | * The caller must hold cgu->lock. |
| 60 | */ |
| 61 | static inline void |
| 62 | ingenic_cgu_gate_set(struct ingenic_cgu *cgu, |
| 63 | const struct ingenic_cgu_gate_info *info, bool val) |
| 64 | { |
| 65 | u32 clkgr = readl(cgu->base + info->reg); |
| 66 | |
Paul Cercueil | 7ef3844 | 2018-05-20 16:31:12 +0000 | [diff] [blame] | 67 | if (val ^ info->clear_to_gate) |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 68 | clkgr |= BIT(info->bit); |
| 69 | else |
| 70 | clkgr &= ~BIT(info->bit); |
| 71 | |
| 72 | writel(clkgr, cgu->base + info->reg); |
| 73 | } |
| 74 | |
| 75 | /* |
| 76 | * PLL operations |
| 77 | */ |
| 78 | |
/*
 * ingenic_pll_recalc_rate() - compute the PLL output rate from hardware.
 *
 * Decodes the m (multiplier), n (divider) and od (post-divider) fields
 * from the PLL control register and returns (parent_rate * m) / (n * od).
 * If the PLL is bypassed, the parent rate is returned unchanged.
 */
static unsigned long
ingenic_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od_enc, od;
	bool bypass;
	unsigned long flags;
	u32 ctl;

	clk_info = &cgu->clock_info[ingenic_clk->idx];
	BUG_ON(clk_info->type != CGU_CLK_PLL);
	pll_info = &clk_info->pll;

	/* Take a consistent snapshot of the control register. */
	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	/* The register fields store m and n minus a per-SoC offset. */
	m = (ctl >> pll_info->m_shift) & GENMASK(pll_info->m_bits - 1, 0);
	m += pll_info->m_offset;
	n = (ctl >> pll_info->n_shift) & GENMASK(pll_info->n_bits - 1, 0);
	n += pll_info->n_offset;
	od_enc = ctl >> pll_info->od_shift;
	od_enc &= GENMASK(pll_info->od_bits - 1, 0);
	/* PLLs with no bypass bit can never be bypassed. */
	bypass = !pll_info->no_bypass_bit &&
		 !!(ctl & BIT(pll_info->bypass_bit));

	if (bypass)
		return parent_rate;

	/*
	 * Map the encoded od field back to a table index; index i encodes
	 * a post-divider of i + 1 (see ingenic_pll_set_rate).
	 */
	for (od = 0; od < pll_info->od_max; od++) {
		if (pll_info->od_encoding[od] == od_enc)
			break;
	}
	BUG_ON(od == pll_info->od_max);
	od++;

	return div_u64((u64)parent_rate * m, n * od);
}
| 120 | |
/*
 * ingenic_pll_calc() - calculate PLL m/n/od parameters for a target rate.
 * @clk_info: clock description holding the PLL field constraints
 * @rate: the desired output rate
 * @parent_rate: rate of the PLL input clock
 * @pm: optional output for the chosen multiplier
 * @pn: optional output for the chosen input divider
 * @pod: optional output for the chosen post-divider
 *
 * Return: the rate the PLL would actually produce with the chosen
 * parameters, which may differ from @rate.
 */
static unsigned long
ingenic_pll_calc(const struct ingenic_cgu_clk_info *clk_info,
		 unsigned long rate, unsigned long parent_rate,
		 unsigned *pm, unsigned *pn, unsigned *pod)
{
	const struct ingenic_cgu_pll_info *pll_info;
	unsigned m, n, od;

	pll_info = &clk_info->pll;
	/* The post-divider is held at 1; only m and n are tuned here. */
	od = 1;

	/*
	 * The frequency after the input divider must be between 10 and 50 MHz.
	 * The highest divider yields the best resolution.
	 */
	n = parent_rate / (10 * MHZ);
	/*
	 * NOTE(review): the clamp permits n == 1 << n_bits, but the value
	 * written to hardware is (n - n_offset), which only fits the field
	 * when it is at most (1 << n_bits) - 1 — confirm against the
	 * per-SoC offsets. Same applies to m below.
	 */
	n = min_t(unsigned, n, 1 << clk_info->pll.n_bits);
	n = max_t(unsigned, n, pll_info->n_offset);

	/* Work in whole MHz to keep the intermediate products small. */
	m = (rate / MHZ) * od * n / (parent_rate / MHZ);
	m = min_t(unsigned, m, 1 << clk_info->pll.m_bits);
	m = max_t(unsigned, m, pll_info->m_offset);

	if (pm)
		*pm = m;
	if (pn)
		*pn = n;
	if (pod)
		*pod = od;

	return div_u64((u64)parent_rate * m, n * od);
}
| 153 | |
Paul Cercueil | ab27eb4 | 2018-01-16 16:47:54 +0100 | [diff] [blame] | 154 | static inline const struct ingenic_cgu_clk_info *to_clk_info( |
| 155 | struct ingenic_clk *ingenic_clk) |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 156 | { |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 157 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 158 | const struct ingenic_cgu_clk_info *clk_info; |
| 159 | |
| 160 | clk_info = &cgu->clock_info[ingenic_clk->idx]; |
| 161 | BUG_ON(clk_info->type != CGU_CLK_PLL); |
| 162 | |
Paul Cercueil | ab27eb4 | 2018-01-16 16:47:54 +0100 | [diff] [blame] | 163 | return clk_info; |
| 164 | } |
| 165 | |
| 166 | static long |
| 167 | ingenic_pll_round_rate(struct clk_hw *hw, unsigned long req_rate, |
| 168 | unsigned long *prate) |
| 169 | { |
| 170 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 171 | const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk); |
| 172 | |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 173 | return ingenic_pll_calc(clk_info, req_rate, *prate, NULL, NULL, NULL); |
| 174 | } |
| 175 | |
/*
 * ingenic_pll_set_rate() - program the PLL m/n/od register fields.
 *
 * Programs the closest achievable rate; if it differs from the request
 * the mismatch is only logged. Always returns 0.
 */
static int
ingenic_pll_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	unsigned long rate, flags;
	unsigned int m, n, od;
	u32 ctl;

	rate = ingenic_pll_calc(clk_info, req_rate, parent_rate,
			       &m, &n, &od);
	if (rate != req_rate)
		pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
			clk_info->name, req_rate, rate);

	/* Read-modify-write the control register under the CGU lock. */
	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* Fields hold the value minus a per-SoC offset. */
	ctl &= ~(GENMASK(pll_info->m_bits - 1, 0) << pll_info->m_shift);
	ctl |= (m - pll_info->m_offset) << pll_info->m_shift;

	ctl &= ~(GENMASK(pll_info->n_bits - 1, 0) << pll_info->n_shift);
	ctl |= (n - pll_info->n_offset) << pll_info->n_shift;

	/* od is written via its encoding table, indexed by od - 1. */
	ctl &= ~(GENMASK(pll_info->od_bits - 1, 0) << pll_info->od_shift);
	ctl |= pll_info->od_encoding[od - 1] << pll_info->od_shift;

	writel(ctl, cgu->base + pll_info->reg);
	spin_unlock_irqrestore(&cgu->lock, flags);

	return 0;
}
| 211 | |
/*
 * ingenic_pll_enable() - enable the PLL and wait for it to stabilise.
 *
 * Clears the bypass bit, sets the enable bit, then polls the stable bit.
 *
 * Return: 0 on success, -EBUSY if the PLL did not stabilise in time.
 */
static int ingenic_pll_enable(struct clk_hw *hw)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk);
	const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll;
	const unsigned int timeout = 100;
	unsigned long flags;
	unsigned int i;
	u32 ctl;

	spin_lock_irqsave(&cgu->lock, flags);
	ctl = readl(cgu->base + pll_info->reg);

	/* Take the PLL out of bypass and switch it on. */
	ctl &= ~BIT(pll_info->bypass_bit);
	ctl |= BIT(pll_info->enable_bit);

	writel(ctl, cgu->base + pll_info->reg);

	/* wait for the PLL to stabilise */
	/*
	 * NOTE(review): this busy-waits up to 100ms with the spinlock held
	 * and interrupts disabled — presumably acceptable in this driver's
	 * usage; worth confirming for runtime rate changes.
	 */
	for (i = 0; i < timeout; i++) {
		ctl = readl(cgu->base + pll_info->reg);
		if (ctl & BIT(pll_info->stable_bit))
			break;
		mdelay(1);
	}

	spin_unlock_irqrestore(&cgu->lock, flags);

	/* i == timeout means the stable bit never came up. */
	if (i == timeout)
		return -EBUSY;

	return 0;
}
| 246 | |
Paul Cercueil | ab27eb4 | 2018-01-16 16:47:54 +0100 | [diff] [blame] | 247 | static void ingenic_pll_disable(struct clk_hw *hw) |
| 248 | { |
| 249 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 250 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 251 | const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk); |
| 252 | const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll; |
| 253 | unsigned long flags; |
| 254 | u32 ctl; |
| 255 | |
| 256 | spin_lock_irqsave(&cgu->lock, flags); |
| 257 | ctl = readl(cgu->base + pll_info->reg); |
| 258 | |
| 259 | ctl &= ~BIT(pll_info->enable_bit); |
| 260 | |
| 261 | writel(ctl, cgu->base + pll_info->reg); |
| 262 | spin_unlock_irqrestore(&cgu->lock, flags); |
| 263 | } |
| 264 | |
| 265 | static int ingenic_pll_is_enabled(struct clk_hw *hw) |
| 266 | { |
| 267 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 268 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 269 | const struct ingenic_cgu_clk_info *clk_info = to_clk_info(ingenic_clk); |
| 270 | const struct ingenic_cgu_pll_info *pll_info = &clk_info->pll; |
| 271 | unsigned long flags; |
| 272 | u32 ctl; |
| 273 | |
| 274 | spin_lock_irqsave(&cgu->lock, flags); |
| 275 | ctl = readl(cgu->base + pll_info->reg); |
| 276 | spin_unlock_irqrestore(&cgu->lock, flags); |
| 277 | |
| 278 | return !!(ctl & BIT(pll_info->enable_bit)); |
| 279 | } |
| 280 | |
/* clk_ops implementation for PLL clocks */
static const struct clk_ops ingenic_pll_ops = {
	.recalc_rate = ingenic_pll_recalc_rate,
	.round_rate = ingenic_pll_round_rate,
	.set_rate = ingenic_pll_set_rate,

	.enable = ingenic_pll_enable,
	.disable = ingenic_pll_disable,
	.is_enabled = ingenic_pll_is_enabled,
};
| 290 | |
| 291 | /* |
| 292 | * Operations for all non-PLL clocks |
| 293 | */ |
| 294 | |
| 295 | static u8 ingenic_clk_get_parent(struct clk_hw *hw) |
| 296 | { |
| 297 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 298 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 299 | const struct ingenic_cgu_clk_info *clk_info; |
| 300 | u32 reg; |
| 301 | u8 i, hw_idx, idx = 0; |
| 302 | |
| 303 | clk_info = &cgu->clock_info[ingenic_clk->idx]; |
| 304 | |
| 305 | if (clk_info->type & CGU_CLK_MUX) { |
| 306 | reg = readl(cgu->base + clk_info->mux.reg); |
| 307 | hw_idx = (reg >> clk_info->mux.shift) & |
| 308 | GENMASK(clk_info->mux.bits - 1, 0); |
| 309 | |
| 310 | /* |
| 311 | * Convert the hardware index to the parent index by skipping |
| 312 | * over any -1's in the parents array. |
| 313 | */ |
| 314 | for (i = 0; i < hw_idx; i++) { |
| 315 | if (clk_info->parents[i] != -1) |
| 316 | idx++; |
| 317 | } |
| 318 | } |
| 319 | |
| 320 | return idx; |
| 321 | } |
| 322 | |
/*
 * ingenic_clk_set_parent() - select a mux parent by framework index.
 *
 * Translates the clk framework's parent index into the hardware mux
 * field value (skipping -1 entries in the parents array) and writes it.
 *
 * Return: 0 on success, -EINVAL if a non-zero index is requested for a
 * clock that has no mux.
 */
static int ingenic_clk_set_parent(struct clk_hw *hw, u8 idx)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	unsigned long flags;
	u8 curr_idx, hw_idx, num_poss;
	u32 reg, mask;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_MUX) {
		/*
		 * Convert the parent index to the hardware index by adding
		 * 1 for any -1 in the parents array preceding the given
		 * index. That is, we want the index of idx'th entry in
		 * clk_info->parents which does not equal -1.
		 */
		hw_idx = curr_idx = 0;
		num_poss = 1 << clk_info->mux.bits;
		for (; hw_idx < num_poss; hw_idx++) {
			if (clk_info->parents[hw_idx] == -1)
				continue;
			if (curr_idx == idx)
				break;
			curr_idx++;
		}

		/* idx should always be a valid parent */
		BUG_ON(curr_idx != idx);

		mask = GENMASK(clk_info->mux.bits - 1, 0);
		mask <<= clk_info->mux.shift;

		spin_lock_irqsave(&cgu->lock, flags);

		/* write the register */
		reg = readl(cgu->base + clk_info->mux.reg);
		reg &= ~mask;
		reg |= hw_idx << clk_info->mux.shift;
		writel(reg, cgu->base + clk_info->mux.reg);

		spin_unlock_irqrestore(&cgu->lock, flags);
		return 0;
	}

	/* No mux: only "parent 0" is a legal request. */
	return idx ? -EINVAL : 0;
}
| 371 | |
| 372 | static unsigned long |
| 373 | ingenic_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) |
| 374 | { |
| 375 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 376 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 377 | const struct ingenic_cgu_clk_info *clk_info; |
| 378 | unsigned long rate = parent_rate; |
| 379 | u32 div_reg, div; |
| 380 | |
| 381 | clk_info = &cgu->clock_info[ingenic_clk->idx]; |
| 382 | |
| 383 | if (clk_info->type & CGU_CLK_DIV) { |
| 384 | div_reg = readl(cgu->base + clk_info->div.reg); |
| 385 | div = (div_reg >> clk_info->div.shift) & |
| 386 | GENMASK(clk_info->div.bits - 1, 0); |
| 387 | div += 1; |
Harvey Hunt | 4afe2d1 | 2016-05-09 17:29:52 +0100 | [diff] [blame] | 388 | div *= clk_info->div.div; |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 389 | |
| 390 | rate /= div; |
Paul Cercueil | e6cfa643 | 2018-01-16 16:47:52 +0100 | [diff] [blame] | 391 | } else if (clk_info->type & CGU_CLK_FIXDIV) { |
| 392 | rate /= clk_info->fixdiv.div; |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 393 | } |
| 394 | |
| 395 | return rate; |
| 396 | } |
| 397 | |
| 398 | static unsigned |
| 399 | ingenic_clk_calc_div(const struct ingenic_cgu_clk_info *clk_info, |
| 400 | unsigned long parent_rate, unsigned long req_rate) |
| 401 | { |
| 402 | unsigned div; |
| 403 | |
| 404 | /* calculate the divide */ |
| 405 | div = DIV_ROUND_UP(parent_rate, req_rate); |
| 406 | |
| 407 | /* and impose hardware constraints */ |
| 408 | div = min_t(unsigned, div, 1 << clk_info->div.bits); |
| 409 | div = max_t(unsigned, div, 1); |
| 410 | |
Harvey Hunt | 4afe2d1 | 2016-05-09 17:29:52 +0100 | [diff] [blame] | 411 | /* |
| 412 | * If the divider value itself must be divided before being written to |
| 413 | * the divider register, we must ensure we don't have any bits set that |
| 414 | * would be lost as a result of doing so. |
| 415 | */ |
| 416 | div /= clk_info->div.div; |
| 417 | div *= clk_info->div.div; |
| 418 | |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 419 | return div; |
| 420 | } |
| 421 | |
| 422 | static long |
| 423 | ingenic_clk_round_rate(struct clk_hw *hw, unsigned long req_rate, |
| 424 | unsigned long *parent_rate) |
| 425 | { |
| 426 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 427 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 428 | const struct ingenic_cgu_clk_info *clk_info; |
Paul Cercueil | bc5d922 | 2019-01-27 23:09:20 -0300 | [diff] [blame] | 429 | unsigned int div = 1; |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 430 | |
| 431 | clk_info = &cgu->clock_info[ingenic_clk->idx]; |
| 432 | |
| 433 | if (clk_info->type & CGU_CLK_DIV) |
Paul Cercueil | bc5d922 | 2019-01-27 23:09:20 -0300 | [diff] [blame] | 434 | div = ingenic_clk_calc_div(clk_info, *parent_rate, req_rate); |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 435 | else if (clk_info->type & CGU_CLK_FIXDIV) |
Paul Cercueil | bc5d922 | 2019-01-27 23:09:20 -0300 | [diff] [blame] | 436 | div = clk_info->fixdiv.div; |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 437 | |
Paul Cercueil | bc5d922 | 2019-01-27 23:09:20 -0300 | [diff] [blame] | 438 | return DIV_ROUND_UP(*parent_rate, div); |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 439 | } |
| 440 | |
/*
 * ingenic_clk_set_rate() - program a clock's divider for an exact rate.
 *
 * Only clocks with a CGU_CLK_DIV divider are settable here; the request
 * must be exactly achievable or -EINVAL is returned.
 *
 * Return: 0 on success, -EINVAL for unachievable rates or non-DIV
 * clocks, -EBUSY if the divider change did not take effect in time.
 */
static int
ingenic_clk_set_rate(struct clk_hw *hw, unsigned long req_rate,
		     unsigned long parent_rate)
{
	struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw);
	struct ingenic_cgu *cgu = ingenic_clk->cgu;
	const struct ingenic_cgu_clk_info *clk_info;
	const unsigned timeout = 100;
	unsigned long rate, flags;
	unsigned div, i;
	u32 reg, mask;
	int ret = 0;

	clk_info = &cgu->clock_info[ingenic_clk->idx];

	if (clk_info->type & CGU_CLK_DIV) {
		div = ingenic_clk_calc_div(clk_info, parent_rate, req_rate);
		rate = DIV_ROUND_UP(parent_rate, div);

		/* Refuse inexact rates rather than silently approximating. */
		if (rate != req_rate)
			return -EINVAL;

		spin_lock_irqsave(&cgu->lock, flags);
		reg = readl(cgu->base + clk_info->div.reg);

		/* update the divide */
		mask = GENMASK(clk_info->div.bits - 1, 0);
		reg &= ~(mask << clk_info->div.shift);
		reg |= ((div / clk_info->div.div) - 1) << clk_info->div.shift;

		/* clear the stop bit */
		if (clk_info->div.stop_bit != -1)
			reg &= ~BIT(clk_info->div.stop_bit);

		/* set the change enable bit */
		if (clk_info->div.ce_bit != -1)
			reg |= BIT(clk_info->div.ce_bit);

		/* update the hardware */
		writel(reg, cgu->base + clk_info->div.reg);

		/* wait for the change to take effect */
		/*
		 * NOTE(review): up to 100ms of mdelay() with the spinlock
		 * held and IRQs off — presumably acceptable here; confirm.
		 */
		if (clk_info->div.busy_bit != -1) {
			for (i = 0; i < timeout; i++) {
				reg = readl(cgu->base + clk_info->div.reg);
				if (!(reg & BIT(clk_info->div.busy_bit)))
					break;
				mdelay(1);
			}
			if (i == timeout)
				ret = -EBUSY;
		}

		spin_unlock_irqrestore(&cgu->lock, flags);
		return ret;
	}

	return -EINVAL;
}
| 500 | |
| 501 | static int ingenic_clk_enable(struct clk_hw *hw) |
| 502 | { |
| 503 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 504 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 505 | const struct ingenic_cgu_clk_info *clk_info; |
| 506 | unsigned long flags; |
| 507 | |
| 508 | clk_info = &cgu->clock_info[ingenic_clk->idx]; |
| 509 | |
| 510 | if (clk_info->type & CGU_CLK_GATE) { |
| 511 | /* ungate the clock */ |
| 512 | spin_lock_irqsave(&cgu->lock, flags); |
| 513 | ingenic_cgu_gate_set(cgu, &clk_info->gate, false); |
| 514 | spin_unlock_irqrestore(&cgu->lock, flags); |
Paul Cercueil | 261a831 | 2018-05-20 16:31:13 +0000 | [diff] [blame] | 515 | |
| 516 | if (clk_info->gate.delay_us) |
| 517 | udelay(clk_info->gate.delay_us); |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 518 | } |
| 519 | |
| 520 | return 0; |
| 521 | } |
| 522 | |
| 523 | static void ingenic_clk_disable(struct clk_hw *hw) |
| 524 | { |
| 525 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 526 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 527 | const struct ingenic_cgu_clk_info *clk_info; |
| 528 | unsigned long flags; |
| 529 | |
| 530 | clk_info = &cgu->clock_info[ingenic_clk->idx]; |
| 531 | |
| 532 | if (clk_info->type & CGU_CLK_GATE) { |
| 533 | /* gate the clock */ |
| 534 | spin_lock_irqsave(&cgu->lock, flags); |
| 535 | ingenic_cgu_gate_set(cgu, &clk_info->gate, true); |
| 536 | spin_unlock_irqrestore(&cgu->lock, flags); |
| 537 | } |
| 538 | } |
| 539 | |
| 540 | static int ingenic_clk_is_enabled(struct clk_hw *hw) |
| 541 | { |
| 542 | struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); |
| 543 | struct ingenic_cgu *cgu = ingenic_clk->cgu; |
| 544 | const struct ingenic_cgu_clk_info *clk_info; |
| 545 | unsigned long flags; |
| 546 | int enabled = 1; |
| 547 | |
| 548 | clk_info = &cgu->clock_info[ingenic_clk->idx]; |
| 549 | |
| 550 | if (clk_info->type & CGU_CLK_GATE) { |
| 551 | spin_lock_irqsave(&cgu->lock, flags); |
| 552 | enabled = !ingenic_cgu_gate_get(cgu, &clk_info->gate); |
| 553 | spin_unlock_irqrestore(&cgu->lock, flags); |
| 554 | } |
| 555 | |
| 556 | return enabled; |
| 557 | } |
| 558 | |
/* clk_ops implementation for all non-PLL (mux/div/gate) clocks */
static const struct clk_ops ingenic_clk_ops = {
	.get_parent = ingenic_clk_get_parent,
	.set_parent = ingenic_clk_set_parent,

	.recalc_rate = ingenic_clk_recalc_rate,
	.round_rate = ingenic_clk_round_rate,
	.set_rate = ingenic_clk_set_rate,

	.enable = ingenic_clk_enable,
	.disable = ingenic_clk_disable,
	.is_enabled = ingenic_clk_is_enabled,
};
| 571 | |
| 572 | /* |
| 573 | * Setup functions. |
| 574 | */ |
| 575 | |
| 576 | static int ingenic_register_clock(struct ingenic_cgu *cgu, unsigned idx) |
| 577 | { |
| 578 | const struct ingenic_cgu_clk_info *clk_info = &cgu->clock_info[idx]; |
| 579 | struct clk_init_data clk_init; |
| 580 | struct ingenic_clk *ingenic_clk = NULL; |
| 581 | struct clk *clk, *parent; |
| 582 | const char *parent_names[4]; |
| 583 | unsigned caps, i, num_possible; |
| 584 | int err = -EINVAL; |
| 585 | |
| 586 | BUILD_BUG_ON(ARRAY_SIZE(clk_info->parents) > ARRAY_SIZE(parent_names)); |
| 587 | |
| 588 | if (clk_info->type == CGU_CLK_EXT) { |
| 589 | clk = of_clk_get_by_name(cgu->np, clk_info->name); |
| 590 | if (IS_ERR(clk)) { |
| 591 | pr_err("%s: no external clock '%s' provided\n", |
| 592 | __func__, clk_info->name); |
| 593 | err = -ENODEV; |
| 594 | goto out; |
| 595 | } |
| 596 | err = clk_register_clkdev(clk, clk_info->name, NULL); |
| 597 | if (err) { |
| 598 | clk_put(clk); |
| 599 | goto out; |
| 600 | } |
| 601 | cgu->clocks.clks[idx] = clk; |
| 602 | return 0; |
| 603 | } |
| 604 | |
| 605 | if (!clk_info->type) { |
| 606 | pr_err("%s: no clock type specified for '%s'\n", __func__, |
| 607 | clk_info->name); |
| 608 | goto out; |
| 609 | } |
| 610 | |
| 611 | ingenic_clk = kzalloc(sizeof(*ingenic_clk), GFP_KERNEL); |
| 612 | if (!ingenic_clk) { |
| 613 | err = -ENOMEM; |
| 614 | goto out; |
| 615 | } |
| 616 | |
| 617 | ingenic_clk->hw.init = &clk_init; |
| 618 | ingenic_clk->cgu = cgu; |
| 619 | ingenic_clk->idx = idx; |
| 620 | |
| 621 | clk_init.name = clk_info->name; |
| 622 | clk_init.flags = 0; |
| 623 | clk_init.parent_names = parent_names; |
| 624 | |
| 625 | caps = clk_info->type; |
| 626 | |
| 627 | if (caps & (CGU_CLK_MUX | CGU_CLK_CUSTOM)) { |
| 628 | clk_init.num_parents = 0; |
| 629 | |
| 630 | if (caps & CGU_CLK_MUX) |
| 631 | num_possible = 1 << clk_info->mux.bits; |
| 632 | else |
| 633 | num_possible = ARRAY_SIZE(clk_info->parents); |
| 634 | |
| 635 | for (i = 0; i < num_possible; i++) { |
| 636 | if (clk_info->parents[i] == -1) |
| 637 | continue; |
| 638 | |
| 639 | parent = cgu->clocks.clks[clk_info->parents[i]]; |
| 640 | parent_names[clk_init.num_parents] = |
| 641 | __clk_get_name(parent); |
| 642 | clk_init.num_parents++; |
| 643 | } |
| 644 | |
| 645 | BUG_ON(!clk_init.num_parents); |
| 646 | BUG_ON(clk_init.num_parents > ARRAY_SIZE(parent_names)); |
| 647 | } else { |
| 648 | BUG_ON(clk_info->parents[0] == -1); |
| 649 | clk_init.num_parents = 1; |
| 650 | parent = cgu->clocks.clks[clk_info->parents[0]]; |
| 651 | parent_names[0] = __clk_get_name(parent); |
| 652 | } |
| 653 | |
| 654 | if (caps & CGU_CLK_CUSTOM) { |
| 655 | clk_init.ops = clk_info->custom.clk_ops; |
| 656 | |
| 657 | caps &= ~CGU_CLK_CUSTOM; |
| 658 | |
| 659 | if (caps) { |
| 660 | pr_err("%s: custom clock may not be combined with type 0x%x\n", |
| 661 | __func__, caps); |
| 662 | goto out; |
| 663 | } |
| 664 | } else if (caps & CGU_CLK_PLL) { |
| 665 | clk_init.ops = &ingenic_pll_ops; |
Paul Cercueil | ab27eb4 | 2018-01-16 16:47:54 +0100 | [diff] [blame] | 666 | clk_init.flags |= CLK_SET_RATE_GATE; |
Paul Burton | b066303 | 2015-05-24 16:11:35 +0100 | [diff] [blame] | 667 | |
| 668 | caps &= ~CGU_CLK_PLL; |
| 669 | |
| 670 | if (caps) { |
| 671 | pr_err("%s: PLL may not be combined with type 0x%x\n", |
| 672 | __func__, caps); |
| 673 | goto out; |
| 674 | } |
| 675 | } else { |
| 676 | clk_init.ops = &ingenic_clk_ops; |
| 677 | } |
| 678 | |
| 679 | /* nothing to do for gates or fixed dividers */ |
| 680 | caps &= ~(CGU_CLK_GATE | CGU_CLK_FIXDIV); |
| 681 | |
| 682 | if (caps & CGU_CLK_MUX) { |
| 683 | if (!(caps & CGU_CLK_MUX_GLITCHFREE)) |
| 684 | clk_init.flags |= CLK_SET_PARENT_GATE; |
| 685 | |
| 686 | caps &= ~(CGU_CLK_MUX | CGU_CLK_MUX_GLITCHFREE); |
| 687 | } |
| 688 | |
| 689 | if (caps & CGU_CLK_DIV) { |
| 690 | caps &= ~CGU_CLK_DIV; |
| 691 | } else { |
| 692 | /* pass rate changes to the parent clock */ |
| 693 | clk_init.flags |= CLK_SET_RATE_PARENT; |
| 694 | } |
| 695 | |
| 696 | if (caps) { |
| 697 | pr_err("%s: unknown clock type 0x%x\n", __func__, caps); |
| 698 | goto out; |
| 699 | } |
| 700 | |
| 701 | clk = clk_register(NULL, &ingenic_clk->hw); |
| 702 | if (IS_ERR(clk)) { |
| 703 | pr_err("%s: failed to register clock '%s'\n", __func__, |
| 704 | clk_info->name); |
| 705 | err = PTR_ERR(clk); |
| 706 | goto out; |
| 707 | } |
| 708 | |
| 709 | err = clk_register_clkdev(clk, clk_info->name, NULL); |
| 710 | if (err) |
| 711 | goto out; |
| 712 | |
| 713 | cgu->clocks.clks[idx] = clk; |
| 714 | out: |
| 715 | if (err) |
| 716 | kfree(ingenic_clk); |
| 717 | return err; |
| 718 | } |
| 719 | |
| 720 | struct ingenic_cgu * |
| 721 | ingenic_cgu_new(const struct ingenic_cgu_clk_info *clock_info, |
| 722 | unsigned num_clocks, struct device_node *np) |
| 723 | { |
| 724 | struct ingenic_cgu *cgu; |
| 725 | |
| 726 | cgu = kzalloc(sizeof(*cgu), GFP_KERNEL); |
| 727 | if (!cgu) |
| 728 | goto err_out; |
| 729 | |
| 730 | cgu->base = of_iomap(np, 0); |
| 731 | if (!cgu->base) { |
| 732 | pr_err("%s: failed to map CGU registers\n", __func__); |
| 733 | goto err_out_free; |
| 734 | } |
| 735 | |
| 736 | cgu->np = np; |
| 737 | cgu->clock_info = clock_info; |
| 738 | cgu->clocks.clk_num = num_clocks; |
| 739 | |
| 740 | spin_lock_init(&cgu->lock); |
| 741 | |
| 742 | return cgu; |
| 743 | |
| 744 | err_out_free: |
| 745 | kfree(cgu); |
| 746 | err_out: |
| 747 | return NULL; |
| 748 | } |
| 749 | |
/*
 * ingenic_cgu_register_clocks() - register all of a CGU's clocks.
 * @cgu: the CGU whose clocks should be registered
 *
 * Registers every clock described by cgu->clock_info and exposes them
 * as an OF clock provider. On failure, every clock registered so far is
 * rolled back (unregistered, or put for external clocks).
 *
 * Return: 0 on success, otherwise a negative error code.
 */
int ingenic_cgu_register_clocks(struct ingenic_cgu *cgu)
{
	unsigned i;
	int err;

	cgu->clocks.clks = kcalloc(cgu->clocks.clk_num, sizeof(struct clk *),
				   GFP_KERNEL);
	if (!cgu->clocks.clks) {
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < cgu->clocks.clk_num; i++) {
		err = ingenic_register_clock(cgu, i);
		if (err)
			goto err_out_unregister;
	}

	err = of_clk_add_provider(cgu->np, of_clk_src_onecell_get,
				  &cgu->clocks);
	if (err)
		goto err_out_unregister;

	return 0;

err_out_unregister:
	/* NULL slots were never registered; skip them during rollback. */
	for (i = 0; i < cgu->clocks.clk_num; i++) {
		if (!cgu->clocks.clks[i])
			continue;
		/* External clocks were acquired with clk_get, not registered. */
		if (cgu->clock_info[i].type & CGU_CLK_EXT)
			clk_put(cgu->clocks.clks[i]);
		else
			clk_unregister(cgu->clocks.clks[i]);
	}
	kfree(cgu->clocks.clks);
err_out:
	return err;
}