3 * Copyright (C) 2010 Google, Inc.
4 * Copyright (c) 2012 NVIDIA CORPORATION. All rights reserved.
7 * Colin Cross <ccross@google.com>
9 * Copyright (C) 2010-2012 NVIDIA Corporation
11 * This software is licensed under the terms of the GNU General Public
12 * License version 2, as published by the Free Software Foundation, and
13 * may be copied, distributed, and modified under those terms.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
22 #include <linux/kernel.h>
23 #include <linux/clk.h>
24 #include <linux/clkdev.h>
25 #include <linux/debugfs.h>
26 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/list.h>
29 #include <linux/module.h>
30 #include <linux/seq_file.h>
31 #include <linux/slab.h>
32 #include <linux/clk/tegra.h>
33 #include <linux/uaccess.h>
34 #include <trace/events/power.h>
41 /* Global data of Tegra CPU CAR ops */
42 struct tegra_cpu_car_ops *tegra_cpu_car_ops;
44 #define DISABLE_BOOT_CLOCKS 1
49 * Each struct clk has a lock. Depending on the cansleep flag, that lock
50 * may be a spinlock or a mutex. For most clocks, the spinlock is sufficient,
51 * and using the spinlock allows the clock to be manipulated from an interrupt
52 * or while holding a spinlock. Some clocks may need to adjust a regulator
53 * in order to maintain the required voltage for a new frequency. Those
54 * clocks set the cansleep flag, and take a mutex so that the regulator api
55 * can be used while holding the lock.
57 * To avoid AB-BA locking problems, locks must always be traversed from child
58 * clock to parent clock. For example, when enabling a clock, the clock's lock
59 * is taken, and then clk_enable is called on the parent, which take's the
60 * parent clock's lock. There are two exceptions to this ordering:
61 * 1. When setting a clock as cansleep, in which case the entire list of clocks
62 * is traversed to set the children as cansleep as well. This must occur
63 * during init, before any calls to clk_get, so no other clock locks can
65 * 2. When dumping the clock tree through debugfs. In this case, clk_lock_all
66 * is called, which attemps to iterate through the entire list of clocks
67 * and take every clock lock. If any call to clk_trylock fails, a locked
68 * clocks are unlocked, and the process is retried. When all the locks
69 * are held, the only clock operation that can be called is
70 * clk_get_rate_all_locked.
72 * Within a single clock, no clock operation can call another clock operation
73 * on itself, except for clk_xxx_locked. Any clock operation can call any other
74 * clock operation on any of it's possible parents.
76 * clk_set_cansleep is used to mark a clock as sleeping. It is called during
77 * dvfs (Dynamic Voltage and Frequency Scaling) init on any clock that has a
78 * dvfs requirement, and propagated to all possible children of sleeping clock.
80 * An additional mutex, clock_list_lock, is used to protect the list of all
83 * The clock operations must lock internally to protect against
84 * read-modify-write on registers that are shared by multiple clocks
87 /* FIXME: remove and never ignore overclock */
88 #define IGNORE_PARENT_OVERCLOCK 0
90 static DEFINE_MUTEX(clock_list_lock);
91 static LIST_HEAD(clocks);
93 #ifndef CONFIG_COMMON_CLK
94 struct clk *tegra_get_clock_by_name(const char *name)
97 struct clk *ret = NULL;
98 mutex_lock(&clock_list_lock);
99 list_for_each_entry(c, &clocks, node) {
100 if (strcmp(c->name, name) == 0) {
105 mutex_unlock(&clock_list_lock);
108 EXPORT_SYMBOL(tegra_get_clock_by_name);
110 static void clk_stats_update(struct clk *c)
112 u64 cur_jiffies = get_jiffies_64();
115 c->stats.time_on = c->stats.time_on +
116 (jiffies64_to_cputime64(cur_jiffies) -
117 (c->stats.last_update));
120 c->stats.last_update = cur_jiffies;
123 /* Must be called with clk_lock(c) held */
124 static unsigned long clk_predict_rate_from_parent(struct clk *c, struct clk *p)
128 rate = clk_get_rate(p);
130 if (c->mul != 0 && c->div != 0) {
132 rate += c->div - 1; /* round up */
133 do_div(rate, c->div);
139 unsigned long clk_get_max_rate(struct clk *c)
144 unsigned long clk_get_min_rate(struct clk *c)
149 /* Must be called with clk_lock(c) held */
150 unsigned long clk_get_rate_locked(struct clk *c)
155 rate = clk_predict_rate_from_parent(c, c->parent);
162 unsigned long clk_get_rate(struct clk *c)
167 clk_lock_save(c, &flags);
169 rate = clk_get_rate_locked(c);
171 clk_unlock_restore(c, &flags);
175 EXPORT_SYMBOL(clk_get_rate);
177 static void __clk_set_cansleep(struct clk *c)
181 BUG_ON(mutex_is_locked(&c->mutex));
182 BUG_ON(spin_is_locked(&c->spinlock));
184 /* Make sure that all possible descendants of sleeping clock are
185 marked as sleeping (to eliminate "sleeping parent - non-sleeping
186 child" relationship */
187 list_for_each_entry(child, &clocks, node) {
188 bool possible_parent = (child->parent == c);
190 if (!possible_parent && child->inputs) {
191 for (i = 0; child->inputs[i].input; i++) {
192 if ((child->inputs[i].input == c) &&
193 tegra_clk_is_parent_allowed(child, c)) {
194 possible_parent = true;
201 __clk_set_cansleep(child);
207 /* Must be called before any clk_get calls */
208 void clk_set_cansleep(struct clk *c)
211 mutex_lock(&clock_list_lock);
212 __clk_set_cansleep(c);
213 mutex_unlock(&clock_list_lock);
216 int clk_reparent(struct clk *c, struct clk *parent)
222 void clk_init(struct clk *c)
226 if (c->ops && c->ops->init)
229 if (!c->ops || !c->ops->enable) {
233 c->state = c->parent->state;
237 c->stats.last_update = get_jiffies_64();
239 mutex_lock(&clock_list_lock);
240 list_add(&c->node, &clocks);
241 mutex_unlock(&clock_list_lock);
244 static int clk_enable_locked(struct clk *c)
248 if (clk_is_auto_dvfs(c)) {
249 ret = tegra_dvfs_set_rate(c, clk_get_rate_locked(c));
254 if (c->refcnt == 0) {
256 ret = clk_enable(c->parent);
261 if (c->ops && c->ops->enable) {
262 ret = c->ops->enable(c);
263 trace_clock_enable(c->name, 1, 0);
266 clk_disable(c->parent);
280 int clk_enable(struct clk *c)
285 clk_lock_save(c, &flags);
286 ret = clk_enable_locked(c);
287 clk_unlock_restore(c, &flags);
290 EXPORT_SYMBOL(clk_enable);
292 static void clk_disable_locked(struct clk *c)
294 if (c->refcnt == 0) {
295 WARN(1, "Attempting to disable clock %s with refcnt 0", c->name);
298 if (c->refcnt == 1) {
299 if (c->ops && c->ops->disable) {
300 trace_clock_disable(c->name, 0, 0);
304 clk_disable(c->parent);
311 if (clk_is_auto_dvfs(c) && c->refcnt == 0)
312 tegra_dvfs_set_rate(c, 0);
315 void clk_disable(struct clk *c)
319 clk_lock_save(c, &flags);
320 clk_disable_locked(c);
321 clk_unlock_restore(c, &flags);
323 EXPORT_SYMBOL(clk_disable);
325 static int clk_rate_change_notify(struct clk *c, unsigned long rate)
327 if (!c->rate_change_nh)
329 return raw_notifier_call_chain(c->rate_change_nh, rate, NULL);
332 int clk_set_parent_locked(struct clk *c, struct clk *parent)
335 unsigned long new_rate;
336 unsigned long old_rate;
337 bool disable = false;
339 if (!c->ops || !c->ops->set_parent) {
344 if (!tegra_clk_is_parent_allowed(c, parent)) {
349 new_rate = clk_predict_rate_from_parent(c, parent);
350 old_rate = clk_get_rate_locked(c);
352 if (new_rate > clk_get_max_rate(c)) {
354 pr_err("Failed to set parent %s for %s (violates clock limit"
355 " %lu)\n", parent->name, c->name, clk_get_max_rate(c));
356 #if !IGNORE_PARENT_OVERCLOCK
362 /* The new clock control register setting does not take effect if
363 * clock is disabled. Later, when the clock is enabled it would run
364 * for several cycles on the old parent, which may hang h/w if the
365 * parent is already disabled. To guarantee h/w switch to the new
366 * setting enable clock while setting parent.
368 if ((c->refcnt == 0) && (c->flags & MUX)) {
369 pr_debug("Setting parent of clock %s with refcnt 0\n", c->name);
370 ret = clk_enable_locked(c);
376 if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
377 (!c->parent || new_rate > old_rate)) {
378 ret = tegra_dvfs_set_rate(c, new_rate);
383 ret = c->ops->set_parent(c, parent);
387 if (clk_is_auto_dvfs(c) && c->refcnt > 0 &&
389 ret = tegra_dvfs_set_rate(c, new_rate);
391 if (new_rate != old_rate)
392 clk_rate_change_notify(c, new_rate);
396 clk_disable_locked(c);
401 int clk_set_parent(struct clk *c, struct clk *parent)
406 clk_lock_save(c, &flags);
407 ret = clk_set_parent_locked(c, parent);
408 clk_unlock_restore(c, &flags);
412 EXPORT_SYMBOL(clk_set_parent);
414 struct clk *clk_get_parent(struct clk *c)
418 EXPORT_SYMBOL(clk_get_parent);
420 int clk_set_rate_locked(struct clk *c, unsigned long rate)
423 unsigned long old_rate, max_rate;
425 bool disable = false;
427 old_rate = clk_get_rate_locked(c);
429 max_rate = clk_get_max_rate(c);
433 if (c->ops && c->ops->round_rate) {
434 new_rate = c->ops->round_rate(c, rate);
444 /* The new clock control register setting does not take effect if
445 * clock is disabled. Later, when the clock is enabled it would run
446 * for several cycles on the old rate, which may over-clock module
447 * at given voltage. To guarantee h/w switch to the new setting
448 * enable clock while setting rate.
450 if ((c->refcnt == 0) && (c->flags & (DIV_U71 | DIV_U16)) &&
451 clk_is_auto_dvfs(c)) {
452 pr_debug("Setting rate of clock %s with refcnt 0\n", c->name);
453 ret = clk_enable_locked(c);
459 if (clk_is_auto_dvfs(c) && rate > old_rate && c->refcnt > 0) {
460 ret = tegra_dvfs_set_rate(c, rate);
465 trace_clock_set_rate(c->name, rate, 0);
466 ret = c->ops->set_rate(c, rate);
470 if (clk_is_auto_dvfs(c) && rate < old_rate && c->refcnt > 0)
471 ret = tegra_dvfs_set_rate(c, rate);
473 if (rate != old_rate)
474 clk_rate_change_notify(c, rate);
478 clk_disable_locked(c);
482 int clk_set_rate(struct clk *c, unsigned long rate)
487 if (!c->ops || !c->ops->set_rate)
490 clk_lock_save(c, &flags);
492 ret = clk_set_rate_locked(c, rate);
494 clk_unlock_restore(c, &flags);
498 EXPORT_SYMBOL(clk_set_rate);
500 /* Must be called with clocks lock and all indvidual clock locks held */
501 unsigned long clk_get_rate_all_locked(struct clk *c)
510 if (c->mul != 0 && c->div != 0) {
524 long clk_round_rate_locked(struct clk *c, unsigned long rate)
526 unsigned long max_rate;
529 if (!c->ops || !c->ops->round_rate) {
534 max_rate = clk_get_max_rate(c);
538 ret = c->ops->round_rate(c, rate);
544 long clk_round_rate(struct clk *c, unsigned long rate)
549 clk_lock_save(c, &flags);
550 ret = clk_round_rate_locked(c, rate);
551 clk_unlock_restore(c, &flags);
554 EXPORT_SYMBOL(clk_round_rate);
556 static int tegra_clk_clip_rate_for_parent(struct clk *c, struct clk *p)
558 unsigned long flags, max_rate, old_rate, new_rate;
560 clk_lock_save(c, &flags);
562 max_rate = clk_get_max_rate(c);
563 new_rate = clk_predict_rate_from_parent(c, p);
564 old_rate = clk_get_rate_locked(c);
566 clk_unlock_restore(c, &flags);
568 if (new_rate > max_rate) {
571 do_div(rate, new_rate);
573 return clk_set_rate(c, (unsigned long)rate);
578 static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
585 c = tegra_get_clock_by_name(table->name);
588 pr_warning("Unable to initialize clock %s\n",
594 p = tegra_get_clock_by_name(table->parent);
596 pr_warning("Unable to find parent %s of clock %s\n",
597 table->parent, table->name);
601 if (c->parent != p) {
602 ret = tegra_clk_clip_rate_for_parent(c, p);
604 pr_warning("Unable to clip rate for parent %s"
605 " of clock %s: %d\n",
606 table->parent, table->name, ret);
610 ret = clk_set_parent(c, p);
612 pr_warning("Unable to set parent %s of clock %s: %d\n",
613 table->parent, table->name, ret);
619 if (table->rate && table->rate != clk_get_rate(c)) {
620 ret = clk_set_rate(c, table->rate);
622 pr_warning("Unable to set clock %s to rate %lu: %d\n",
623 table->name, table->rate, ret);
628 if (table->enabled) {
631 pr_warning("Unable to enable clock %s: %d\n",
640 void tegra_clk_init_from_table(struct tegra_clk_init_table *table)
642 for (; table->name; table++)
643 tegra_clk_init_one_from_table(table);
645 EXPORT_SYMBOL(tegra_clk_init_from_table);
647 void tegra_periph_reset_deassert(struct clk *c)
649 BUG_ON(!c->ops->reset);
650 c->ops->reset(c, false);
652 EXPORT_SYMBOL(tegra_periph_reset_deassert);
654 void tegra_periph_reset_assert(struct clk *c)
656 BUG_ON(!c->ops->reset);
657 c->ops->reset(c, true);
659 EXPORT_SYMBOL(tegra_periph_reset_assert);
661 int tegra_is_clk_enabled(struct clk *c)
665 EXPORT_SYMBOL(tegra_is_clk_enabled);
667 int tegra_clk_shared_bus_update(struct clk *c)
672 clk_lock_save(c, &flags);
674 if (c->ops && c->ops->shared_bus_update)
675 ret = c->ops->shared_bus_update(c);
677 clk_unlock_restore(c, &flags);
681 /* dvfs initialization may lower default maximum rate */
682 void __init tegra_init_max_rate(struct clk *c, unsigned long max_rate)
684 struct clk *shared_bus_user;
686 if (c->max_rate <= max_rate)
689 pr_warning("Lowering %s maximum rate from %lu to %lu\n",
690 c->name, c->max_rate, max_rate);
692 c->max_rate = max_rate;
693 list_for_each_entry(shared_bus_user,
694 &c->shared_bus_list, u.shared_bus_user.node) {
695 shared_bus_user->u.shared_bus_user.rate = max_rate;
696 shared_bus_user->max_rate = max_rate;
700 void __init tegra_common_init_clock(void)
702 tegra_init_early_timer();
705 static bool tegra_keep_boot_clocks = false;
706 static int __init tegra_keep_boot_clocks_setup(char *__unused)
708 tegra_keep_boot_clocks = true;
711 __setup("tegra_keep_boot_clocks", tegra_keep_boot_clocks_setup);
714 * Bootloader may not match kernel restrictions on CPU clock sources.
715 * Make sure CPU clock is sourced from either main or backup parent.
717 static int tegra_sync_cpu_clock(void)
721 struct clk *c = tegra_get_clock_by_name("cpu");
724 rate = clk_get_rate(c);
725 ret = clk_set_rate(c, rate);
727 pr_err("%s: Failed to sync CPU at rate %lu\n", __func__, rate);
729 pr_info("CPU rate: %lu MHz\n", clk_get_rate(c) / 1000000);
732 late_initcall(tegra_sync_cpu_clock);
735 * Iterate through all clocks, disabling any for which the refcount is 0
736 * but the clock init detected the bootloader left the clock on.
738 static int __init tegra_init_disable_boot_clocks(void)
740 #if DISABLE_BOOT_CLOCKS
744 mutex_lock(&clock_list_lock);
746 list_for_each_entry(c, &clocks, node) {
747 clk_lock_save(c, &flags);
748 if (c->refcnt == 0 && c->state == ON &&
749 c->ops && c->ops->disable) {
750 pr_warn_once("%s clocks left on by bootloader:\n",
751 tegra_keep_boot_clocks ?
752 "Prevented disabling" :
755 pr_warn(" %s\n", c->name);
757 if (!tegra_keep_boot_clocks) {
762 clk_unlock_restore(c, &flags);
765 mutex_unlock(&clock_list_lock);
769 late_initcall(tegra_init_disable_boot_clocks);
771 /* Several extended clock configuration bits (e.g., clock routing, clock
772 * phase control) are included in PLL and peripheral clock source
774 int tegra_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
779 spin_lock_irqsave(&c->spinlock, flags);
781 if (!c->ops || !c->ops->clk_cfg_ex) {
785 ret = c->ops->clk_cfg_ex(c, p, setting);
788 spin_unlock_irqrestore(&c->spinlock, flags);
792 int tegra_register_clk_rate_notifier(struct clk *c, struct notifier_block *nb)
797 if (!c->rate_change_nh)
800 clk_lock_save(c, &flags);
801 ret = raw_notifier_chain_register(c->rate_change_nh, nb);
802 clk_unlock_restore(c, &flags);
806 void tegra_unregister_clk_rate_notifier(
807 struct clk *c, struct notifier_block *nb)
811 if (!c->rate_change_nh)
814 clk_lock_save(c, &flags);
815 raw_notifier_chain_unregister(c->rate_change_nh, nb);
816 clk_unlock_restore(c, &flags);
819 #ifdef CONFIG_DEBUG_FS
822 * Attempt to lock all the clocks that are marked cansleep
823 * Must be called with irqs enabled
825 static int __clk_lock_all_mutexes(void)
831 list_for_each_entry(c, &clocks, node)
833 if (!mutex_trylock(&c->mutex))
839 list_for_each_entry_continue_reverse(c, &clocks, node)
841 mutex_unlock(&c->mutex);
847 * Attempt to lock all the clocks that are not marked cansleep
848 * Must be called with irqs disabled
850 static int __clk_lock_all_spinlocks(void)
854 list_for_each_entry(c, &clocks, node)
855 if (!clk_cansleep(c))
856 if (!spin_trylock(&c->spinlock))
857 goto unlock_spinlocks;
862 list_for_each_entry_continue_reverse(c, &clocks, node)
863 if (!clk_cansleep(c))
864 spin_unlock(&c->spinlock);
869 static void __clk_unlock_all_mutexes(void)
873 list_for_each_entry_reverse(c, &clocks, node)
875 mutex_unlock(&c->mutex);
878 static void __clk_unlock_all_spinlocks(void)
882 list_for_each_entry_reverse(c, &clocks, node)
883 if (!clk_cansleep(c))
884 spin_unlock(&c->spinlock);
888 * This function retries until it can take all locks, and may take
889 * an arbitrarily long time to complete.
890 * Must be called with irqs enabled, returns with irqs disabled
891 * Must be called with clock_list_lock held
893 static void clk_lock_all(void)
897 ret = __clk_lock_all_mutexes();
903 ret = __clk_lock_all_spinlocks();
905 goto failed_spinlocks;
907 /* All locks taken successfully, return */
912 __clk_unlock_all_mutexes();
919 * Unlocks all clocks after a clk_lock_all
920 * Must be called with irqs disabled, returns with irqs enabled
921 * Must be called with clock_list_lock held
923 static void clk_unlock_all(void)
925 __clk_unlock_all_spinlocks();
929 __clk_unlock_all_mutexes();
932 static struct dentry *clk_debugfs_root;
934 static void dvfs_show_one(struct seq_file *s, struct dvfs *d, int level)
936 seq_printf(s, "%*s %-*s%21s%d mV\n",
938 30 - level * 3, d->dvfs_rail->reg_id,
943 static void clock_tree_show_one(struct seq_file *s, struct clk *c, int level)
946 const char *state = "uninit";
948 unsigned long rate = clk_get_rate_all_locked(c);
949 unsigned long max_rate = clk_get_max_rate(c);;
953 else if (c->state == OFF)
956 if (c->mul != 0 && c->div != 0) {
957 if (c->mul > c->div) {
958 int mul = c->mul / c->div;
959 int mul2 = (c->mul * 10 / c->div) % 10;
960 int mul3 = (c->mul * 10) % c->div;
961 if (mul2 == 0 && mul3 == 0)
962 snprintf(div, sizeof(div), "x%d", mul);
964 snprintf(div, sizeof(div), "x%d.%d", mul, mul2);
966 snprintf(div, sizeof(div), "x%d.%d..", mul, mul2);
968 snprintf(div, sizeof(div), "%d%s", c->div / c->mul,
969 (c->div % c->mul) ? ".5" : "");
973 seq_printf(s, "%*s%c%c%-*s%c %-6s %-3d %-8s %-10lu",
975 rate > max_rate ? '!' : ' ',
977 30 - level * 3, c->name,
978 c->cansleep ? '$' : ' ',
979 state, c->refcnt, div, rate);
980 if (c->parent && !list_empty(&c->parent->shared_bus_list))
981 seq_printf(s, " (%lu)", c->u.shared_bus_user.rate);
985 dvfs_show_one(s, c->dvfs, level + 1);
987 list_for_each_entry(child, &clocks, node) {
988 if (child->parent != c)
991 clock_tree_show_one(s, child, level + 1);
995 static int clock_tree_show(struct seq_file *s, void *data)
998 seq_printf(s, " clock state ref div rate (shared rate)\n");
999 seq_printf(s, "------------------------------------------------------------------------------\n");
1001 mutex_lock(&clock_list_lock);
1005 list_for_each_entry(c, &clocks, node)
1006 if (c->parent == NULL)
1007 clock_tree_show_one(s, c, 0);
1011 mutex_unlock(&clock_list_lock);
1015 static int clock_tree_open(struct inode *inode, struct file *file)
1017 return single_open(file, clock_tree_show, inode->i_private);
1020 static const struct file_operations clock_tree_fops = {
1021 .open = clock_tree_open,
1023 .llseek = seq_lseek,
1024 .release = single_release,
1027 static void syncevent_one(struct clk *c)
1032 trace_clock_enable(c->name, 1, smp_processor_id());
1034 trace_clock_disable(c->name, 0, smp_processor_id());
1036 trace_clock_set_rate(c->name, clk_get_rate_all_locked(c),
1037 smp_processor_id());
1039 list_for_each_entry(child, &clocks, node) {
1040 if (child->parent != c)
1043 syncevent_one(child);
1047 static int syncevent_write(struct file *file, const char __user *user_buf,
1048 size_t count, loff_t *ppos)
1054 memset(buffer, 0, sizeof(buffer));
1055 buf_size = min(count, (sizeof(buffer)-1));
1057 if (copy_from_user(buffer, user_buf, buf_size))
1060 if (!strnicmp("all", buffer, 3)) {
1061 mutex_lock(&clock_list_lock);
1065 list_for_each_entry(c, &clocks, node) {
1066 if (c->parent == NULL)
1072 mutex_unlock(&clock_list_lock);
1078 static const struct file_operations syncevent_fops = {
1079 .write = syncevent_write,
1082 static int possible_parents_show(struct seq_file *s, void *data)
1084 struct clk *c = s->private;
1087 for (i = 0; c->inputs[i].input; i++) {
1088 char *first = (i == 0) ? "" : " ";
1089 seq_printf(s, "%s%s", first, c->inputs[i].input->name);
1091 seq_printf(s, "\n");
1095 static int possible_parents_open(struct inode *inode, struct file *file)
1097 return single_open(file, possible_parents_show, inode->i_private);
1100 static const struct file_operations possible_parents_fops = {
1101 .open = possible_parents_open,
1103 .llseek = seq_lseek,
1104 .release = single_release,
1107 static int parent_show(struct seq_file *s, void *data)
1109 struct clk *c = s->private;
1110 struct clk *p = clk_get_parent(c);
1112 seq_printf(s, "%s\n", p ? p->name : "clk_root");
1116 static int parent_open(struct inode *inode, struct file *file)
1118 return single_open(file, parent_show, inode->i_private);
1121 static int rate_get(void *data, u64 *val)
1123 struct clk *c = (struct clk *)data;
1124 *val = (u64)clk_get_rate(c);
1128 static int state_get(void *data, u64 *val)
1130 struct clk *c = (struct clk *)data;
1131 *val = (u64)((c->state == ON) ? 1 : 0);
1135 #ifdef CONFIG_TEGRA_CLOCK_DEBUG_WRITE
1137 static const mode_t parent_rate_mode = S_IRUGO | S_IWUSR;
1139 static ssize_t parent_write(struct file *file,
1140 const char __user *userbuf, size_t count, loff_t *ppos)
1142 struct seq_file *s = file->private_data;
1143 struct clk *c = s->private;
1144 struct clk *p = NULL;
1147 if (sizeof(buf) <= count)
1150 if (copy_from_user(buf, userbuf, count))
1153 /* terminate buffer and trim - white spaces may be appended
1154 * at the end when invoked from shell command line */
1158 p = tegra_get_clock_by_name(buf);
1162 if (clk_set_parent(c, p))
1168 static const struct file_operations parent_fops = {
1169 .open = parent_open,
1171 .write = parent_write,
1172 .llseek = seq_lseek,
1173 .release = single_release,
1176 static int rate_set(void *data, u64 val)
1178 struct clk *c = (struct clk *)data;
1179 return clk_set_rate(c, (unsigned long)val);
1181 DEFINE_SIMPLE_ATTRIBUTE(rate_fops, rate_get, rate_set, "%llu\n");
1183 static int state_set(void *data, u64 val)
1185 struct clk *c = (struct clk *)data;
1188 return clk_enable(c);
1194 DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, state_set, "%llu\n");
1198 static const mode_t parent_rate_mode = S_IRUGO;
1200 static const struct file_operations parent_fops = {
1201 .open = parent_open,
1203 .llseek = seq_lseek,
1204 .release = single_release,
1207 DEFINE_SIMPLE_ATTRIBUTE(rate_fops, rate_get, NULL, "%llu\n");
1208 DEFINE_SIMPLE_ATTRIBUTE(state_fops, state_get, NULL, "%llu\n");
1211 static int time_on_get(void *data, u64 *val)
1213 unsigned long flags;
1214 struct clk *c = (struct clk *)data;
1216 clk_lock_save(c, &flags);
1217 clk_stats_update(c);
1218 *val = cputime64_to_clock_t(c->stats.time_on);
1219 clk_unlock_restore(c, &flags);
1223 DEFINE_SIMPLE_ATTRIBUTE(time_on_fops, time_on_get, NULL, "%llu\n");
1225 static int possible_rates_show(struct seq_file *s, void *data)
1227 struct clk *c = s->private;
1230 /* shared bus clock must round up, unless top of range reached */
1231 while (rate <= c->max_rate) {
1232 long rounded_rate = c->ops->round_rate(c, rate);
1233 if (IS_ERR_VALUE(rounded_rate) || (rounded_rate <= rate))
1236 rate = rounded_rate + 2000; /* 2kHz resolution */
1237 seq_printf(s, "%ld ", rounded_rate / 1000);
1239 seq_printf(s, "(kHz)\n");
1243 static int possible_rates_open(struct inode *inode, struct file *file)
1245 return single_open(file, possible_rates_show, inode->i_private);
1248 static const struct file_operations possible_rates_fops = {
1249 .open = possible_rates_open,
1251 .llseek = seq_lseek,
1252 .release = single_release,
1255 static int clk_debugfs_register_one(struct clk *c)
1259 d = debugfs_create_dir(c->name, clk_debugfs_root);
1264 d = debugfs_create_u8("refcnt", S_IRUGO, c->dent, (u8 *)&c->refcnt);
1268 d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
1272 d = debugfs_create_u32("max", S_IRUGO, c->dent, (u32 *)&c->max_rate);
1276 d = debugfs_create_u32("min", S_IRUGO, c->dent, (u32 *)&c->min_rate);
1280 d = debugfs_create_file(
1281 "parent", parent_rate_mode, c->dent, c, &parent_fops);
1285 d = debugfs_create_file(
1286 "rate", parent_rate_mode, c->dent, c, &rate_fops);
1290 d = debugfs_create_file(
1291 "state", parent_rate_mode, c->dent, c, &state_fops);
1295 d = debugfs_create_file(
1296 "time_on", S_IRUGO, c->dent, c, &time_on_fops);
1301 d = debugfs_create_file("possible_parents", S_IRUGO, c->dent,
1302 c, &possible_parents_fops);
1307 if (c->ops && c->ops->round_rate && c->ops->shared_bus_update) {
1308 d = debugfs_create_file("possible_rates", S_IRUGO, c->dent,
1309 c, &possible_rates_fops);
1317 debugfs_remove_recursive(c->dent);
1321 static int clk_debugfs_register(struct clk *c)
1324 struct clk *pa = c->parent;
1326 if (pa && !pa->dent) {
1327 err = clk_debugfs_register(pa);
1333 err = clk_debugfs_register_one(c);
1340 int __init tegra_clk_debugfs_init(void)
1346 d = debugfs_create_dir("clock", NULL);
1349 clk_debugfs_root = d;
1351 d = debugfs_create_file("clock_tree", S_IRUGO, clk_debugfs_root, NULL,
1356 d = debugfs_create_file("syncevents", S_IRUGO|S_IWUSR, clk_debugfs_root, NULL,
1359 if (dvfs_debugfs_init(clk_debugfs_root))
1362 list_for_each_entry(c, &clocks, node) {
1363 err = clk_debugfs_register(c);
1369 debugfs_remove_recursive(clk_debugfs_root);
1375 void tegra_clk_add(struct clk *clk)
1377 struct clk_tegra *c = to_clk_tegra(__clk_get_hw(clk));
1379 mutex_lock(&clock_list_lock);
1380 list_add(&c->node, &clocks);
1381 mutex_unlock(&clock_list_lock);
1384 struct clk *tegra_get_clock_by_name(const char *name)
1386 struct clk_tegra *c;
1387 struct clk *ret = NULL;
1388 mutex_lock(&clock_list_lock);
1389 list_for_each_entry(c, &clocks, node) {
1390 if (strcmp(__clk_get_name(c->hw.clk), name) == 0) {
1395 mutex_unlock(&clock_list_lock);
1399 static int tegra_clk_init_one_from_table(struct tegra_clk_init_table *table)
1407 c = tegra_get_clock_by_name(table->name);
1410 pr_warn("Unable to initialize clock %s\n",
1415 parent = clk_get_parent(c);
1417 if (table->parent) {
1418 p = tegra_get_clock_by_name(table->parent);
1420 pr_warn("Unable to find parent %s of clock %s\n",
1421 table->parent, table->name);
1426 ret = clk_set_parent(c, p);
1428 pr_warn("Unable to set parent %s of clock %s: %d\n",
1429 table->parent, table->name, ret);
1435 if (table->rate && table->rate != clk_get_rate(c)) {
1436 ret = clk_set_rate(c, table->rate);
1438 pr_warn("Unable to set clock %s to rate %lu: %d\n",
1439 table->name, table->rate, ret);
1444 if (table->enabled) {
1445 ret = clk_prepare_enable(c);
1447 pr_warn("Unable to enable clock %s: %d\n",
1456 void tegra_clk_init_from_table(struct tegra_clk_init_table *table)
1458 for (; table->name; table++)
1459 tegra_clk_init_one_from_table(table);
1462 void tegra_periph_reset_deassert(struct clk *c)
1464 struct clk_tegra *clk = to_clk_tegra(__clk_get_hw(c));
1465 BUG_ON(!clk->reset);
1466 clk->reset(__clk_get_hw(c), false);
1468 EXPORT_SYMBOL(tegra_periph_reset_deassert);
1470 void tegra_periph_reset_assert(struct clk *c)
1472 struct clk_tegra *clk = to_clk_tegra(__clk_get_hw(c));
1473 BUG_ON(!clk->reset);
1474 clk->reset(__clk_get_hw(c), true);
1476 EXPORT_SYMBOL(tegra_periph_reset_assert);
1478 /* Several extended clock configuration bits (e.g., clock routing, clock
1479 * phase control) are included in PLL and peripheral clock source
1481 int tegra_clk_cfg_ex(struct clk *c, enum tegra_clk_ex_param p, u32 setting)
1484 struct clk_tegra *clk = to_clk_tegra(__clk_get_hw(c));
1486 if (!clk->clk_cfg_ex) {
1490 ret = clk->clk_cfg_ex(__clk_get_hw(c), p, setting);
1495 #endif /* !CONFIG_COMMON_CLK */