2 * arch/sh/kernel/cpu/clock.c - SuperH clock framework
4 * Copyright (C) 2005 - 2009 Paul Mundt
6 * This clock framework is derived from the OMAP version by:
8 * Copyright (C) 2004 - 2008 Nokia Corporation
9 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
11 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
17 #include <linux/kernel.h>
18 #include <linux/init.h>
19 #include <linux/module.h>
20 #include <linux/mutex.h>
21 #include <linux/list.h>
22 #include <linux/kobject.h>
23 #include <linux/sysdev.h>
24 #include <linux/seq_file.h>
25 #include <linux/err.h>
26 #include <linux/platform_device.h>
27 #include <linux/proc_fs.h>
28 #include <asm/clock.h>
29 #include <asm/timer.h>
/*
 * Framework-wide bookkeeping: every registered clock is linked on
 * clock_list; clock_lock (IRQ-safe spinlock) protects enable/disable
 * and rate updates; clock_list_sem serializes list add/remove and
 * lookups in clk_get()/clk_register().
 */
31 static LIST_HEAD(clock_list);
32 static DEFINE_SPINLOCK(clock_lock);
33 static DEFINE_MUTEX(clock_list_sem);
36 * Each subtype is expected to define the init routines for these clocks,
37 * as each subtype (or processor family) will have these clocks at the
38 * very least. These are all provided through the CPG, which even some of
39 * the more quirky parts (such as ST40, SH4-202, etc.) still have.
41 * The processor-specific code is expected to register any additional
42 * clock sources that are of interest.
/*
 * The four core CPG clocks: master (peripheral) clock plus the
 * module, bus and CPU clocks derived from it.  master_clk's base rate
 * comes from CONFIG_SH_PCLK_FREQ; the children inherit it as parent.
 * NOTE(review): several initializer lines (.name fields, closing
 * braces, the onchip_clocks[] entries) are not visible in this chunk.
 */
44 static struct clk master_clk = {
46 .flags = CLK_ALWAYS_ENABLED,
47 .rate = CONFIG_SH_PCLK_FREQ,
50 static struct clk module_clk = {
52 .parent = &master_clk,
53 .flags = CLK_ALWAYS_ENABLED,
56 static struct clk bus_clk = {
58 .parent = &master_clk,
59 .flags = CLK_ALWAYS_ENABLED,
62 static struct clk cpu_clk = {
64 .parent = &master_clk,
65 .flags = CLK_ALWAYS_ENABLED,
/*
 * Table iterated by clk_init(); the index is passed to
 * arch_init_clk_ops() as the clock "type".
 */
69 * The ordering of these clocks matters, do not change it.
71 static struct clk *onchip_clocks[] = {
78 /* Used for clocks that always have same value as the parent clock */
79 unsigned long followparent_recalc(struct clk *clk)
/* Generic .recalc helper: mirror the parent's current rate. */
81 return clk->parent->rate;
84 /* Propagate rate to children */
85 void propagate_rate(struct clk *tclk)
/*
 * Walk tclk's direct children and let each recompute its rate via its
 * .recalc hook.  NOTE(review): recursion into grandchildren is not
 * visible in this chunk - confirm against the full source.
 */
89 list_for_each_entry(clkp, &tclk->children, sibling) {
90 if (clkp->ops->recalc)
91 clkp->rate = clkp->ops->recalc(clkp);
/*
 * One-shot per-clock initialization, guarded by CLK_NEEDS_INIT:
 * presumably invokes clk->ops->init(clk) once (the call site is not
 * visible in this chunk - TODO confirm), then clears the flag so it
 * never runs again.  Expected to be called with clock_lock held.
 */
96 static void __clk_init(struct clk *clk)
99 * See if this is the first time we're enabling the clock, some
100 * clocks that are always enabled still require "special"
101 * initialization. This is especially true if the clock mode
102 * changes and the clock needs to hunt for the proper set of
103 * divisors to use before it can effectively recalc.
106 if (clk->flags & CLK_NEEDS_INIT) {
107 if (clk->ops && clk->ops->init)
110 clk->flags &= ~CLK_NEEDS_INIT;
/*
 * Lock-free core of clk_enable(); caller (clk_enable) holds
 * clock_lock.  On the first reference (usecount == 1) the parent is
 * enabled recursively before this clock's own enable hook runs.
 * NOTE(review): the usecount increment and return statements are not
 * visible in this chunk.
 */
114 static int __clk_enable(struct clk *clk)
121 /* nothing to do if always enabled */
122 if (clk->flags & CLK_ALWAYS_ENABLED)
125 if (clk->usecount == 1) {
/* Parent must be running before the child's enable hook fires. */
128 __clk_enable(clk->parent);
130 if (clk->ops && clk->ops->enable)
131 clk->ops->enable(clk);
/*
 * Public entry point: serialize __clk_enable() under the IRQ-safe
 * clock_lock and return its result.
 */
137 int clk_enable(struct clk *clk)
142 spin_lock_irqsave(&clock_lock, flags);
143 ret = __clk_enable(clk);
144 spin_unlock_irqrestore(&clock_lock, flags);
148 EXPORT_SYMBOL_GPL(clk_enable);
/*
 * Lock-free core of clk_disable(); caller holds clock_lock.  When the
 * last reference is dropped (usecount reaches 0) the clock's disable
 * hook runs, then the parent's refcount is dropped recursively.
 * NOTE(review): the usecount decrement itself is not visible in this
 * chunk - the WARN_ON below catches underflow after the fact.
 */
150 static void __clk_disable(struct clk *clk)
157 WARN_ON(clk->usecount < 0);
/* Always-enabled clocks are never gated. */
159 if (clk->flags & CLK_ALWAYS_ENABLED)
162 if (clk->usecount == 0) {
163 if (likely(clk->ops && clk->ops->disable))
164 clk->ops->disable(clk);
166 __clk_disable(clk->parent);
/*
 * Public entry point: take clock_lock (IRQ-safe) around the disable
 * path.  NOTE(review): the __clk_disable() call between lock and
 * unlock is not visible in this chunk.
 */
170 void clk_disable(struct clk *clk)
174 spin_lock_irqsave(&clock_lock, flags);
176 spin_unlock_irqrestore(&clock_lock, flags);
178 EXPORT_SYMBOL_GPL(clk_disable);
/* Clocks with no parent; the roots of the rate-propagation tree. */
180 static LIST_HEAD(root_clks);
183 * recalculate_root_clocks - recalculate and propagate all root clocks
185 * Recalculates all root clocks (clocks with no parent), which if the
186 * clock's .recalc is set correctly, should also propagate their rates.
189 void recalculate_root_clocks(void)
/* Refresh each root's rate, then push the result down its subtree. */
193 list_for_each_entry(clkp, &root_clks, sibling) {
194 if (clkp->ops->recalc)
195 clkp->rate = clkp->ops->recalc(clkp);
196 propagate_rate(clkp);
/*
 * Register a clock with the framework: reject NULL/ERR and
 * already-registered clocks, hook it into its parent's child list (or
 * root_clks if parentless), add it to the global clock_list, and mark
 * it for one-shot init.  ALWAYS_ENABLED clocks get their enable hook
 * invoked immediately.  NOTE(review): the error-return and final
 * return statements are not visible in this chunk.
 */
200 int clk_register(struct clk *clk)
202 if (clk == NULL || IS_ERR(clk))
206 * trap out already registered clocks
/* A clock already on a list has non-NULL node links. */
208 if (clk->node.next || clk->node.prev)
211 mutex_lock(&clock_list_sem);
213 INIT_LIST_HEAD(&clk->children);
216 list_add(&clk->sibling, &clk->parent->children);
218 list_add(&clk->sibling, &root_clks);
220 list_add(&clk->node, &clock_list);
222 clk->flags |= CLK_NEEDS_INIT;
224 mutex_unlock(&clock_list_sem);
226 if (clk->flags & CLK_ALWAYS_ENABLED) {
228 pr_debug( "Clock '%s' is ALWAYS_ENABLED\n", clk->name);
229 if (clk->ops && clk->ops->enable)
230 clk->ops->enable(clk);
/* NOTE(review): debug message below is missing a trailing '\n'. */
231 pr_debug( "Enabled.");
236 EXPORT_SYMBOL_GPL(clk_register);
/*
 * Remove a clock from both the parent/root sibling list and the
 * global clock_list, under clock_list_sem.
 */
238 void clk_unregister(struct clk *clk)
240 mutex_lock(&clock_list_sem);
241 list_del(&clk->sibling);
242 list_del(&clk->node);
243 mutex_unlock(&clock_list_sem);
245 EXPORT_SYMBOL_GPL(clk_unregister);
/*
 * Report the clock's cached rate in Hz.  NOTE(review): the body
 * (presumably "return clk->rate;") is not visible in this chunk.
 */
247 unsigned long clk_get_rate(struct clk *clk)
251 EXPORT_SYMBOL_GPL(clk_get_rate);
/* Convenience wrapper: set the rate with the default algorithm (0). */
253 int clk_set_rate(struct clk *clk, unsigned long rate)
255 return clk_set_rate_ex(clk, rate, 0);
257 EXPORT_SYMBOL_GPL(clk_set_rate);
/*
 * Set a clock's rate via its .set_rate hook, passing an opaque
 * algorithm id through to the implementation.  Returns -EOPNOTSUPP
 * if the clock has no set_rate hook.  On success the cached rate is
 * refreshed via .recalc.  NOTE(review): rate propagation to children
 * is not visible in this chunk - confirm against the full source.
 */
259 int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
261 int ret = -EOPNOTSUPP;
263 if (likely(clk->ops && clk->ops->set_rate)) {
266 spin_lock_irqsave(&clock_lock, flags);
267 ret = clk->ops->set_rate(clk, rate, algo_id);
269 if (clk->ops->recalc)
270 clk->rate = clk->ops->recalc(clk);
273 spin_unlock_irqrestore(&clock_lock, flags);
278 EXPORT_SYMBOL_GPL(clk_set_rate_ex);
/*
 * Force a clock to recompute its cached rate from hardware via its
 * .recalc hook; a no-op for clocks without one.  Runs under
 * clock_lock.
 */
280 void clk_recalc_rate(struct clk *clk)
284 if (!clk->ops->recalc)
287 spin_lock_irqsave(&clock_lock, flags);
288 clk->rate = clk->ops->recalc(clk);
290 spin_unlock_irqrestore(&clock_lock, flags);
292 EXPORT_SYMBOL_GPL(clk_recalc_rate);
/*
 * Reparent a clock.  Only permitted while the clock is unused
 * (usecount == 0); on success the rate is recomputed under the new
 * parent.  NOTE(review): the NULL/self-parent guard and the error
 * path for a busy clock are not visible in this chunk.
 */
294 int clk_set_parent(struct clk *clk, struct clk *parent)
302 spin_lock_irqsave(&clock_lock, flags);
303 if (clk->usecount == 0) {
304 if (clk->ops->set_parent)
305 ret = clk->ops->set_parent(clk, parent);
307 if (clk->ops->recalc)
308 clk->rate = clk->ops->recalc(clk);
313 spin_unlock_irqrestore(&clock_lock, flags);
317 EXPORT_SYMBOL_GPL(clk_set_parent);
/*
 * Report the clock's parent.  NOTE(review): the body (presumably
 * "return clk->parent;") is not visible in this chunk.
 */
319 struct clk *clk_get_parent(struct clk *clk)
323 EXPORT_SYMBOL_GPL(clk_get_parent);
/*
 * Ask the clock's .round_rate hook for the nearest supported rate,
 * under clock_lock.  Clocks without a round_rate hook fall through to
 * reporting the current rate.  NOTE(review): the return of `rounded`
 * is not visible in this chunk.
 */
325 long clk_round_rate(struct clk *clk, unsigned long rate)
327 if (likely(clk->ops && clk->ops->round_rate)) {
328 unsigned long flags, rounded;
330 spin_lock_irqsave(&clock_lock, flags);
331 rounded = clk->ops->round_rate(clk, rate);
332 spin_unlock_irqrestore(&clock_lock, flags);
337 return clk_get_rate(clk);
339 EXPORT_SYMBOL_GPL(clk_round_rate);
342 * Returns a clock. Note that we first try to use device id on the bus
343 * and clock name. If this fails, we try to use clock name only.
345 struct clk *clk_get(struct device *dev, const char *id)
/* Default to -ENOENT; overwritten if a lookup below succeeds. */
347 struct clk *p, *clk = ERR_PTR(-ENOENT);
/* An id-qualified match only makes sense for platform devices. */
350 if (dev == NULL || dev->bus != &platform_bus_type)
353 idno = to_platform_device(dev)->id;
355 mutex_lock(&clock_list_sem);
/* First pass: match on device id AND clock name (id check elided). */
356 list_for_each_entry(p, &clock_list, node) {
358 strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
/* Second pass: fall back to matching on clock name alone. */
364 list_for_each_entry(p, &clock_list, node) {
365 if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
372 mutex_unlock(&clock_list_sem);
376 EXPORT_SYMBOL_GPL(clk_get);
/*
 * Release a reference obtained via clk_get(): drop the owning
 * module's refcount.  Safe to call with NULL or an ERR_PTR.
 */
378 void clk_put(struct clk *clk)
380 if (clk && !IS_ERR(clk))
381 module_put(clk->owner);
383 EXPORT_SYMBOL_GPL(clk_put);
/*
 * Weak default hooks that CPU-subtype code may override:
 * arch_init_clk_ops() assigns the ops table for one of the core
 * clocks (type is its onchip_clocks[] index).  NOTE(review): the
 * second weak stub's name and body (presumably arch_clk_init) are not
 * visible in this chunk.
 */
385 void __init __attribute__ ((weak))
386 arch_init_clk_ops(struct clk_ops **ops, int type)
390 int __init __attribute__ ((weak))
/*
 * /proc/clocks read callback: print one line per registered clock
 * (newest-registered last, hence the reverse walk) with its rate in
 * MHz and enabled/disabled state.  NOTE(review): the computation of
 * the returned length is not visible in this chunk.
 */
396 static int show_clocks(char *buf, char **start, off_t off,
397 int len, int *eof, void *data)
402 list_for_each_entry_reverse(clk, &clock_list, node) {
403 unsigned long rate = clk_get_rate(clk);
405 p += sprintf(p, "%-12s\t: %ld.%02ldMHz\t%s\n", clk->name,
406 rate / 1000000, (rate % 1000000) / 10000,
407 ((clk->flags & CLK_ALWAYS_ENABLED) ||
409 "enabled" : "disabled");
/*
 * sysdev PM callback, also reused for resume (see
 * clks_sysdev_resume).  On resume-from-hibernation (detected by
 * comparing against the previously recorded PM state) every clock's
 * hardware state is reprogrammed from the cached parent/rate so that
 * hardware matches the pre-hibernation software state.  prev_state is
 * static so it persists across calls.  NOTE(review): several case
 * labels and the final return are not visible in this chunk.
 */
416 static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
418 static pm_message_t prev_state;
421 switch (state.event) {
423 /* Resuming from hibernation */
424 if (prev_state.event != PM_EVENT_FREEZE)
427 list_for_each_entry(clkp, &clock_list, node) {
428 if (likely(clkp->ops)) {
/* Cached values to push back into hardware. */
429 unsigned long rate = clkp->rate;
431 if (likely(clkp->ops->set_parent))
432 clkp->ops->set_parent(clkp,
434 if (likely(clkp->ops->set_rate))
435 clkp->ops->set_rate(clkp,
437 else if (likely(clkp->ops->recalc))
438 clkp->rate = clkp->ops->recalc(clkp);
442 case PM_EVENT_FREEZE:
444 case PM_EVENT_SUSPEND:
/* Resume funnels through the suspend handler with PMSG_ON. */
452 static int clks_sysdev_resume(struct sys_device *dev)
454 return clks_sysdev_suspend(dev, PMSG_ON);
/*
 * sysdev plumbing so the clock framework participates in system
 * suspend/resume.  NOTE(review): the class name and device id fields
 * are not visible in this chunk.
 */
457 static struct sysdev_class clks_sysdev_class = {
461 static struct sysdev_driver clks_sysdev_driver = {
462 .suspend = clks_sysdev_suspend,
463 .resume = clks_sysdev_resume,
466 static struct sys_device clks_sysdev_dev = {
467 .cls = &clks_sysdev_class,
/*
 * Register the sysdev class/driver/device triple at subsys_initcall
 * time.  NOTE(review): return values of the register calls are
 * ignored here, matching the visible code.
 */
470 static int __init clk_sysdev_init(void)
472 sysdev_class_register(&clks_sysdev_class);
473 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
474 sysdev_register(&clks_sysdev_dev);
478 subsys_initcall(clk_sysdev_init);
/*
 * Framework bring-up: requires master_clk.rate to have been set
 * (CONFIG_SH_PCLK_FREQ), wires up ops for each core CPG clock via
 * arch_init_clk_ops() (the array index doubles as the clock type),
 * registers them, lets the subtype register extra clocks through
 * arch_clk_init(), then seeds all rates from the roots down.
 * Errors from the register calls are OR-ed into `ret`.
 */
481 int __init clk_init(void)
485 BUG_ON(!master_clk.rate);
487 for (i = 0; i < ARRAY_SIZE(onchip_clocks); i++) {
488 struct clk *clk = onchip_clocks[i];
490 arch_init_clk_ops(&clk->ops, i);
491 ret |= clk_register(clk);
494 ret |= arch_clk_init();
496 /* Kick the child clocks.. */
497 recalculate_root_clocks();
/*
 * Expose /proc/clocks (root-readable) backed by show_clocks().
 * NOTE(review): the remaining create_proc_read_entry() arguments and
 * the return value handling are not visible in this chunk.
 */
502 static int __init clk_proc_init(void)
504 struct proc_dir_entry *p;
505 p = create_proc_read_entry("clocks", S_IRUSR, NULL,
512 subsys_initcall(clk_proc_init);