2 * Clock manipulation routines for Freescale STMP37XX/STMP378X
4 * Author: Vitaly Wool <vital@embeddedalley.com>
6 * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
7 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
11 * The code contained herein is licensed under the GNU General Public
12 * License. You may obtain a copy of the GNU General Public License
13 * Version 2 or later at the following locations:
15 * http://www.opensource.org/licenses/gpl-license.html
16 * http://www.gnu.org/copyleft/gpl.html
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/clk.h>
22 #include <linux/spinlock.h>
23 #include <linux/errno.h>
24 #include <linux/err.h>
25 #include <linux/delay.h>
28 #include <asm/mach-types.h>
29 #include <asm/clkdev.h>
30 #include <mach/regs-clkctrl.h>
/* Single lock serializing all clock-tree mutations (enable/disable/reparent). */
34 static DEFINE_SPINLOCK(clocks_lock);
/* Forward declarations: the set_rate/set_parent helpers below reference
 * these specific clocks directly (CPU bypass dance, HBUS coupling). */
36 static struct clk osc_24M;
37 static struct clk pll_clk;
38 static struct clk cpu_clk;
39 static struct clk hclk;
/* Re-reads child rates after a parent's rate changes; defined after the table. */
41 static int propagate_rate(struct clk *);
/* Non-zero while the hardware divider identified by busy_reg/busy_bit is
 * still transferring a newly written value.  Callers poll this after
 * programming a divider.  (Excerpt: surrounding brace lines elided.) */
43 static inline int clk_is_busy(struct clk *clk)
45 return __raw_readl(clk->busy_reg) & (1 << clk->busy_bit);
/* Sanity-check a clk handle: non-NULL, not an ERR_PTR, and has an ops table. */
48 static inline int clk_good(struct clk *clk)
50 return clk && !IS_ERR(clk) && clk->ops;
/* Generic gate enable: flip the gate bit at enable_reg/enable_shift, then
 * wait enable_wait microseconds for the clock to settle.
 * (Excerpt: the else branch and return are elided from this view.) */
53 static int std_clk_enable(struct clk *clk)
55 if (clk->enable_reg) {
56 u32 clk_reg = __raw_readl(clk->enable_reg);
/* enable_negate marks an active-low gate: clear the bit to enable. */
57 if (clk->enable_negate)
58 clk_reg &= ~(1 << clk->enable_shift);
60 clk_reg |= (1 << clk->enable_shift);
61 __raw_writel(clk_reg, clk->enable_reg);
63 udelay(clk->enable_wait);
/* Generic gate disable: mirror image of std_clk_enable — set the bit for
 * active-low gates (enable_negate), clear it otherwise.
 * (Excerpt: else branch and return elided from this view.) */
69 static int std_clk_disable(struct clk *clk)
71 if (clk->enable_reg) {
72 u32 clk_reg = __raw_readl(clk->enable_reg);
73 if (clk->enable_negate)
74 clk_reg |= (1 << clk->enable_shift);
76 clk_reg &= ~(1 << clk->enable_shift);
77 __raw_writel(clk_reg, clk->enable_reg);
/* Program the IO clock fractional divider.  The PFD divides parent*18 by a
 * value that must lie in 18..35; out-of-range requests fail (error path
 * elided in this excerpt).  After writing, poll the busy bit with a
 * bounded spin. */
83 static int io_set_rate(struct clk *clk, u32 rate)
85 u32 reg_frac, clkctrl_frac;
86 int i, ret = 0, mask = 0x1f;
/* Round up so the resulting rate never exceeds the requested rate. */
88 clkctrl_frac = (clk->parent->rate * 18 + rate - 1) / rate;
90 if (clkctrl_frac < 18 || clkctrl_frac > 35) {
95 reg_frac = __raw_readl(clk->scale_reg);
96 reg_frac &= ~(mask << clk->scale_shift);
97 __raw_writel(reg_frac | (clkctrl_frac << clk->scale_shift),
/* Bounded busy-wait for the divider handshake (timeout path elided). */
100 for (i = 10000; i; i--)
101 if (!clk_is_busy(clk))
/* Read back the IO clock rate: parent*18 divided by the 5-bit PFD field.
 * (Excerpt: mask declaration and return elided.) */
112 static long io_get_rate(struct clk *clk)
114 long rate = clk->parent->rate * 18;
117 rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
/* Rate of a simple peripheral divider clock: parent rate / 8-bit divider.
 * Gated-off clocks presumably report early (branch body elided). */
123 static long per_get_rate(struct clk *clk)
125 long rate = clk->parent->rate;
127 const int mask = 0xff;
129 if (clk->enable_reg &&
/* NOTE(review): masks with the raw shift value, not (1 << enable_shift);
 * looks like a bug — confirm against std_clk_enable's bit usage. */
130 !(__raw_readl(clk->enable_reg) & clk->enable_shift))
133 div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
/* Program a peripheral integer divider: div = ceil(parent/rate), must fit
 * in 8 bits and be non-zero.  Poll busy after the write; on failure the
 * error is logged (intermediate error-path lines elided in this excerpt). */
142 static int per_set_rate(struct clk *clk, u32 rate)
/* Round up so the achieved rate does not exceed the request. */
145 int div = (clk->parent->rate + rate - 1) / rate;
147 const int mask = 0xff;
151 if (div == 0 || div > mask)
154 reg_frac = __raw_readl(clk->scale_reg);
155 reg_frac &= ~(mask << clk->scale_shift);
158 __raw_writel(reg_frac | (div << clk->scale_shift),
/* Bounded busy-wait for the divider handshake. */
162 for (i = 10000; i; i--)
163 if (!clk_is_busy(clk))
177 printk(KERN_ERR "%s: error %d\n", __func__, ret);
/* LCDIF pixel clock rate: parent rate divided by the PIX divider, further
 * scaled by the PIXFRAC fractional divider read from HW_CLKCTRL_FRAC.
 * (Excerpt: the arithmetic combining div/PIXFRAC and return are elided.) */
181 static long lcdif_get_rate(struct clk *clk)
183 long rate = clk->parent->rate;
185 const int mask = 0xff;
187 div = (__raw_readl(clk->scale_reg) >> clk->scale_shift) & mask;
190 div = (HW_CLKCTRL_FRAC_RD() & BM_CLKCTRL_FRAC_PIXFRAC) >>
191 BP_CLKCTRL_FRAC_PIXFRAC;
/* Set the LCDIF pixel clock.  Searches all (fracdiv, div) pairs with
 * fracdiv in 18..35 and div in 1..255 for the fastest cycle time that
 * still satisfies the requested period, then programs PIXFRAC, ungates
 * the pixel PFD, programs the PIX divider, waits for the divider update
 * and switches the mux off the bypass path.  Several error/loop-control
 * lines are elided in this excerpt. */
199 static int lcdif_set_rate(struct clk *clk, u32 rate)
203 * On 3700, we can get most timings exact by modifying ref_pix
204 * and the divider, but keeping the phase timings at 1 (2
207 * ref_pix can be between 480e6*18/35=246.9MHz and 480e6*18/18=480MHz,
208 * which is between 18/(18*480e6)=2.084ns and 35/(18*480e6)=4.050ns.
210 * ns_cycle >= 2*18e3/(18*480) = 25/6
211 * ns_cycle <= 2*35e3/(18*480) = 875/108
213 * Multiply the ns_cycle by 'div' to lengthen it until it fits the
214 * bounds. This is the divider we'll use after ref_pix.
216 * 6 * ns_cycle >= 25 * div
217 * 108 * ns_cycle <= 875 * div
/* rate is in kHz here: 1e6/rate(kHz) gives the cycle time in ns. */
219 u32 ns_cycle = 1000000 / rate;
221 u32 lowest_result = (u32) -1;
222 u32 lowest_div = 0, lowest_fracdiv = 0;
224 for (div = 1; div < 256; ++div) {
227 int lower_bound = 6 * ns_cycle >= 25 * div;
228 int upper_bound = 108 * ns_cycle <= 875 * div;
234 * Found a matching div. Calculate fractional divider needed,
/* fracdiv = ceil(parent(kHz) * 18/2 * ns_cycle / (1000 * div)). */
237 fracdiv = ((clk->parent->rate / 1000 * 18 / 2) *
238 ns_cycle + 1000 * div - 1) /
240 if (fracdiv < 18 || fracdiv > 35) {
244 /* Calculate the actual cycle time this results in */
245 ps_result = 6250 * div * fracdiv / 27;
247 /* Use the fastest result that doesn't break ns_cycle */
248 if (ps_result <= lowest_result) {
249 lowest_result = ps_result;
251 lowest_fracdiv = fracdiv;
/* No candidate found: search exhausted or all out of PFD range. */
255 if (div >= 256 || lowest_result == (u32) -1) {
259 pr_debug("Programming PFD=%u,DIV=%u ref_pix=%uMHz "
260 "PIXCLK=%uMHz cycle=%u.%03uns\n",
261 lowest_fracdiv, lowest_div,
262 480*18/lowest_fracdiv, 480*18/lowest_fracdiv/lowest_div,
263 lowest_result / 1000, lowest_result % 1000);
265 /* Program ref_pix phase fractional divider */
266 HW_CLKCTRL_FRAC_WR((HW_CLKCTRL_FRAC_RD() & ~BM_CLKCTRL_FRAC_PIXFRAC) |
267 BF_CLKCTRL_FRAC_PIXFRAC(lowest_fracdiv));
/* Ungate the pixel-clock PFD output. */
269 HW_CLKCTRL_FRAC_CLR(BM_CLKCTRL_FRAC_CLKGATEPIX);
271 /* Program pix divider */
272 reg_val = __raw_readl(clk->scale_reg);
273 reg_val &= ~(BM_CLKCTRL_PIX_DIV | BM_CLKCTRL_PIX_CLKGATE);
274 reg_val |= BF_CLKCTRL_PIX_DIV(lowest_div);
275 __raw_writel(reg_val, clk->scale_reg);
277 /* Wait for divider update */
280 for (i = 10000; i; i--)
281 if (!clk_is_busy(clk))
289 /* Switch to ref_pix source */
290 HW_CLKCTRL_CLKSEQ_CLR(BM_CLKCTRL_CLKSEQ_BYPASS_PIX);
/* Set the CPU clock rate (rate in kHz).  24000 selects the 24 MHz crystal
 * directly; otherwise search (frac, div) pairs minimizing the error of
 * pll*18/frac/div vs. the request, temporarily park the CPU on the 24M
 * bypass while reprogramming, then switch back to the PLL.  Many error
 * and bookkeeping lines are elided in this excerpt. */
297 static int cpu_set_rate(struct clk *clk, u32 rate)
301 else if (rate == 24000) {
302 /* switch to the 24M source */
303 clk_set_parent(clk, &osc_24M);
308 u32 clkctrl_frac = 1;
/* Exhaustive search over the 6-bit CPU divider. */
310 for ( ; c < 0x40; c++) {
/* Candidate PFD value for this divider, rounded to nearest. */
311 u32 f = (pll_clk.rate*18/c + rate/2) / rate;
314 if (f < 18 || f > 35)
/* s1 = error of best pair so far, s2 = error of this candidate. */
316 s1 = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu - rate;
317 s2 = pll_clk.rate*18/c/f - rate;
318 pr_debug("%s: s1 %d, s2 %d\n", __func__, s1, s2);
319 if (abs(s1) > abs(s2)) {
326 pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
327 clkctrl_cpu, clkctrl_frac);
329 int d = pll_clk.rate*18/clkctrl_frac/clkctrl_cpu -
332 clkctrl_frac < 18 || clkctrl_frac > 35)
337 val = __raw_readl(clk->scale_reg);
338 val &= ~(0x3f << clk->scale_shift);
/* Park the CPU on the 24M bypass before touching its dividers. */
340 clk_set_parent(clk, &osc_24M);
342 __raw_writel(val, clk->scale_reg);
/* NOTE(review): magic write of bit 7 to scale_reg+8 — presumably a
 * SET/CLR shadow register; confirm against the CLKCTRL register map. */
344 __raw_writel(1<<7, clk->scale_reg + 8);
345 /* write clkctrl_cpu */
346 clk->saved_div = clkctrl_cpu;
347 HW_CLKCTRL_CPU_WR((HW_CLKCTRL_CPU_RD() & ~0x3f) | clkctrl_cpu);
/* Bounded busy-wait for the CPU divider handshake. */
348 for (i = 10000; i; i--)
349 if (!clk_is_busy(clk))
352 printk(KERN_ERR "couldn't set up CPU divisor\n");
/* Back onto the PLL with the new dividers in place. */
355 clk_set_parent(clk, &pll_clk);
/* CPU rate read-back: parent*18 / frac-field / CPU divider, then rounded
 * up to a multiple of 10 (matches cpu_round_rate's rounding). */
362 static long cpu_get_rate(struct clk *clk)
364 long rate = clk->parent->rate * 18;
366 rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
367 rate /= HW_CLKCTRL_CPU_RD() & 0x3f;
368 rate = ((rate + 9) / 10) * 10;
/* Round a requested CPU rate to the nearest achievable pll*18/frac/div
 * value with frac in 18..35, mirroring cpu_set_rate's search; result is
 * rounded up to a multiple of 10.  (Excerpt: loop header and several
 * assignments elided.) */
374 static long cpu_round_rate(struct clk *clk, u32 rate)
385 (pll_clk.rate*18 / clkctrl_cpu + rate/2) / rate;
386 if (clkctrl_frac > 35)
/* Accept the pair when it reproduces the request at 10 kHz granularity. */
388 if (pll_clk.rate*18 / clkctrl_frac / clkctrl_cpu/10 ==
391 } while (pll_clk.rate / 2 >= clkctrl_cpu++ * rate);
392 if (pll_clk.rate / 2 < (clkctrl_cpu - 1) * rate)
394 pr_debug("%s: clkctrl_cpu %d, clkctrl_frac %d\n", __func__,
395 clkctrl_cpu, clkctrl_frac);
/* Clamp the fractional divider into its legal 18..35 window. */
396 if (clkctrl_frac < 18)
398 if (clkctrl_frac > 35)
401 r = pll_clk.rate * 18;
404 r = 10 * ((r + 9) / 10);
/* EMI (memory bus) rate read-back: parent*18 / frac-field / EMI divider.
 * (Excerpt: return statement elided.) */
409 static long emi_get_rate(struct clk *clk)
411 long rate = clk->parent->rate * 18;
413 rate /= (__raw_readl(clk->scale_reg) >> clk->scale_shift) & 0x3f;
414 rate /= HW_CLKCTRL_EMI_RD() & 0x3f;
/* Mux a clock between the 24M bypass and its PLL-derived source via the
 * CLKSEQ register's SET/CLR shadows (write of 1<<bypass_shift at
 * bypass_reg + shift, where shift selects SET vs CLR).  The CPU clock
 * needs special care: HBUS fractional mode and the CPU divider are saved
 * and restored around the mux change, with an stmp378x-only ordering for
 * the HBUS/CPU register writes.  (Excerpt: shift computation and several
 * lines elided.) */
420 static int clkseq_set_parent(struct clk *clk, struct clk *parent)
426 if (parent == &osc_24M)
429 if (clk->bypass_reg) {
430 u32 hbus_mask = BM_CLKCTRL_HBUS_DIV_FRAC_EN |
/* Case 1: CPU moving onto the 24M bypass (shift==4, presumably SET). */
433 if (clk == &cpu_clk && shift == 4) {
434 u32 hbus_val = HW_CLKCTRL_HBUS_RD();
435 u32 cpu_val = HW_CLKCTRL_CPU_RD();
436 hbus_val &= ~hbus_mask;
/* Remember the CPU divider so it can be restored on un-bypass. */
438 clk->saved_div = cpu_val & BM_CLKCTRL_CPU_DIV_CPU;
439 cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
441 __raw_writel(1 << clk->bypass_shift,
442 clk->bypass_reg + shift);
443 if (machine_is_stmp378x()) {
444 HW_CLKCTRL_HBUS_WR(hbus_val);
445 HW_CLKCTRL_CPU_WR(cpu_val);
/* Case 2: CPU leaving the bypass (shift==8, presumably CLR). */
448 } else if (clk == &cpu_clk && shift == 8) {
449 u32 hbus_val = HW_CLKCTRL_HBUS_RD();
450 u32 cpu_val = HW_CLKCTRL_CPU_RD();
451 hbus_val &= ~hbus_mask;
453 cpu_val &= ~BM_CLKCTRL_CPU_DIV_CPU;
/* Restore the divider saved when the bypass was entered. */
455 cpu_val |= clk->saved_div;
458 if (machine_is_stmp378x()) {
459 HW_CLKCTRL_HBUS_WR(hbus_val);
460 HW_CLKCTRL_CPU_WR(cpu_val);
463 __raw_writel(1 << clk->bypass_shift,
464 clk->bypass_reg + shift);
/* Default case: plain CLKSEQ SET/CLR write for non-CPU clocks. */
466 __raw_writel(1 << clk->bypass_shift,
467 clk->bypass_reg + shift);
/* Set the AHB (HBUS) rate.  HBUS divides the CPU clock either by an
 * integer divider or, when the rate does not divide evenly and the ratio
 * is < 32, by a fractional n/32 divider (bit 5 selects the mode).  The
 * CPU is parked on the 24M bypass around the write.  (Excerpt: several
 * branch and error lines elided.) */
475 static int hbus_set_rate(struct clk *clk, u32 rate)
480 struct clk *parent = clk->parent;
482 pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
/* HBUS can only divide down; it can never exceed the CPU rate. */
485 if (rate > parent->rate)
488 if (((parent->rate + rate/2) / rate) * rate != parent->rate &&
489 parent->rate / rate < 32) {
490 pr_debug("%s: switching to fractional mode\n", __func__);
/* Fractional mode: div/32 of the parent, rounded to nearest. */
495 div = (32 * rate + parent->rate / 2) / parent->rate;
497 div = (parent->rate + rate - 1) / rate;
498 pr_debug("%s: div calculated is %d\n", __func__, div);
499 if (!div || div > 0x1f)
/* Park the CPU on the crystal while the HBUS divider changes. */
502 clk_set_parent(&cpu_clk, &osc_24M);
504 clkctrl_hbus = __raw_readl(clk->scale_reg);
505 clkctrl_hbus &= ~0x3f;
/* Bit 5 = DIV_FRAC_EN: selects fractional vs integer divide. */
507 clkctrl_hbus |= (is_frac << 5);
509 __raw_writel(clkctrl_hbus, clk->scale_reg);
512 for (i = 10000; i; i--)
513 if (!clk_is_busy(clk))
516 printk(KERN_ERR "couldn't set up CPU divisor\n");
520 clk_set_parent(&cpu_clk, &pll_clk);
/* NOTE(review): divider value is written again after re-parenting —
 * presumably deliberate (bypass clobbers it); confirm. */
521 __raw_writel(clkctrl_hbus, clk->scale_reg);
/* HBUS rate read-back: bit 5 set means fractional mode (parent * div/32,
 * the /32 line is elided in this excerpt), otherwise plain division by
 * the 5-bit divider. */
526 static long hbus_get_rate(struct clk *clk)
528 long rate = clk->parent->rate;
530 if (__raw_readl(clk->scale_reg) & 0x20) {
531 rate *= __raw_readl(clk->scale_reg) & 0x1f;
534 rate /= __raw_readl(clk->scale_reg) & 0x1f;
/* Set the XBUS (slow peripheral bus) rate via its 10-bit integer divider:
 * div = ceil(parent/rate), then write and poll busy.  (Excerpt: error
 * returns elided.) */
540 static int xbus_set_rate(struct clk *clk, u32 rate)
545 pr_debug("%s: rate %d, parent rate %d\n", __func__, rate,
548 div = (clk->parent->rate + rate - 1) / rate;
549 pr_debug("%s: div calculated is %d\n", __func__, div);
550 if (!div || div > 0x3ff)
553 clkctrl_xbus = __raw_readl(clk->scale_reg);
554 clkctrl_xbus &= ~0x3ff;
556 __raw_writel(clkctrl_xbus, clk->scale_reg);
/* Bounded busy-wait for the divider handshake. */
559 for (i = 10000; i; i--)
560 if (!clk_is_busy(clk))
563 printk(KERN_ERR "couldn't set up xbus divisor\n");
/* XBUS rate read-back: parent rate / 10-bit divider.
 * (Excerpt: return statement elided.) */
570 static long xbus_get_rate(struct clk *clk)
572 long rate = clk->parent->rate;
574 rate /= __raw_readl(clk->scale_reg) & 0x3ff;
/* Per-clock-class operation tables, wired into the clk instances below.
 * std_ops: gated peripheral with integer divider and CLKSEQ mux. */
583 static struct clk_ops std_ops = {
584 .enable = std_clk_enable,
585 .disable = std_clk_disable,
586 .get_rate = per_get_rate,
587 .set_rate = per_set_rate,
588 .set_parent = clkseq_set_parent,
/* min_ops: gate only, fixed/unknown rate. */
591 static struct clk_ops min_ops = {
592 .enable = std_clk_enable,
593 .disable = std_clk_disable,
/* cpu_ops: full CPU rate management including round_rate. */
596 static struct clk_ops cpu_ops = {
597 .enable = std_clk_enable,
598 .disable = std_clk_disable,
599 .get_rate = cpu_get_rate,
600 .set_rate = cpu_set_rate,
601 .round_rate = cpu_round_rate,
602 .set_parent = clkseq_set_parent,
/* io_ops: IO PFD fractional divider, no mux. */
605 static struct clk_ops io_ops = {
606 .enable = std_clk_enable,
607 .disable = std_clk_disable,
608 .get_rate = io_get_rate,
609 .set_rate = io_set_rate,
/* hbus_ops: AHB divider only — never gated from software. */
612 static struct clk_ops hbus_ops = {
613 .get_rate = hbus_get_rate,
614 .set_rate = hbus_set_rate,
/* xbus_ops: slow-bus divider only. */
617 static struct clk_ops xbus_ops = {
618 .get_rate = xbus_get_rate,
619 .set_rate = xbus_set_rate,
/* lcdif_ops: pixel clock with PFD + divider + mux. */
622 static struct clk_ops lcdif_ops = {
623 .enable = std_clk_enable,
624 .disable = std_clk_disable,
625 .get_rate = lcdif_get_rate,
626 .set_rate = lcdif_set_rate,
627 .set_parent = clkseq_set_parent,
/* emi_ops: memory clock is read-only from here. */
630 static struct clk_ops emi_ops = {
631 .get_rate = emi_get_rate,
634 /* List of on-chip clocks */
/* 24 MHz crystal oscillator — root of the whole tree. */
636 static struct clk osc_24M = {
637 .flags = FIXED_RATE | ENABLED,
/* Main PLL (480 MHz class); gate lives in PLLCTRL0. */
641 static struct clk pll_clk = {
643 .enable_reg = HW_CLKCTRL_PLLCTRL0_ADDR,
646 .flags = FIXED_RATE | ENABLED,
/* CPU core clock: frac field in FRAC, mux in CLKSEQ, busy bit in CPU. */
651 static struct clk cpu_clk = {
653 .scale_reg = HW_CLKCTRL_FRAC_ADDR,
655 .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
657 .busy_reg = HW_CLKCTRL_CPU_ADDR,
659 .flags = RATE_PROPAGATES | ENABLED,
/* IO PFD clock: gate and scale share the FRAC register. */
663 static struct clk io_clk = {
665 .enable_reg = HW_CLKCTRL_FRAC_ADDR,
668 .scale_reg = HW_CLKCTRL_FRAC_ADDR,
670 .flags = RATE_PROPAGATES | ENABLED,
/* AHB clock, child of cpu_clk; divider and busy bit in HBUS. */
674 static struct clk hclk = {
676 .scale_reg = HW_CLKCTRL_HBUS_ADDR,
677 .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
679 .busy_reg = HW_CLKCTRL_HBUS_ADDR,
681 .flags = RATE_PROPAGATES | ENABLED,
/* Slow peripheral bus clock. */
685 static struct clk xclk = {
687 .scale_reg = HW_CLKCTRL_XBUS_ADDR,
688 .busy_reg = HW_CLKCTRL_XBUS_ADDR,
690 .flags = RATE_PROPAGATES | ENABLED,
/* Crystal-derived gated clocks: gates all live in the XTAL register. */
694 static struct clk uart_clk = {
696 .enable_reg = HW_CLKCTRL_XTAL_ADDR,
703 static struct clk audio_clk = {
705 .enable_reg = HW_CLKCTRL_XTAL_ADDR,
711 static struct clk pwm_clk = {
713 .enable_reg = HW_CLKCTRL_XTAL_ADDR,
719 static struct clk dri_clk = {
721 .enable_reg = HW_CLKCTRL_XTAL_ADDR,
727 static struct clk digctl_clk = {
729 .enable_reg = HW_CLKCTRL_XTAL_ADDR,
735 static struct clk timer_clk = {
737 .enable_reg = HW_CLKCTRL_XTAL_ADDR,
/* Pixel clock for the LCD interface; needs its mux programmed at init. */
744 static struct clk lcdif_clk = {
746 .scale_reg = HW_CLKCTRL_PIX_ADDR,
747 .busy_reg = HW_CLKCTRL_PIX_ADDR,
749 .enable_reg = HW_CLKCTRL_PIX_ADDR,
752 .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
754 .flags = NEEDS_SET_PARENT,
/* SSP (SPI/MMC) clock. */
758 static struct clk ssp_clk = {
760 .scale_reg = HW_CLKCTRL_SSP_ADDR,
761 .busy_reg = HW_CLKCTRL_SSP_ADDR,
763 .enable_reg = HW_CLKCTRL_SSP_ADDR,
765 .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
768 .flags = NEEDS_SET_PARENT,
/* GPMI (NAND controller) clock. */
772 static struct clk gpmi_clk = {
774 .scale_reg = HW_CLKCTRL_GPMI_ADDR,
775 .busy_reg = HW_CLKCTRL_GPMI_ADDR,
777 .enable_reg = HW_CLKCTRL_GPMI_ADDR,
780 .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
782 .flags = NEEDS_SET_PARENT,
786 static struct clk spdif_clk = {
788 .enable_reg = HW_CLKCTRL_SPDIF_ADDR,
/* Memory (EMI) clock: rate readable only, never reprogrammed here. */
794 static struct clk emi_clk = {
796 .enable_reg = HW_CLKCTRL_EMI_ADDR,
799 .scale_reg = HW_CLKCTRL_FRAC_ADDR,
801 .busy_reg = HW_CLKCTRL_EMI_ADDR,
803 .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
809 static struct clk ir_clk = {
811 .enable_reg = HW_CLKCTRL_IR_ADDR,
814 .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
819 static struct clk saif_clk = {
821 .scale_reg = HW_CLKCTRL_SAIF_ADDR,
822 .busy_reg = HW_CLKCTRL_SAIF_ADDR,
824 .enable_reg = HW_CLKCTRL_SAIF_ADDR,
827 .bypass_reg = HW_CLKCTRL_CLKSEQ_ADDR,
/* USB PHY clock: gate shares PLLCTRL0 with the PLL itself. */
832 static struct clk usb_clk = {
834 .enable_reg = HW_CLKCTRL_PLLCTRL0_ADDR,
840 /* list of all the clocks */
/* clkdev lookup table consumed by clk_init/propagate_rate; entries are
 * elided in this excerpt. */
841 static __initdata struct clk_lookup onchip_clks[] = {
/* Walk the lookup table and refresh the cached rate of every direct child
 * of 'clk' (via its get_rate op), recursing into children that themselves
 * propagate.  __init: only used during boot-time initialization. */
905 static int __init propagate_rate(struct clk *clk)
907 struct clk_lookup *cl;
909 for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
/* Skip malformed table entries defensively. */
911 if (unlikely(!clk_good(cl->clk)))
913 if (cl->clk->parent == clk && cl->clk->ops->get_rate) {
914 cl->clk->ops->get_rate(cl->clk);
915 if (cl->clk->flags & RATE_PROPAGATES)
916 propagate_rate(cl->clk);
/* Public clk API: return the clock's rate, delegating to its get_rate op
 * or falling back to the parent's rate when the clock has no op of its
 * own (pure gate).  (Excerpt: the FIXED_RATE fast path is elided.) */
924 unsigned long clk_get_rate(struct clk *clk)
926 if (unlikely(!clk_good(clk)))
932 if (clk->ops->get_rate != NULL)
933 return clk->ops->get_rate(clk);
935 return clk_get_rate(clk->parent);
937 EXPORT_SYMBOL(clk_get_rate);
/* Public clk API: round a requested rate to what the hardware can do;
 * clocks without a round_rate op fall through (return elided here). */
939 long clk_round_rate(struct clk *clk, unsigned long rate)
941 if (unlikely(!clk_good(clk)))
944 if (clk->ops->round_rate)
945 return clk->ops->round_rate(clk, rate);
949 EXPORT_SYMBOL(clk_round_rate);
/* True when rate2 is within 0.1% of rate1 (integer truncation of the
 * per-mille difference); used to skip redundant set_rate work. */
951 static inline int close_enough(long rate1, long rate2)
953 return rate1 && !((rate2 - rate1) * 1000 / rate1);
/* Public clk API: change a clock's rate unless it is fixed-rate, lacks a
 * set_rate op, or is already close enough; on success propagate the new
 * rate to children.  (Excerpt: error returns and locking elided.) */
956 int clk_set_rate(struct clk *clk, unsigned long rate)
960 if (unlikely(!clk_good(clk)))
963 if (clk->flags & FIXED_RATE || !clk->ops->set_rate)
/* Skip hardware writes when already within 0.1% of the target. */
966 else if (!close_enough(clk->rate, rate)) {
967 ret = clk->ops->set_rate(clk, rate);
971 if (clk->flags & RATE_PROPAGATES)
979 EXPORT_SYMBOL(clk_set_rate);
/* Public clk API: enable a clock, enabling its parent first so the whole
 * chain is running; usage counting happens under clocks_lock (the count
 * increment line is elided in this excerpt). */
981 int clk_enable(struct clk *clk)
983 unsigned long clocks_flags;
985 if (unlikely(!clk_good(clk)))
/* Recursively bring up ancestors before touching this gate. */
989 clk_enable(clk->parent);
991 spin_lock_irqsave(&clocks_lock, clocks_flags);
994 if (clk->ops && clk->ops->enable)
995 clk->ops->enable(clk);
997 spin_unlock_irqrestore(&clocks_lock, clocks_flags);
1000 EXPORT_SYMBOL(clk_enable);
/* Disable helper used where clocks_lock is already held (or during init):
 * gates the clock only when its usage count is zero, then recurses up to
 * release the parent. */
1002 static void local_clk_disable(struct clk *clk)
1004 if (unlikely(!clk_good(clk)))
1007 if (clk->usage == 0 && clk->ops->disable)
1008 clk->ops->disable(clk);
1011 local_clk_disable(clk->parent);
/* Public clk API: drop one usage reference; gate the clock when the count
 * reaches zero, then release the parent reference as well. */
1014 void clk_disable(struct clk *clk)
1016 unsigned long clocks_flags;
1018 if (unlikely(!clk_good(clk)))
1021 spin_lock_irqsave(&clocks_lock, clocks_flags);
1023 if ((--clk->usage) == 0 && clk->ops->disable)
1024 clk->ops->disable(clk);
1026 spin_unlock_irqrestore(&clocks_lock, clocks_flags);
/* Recurse outside the lock to drop the parent's reference. */
1028 clk_disable(clk->parent);
1030 EXPORT_SYMBOL(clk_disable);
1032 /* Some additional API */
/* Public clk API: re-parent a clock via its set_parent op, migrating this
 * clock's usage count from the old parent to the new one and gating
 * whichever parent ends up unused.  (Excerpt: success check between the
 * op call and the bookkeeping is elided.) */
1033 int clk_set_parent(struct clk *clk, struct clk *parent)
1036 unsigned long clocks_flags;
1038 if (unlikely(!clk_good(clk)))
1041 if (!clk->ops->set_parent)
1044 spin_lock_irqsave(&clocks_lock, clocks_flags);
1046 ret = clk->ops->set_parent(clk, parent);
1048 /* disable if usage count is 0 */
1049 local_clk_disable(parent);
/* Transfer this clock's references from the old parent to the new. */
1051 parent->usage += clk->usage;
1052 clk->parent->usage -= clk->usage;
1054 /* disable if new usage count is 0 */
1055 local_clk_disable(clk->parent);
1057 clk->parent = parent;
1059 spin_unlock_irqrestore(&clocks_lock, clocks_flags);
1064 EXPORT_SYMBOL(clk_set_parent);
/* Public clk API: return the clock's current parent (NULL path and the
 * return of clk->parent are elided in this excerpt). */
1066 struct clk *clk_get_parent(struct clk *clk)
1068 if (unlikely(!clk_good(clk)))
1072 EXPORT_SYMBOL(clk_get_parent);
/* Boot-time clock setup: walk the lookup table once, enabling clocks
 * flagged ENABLED (and gating the rest), programming initial rates for
 * NEEDS_INITIALIZATION clocks, reading back / propagating rates, and
 * programming muxes for NEEDS_SET_PARENT clocks.  Registered as an
 * arch_initcall.  (Excerpt: clkdev registration and 'ops' assignment
 * lines elided.) */
1074 static int __init clk_init(void)
1076 struct clk_lookup *cl;
1077 struct clk_ops *ops;
1079 spin_lock_init(&clocks_lock);
1081 for (cl = onchip_clks; cl < onchip_clks + ARRAY_SIZE(onchip_clks);
1083 if (cl->clk->flags & ENABLED)
1084 clk_enable(cl->clk);
/* Not flagged ENABLED: gate it off if nothing uses it. */
1086 local_clk_disable(cl->clk);
1090 if ((cl->clk->flags & NEEDS_INITIALIZATION) &&
1091 ops && ops->set_rate)
1092 ops->set_rate(cl->clk, cl->clk->rate);
1094 if (cl->clk->flags & FIXED_RATE) {
1095 if (cl->clk->flags & RATE_PROPAGATES)
1096 propagate_rate(cl->clk);
1098 if (ops && ops->get_rate)
1099 ops->get_rate(cl->clk);
/* Clocks whose CLKSEQ mux must be programmed to match .parent. */
1102 if (cl->clk->flags & NEEDS_SET_PARENT) {
1103 if (ops && ops->set_parent)
1104 ops->set_parent(cl->clk, cl->clk->parent);
1112 arch_initcall(clk_init);