/*
 *  arch/sh/kernel/time_32.c
 *
 *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
 *  Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
 *  Copyright (C) 2002 - 2008  Paul Mundt
 *  Copyright (C) 2002  M. R. Brown  <mrbrown@linux-sh.org>
 *
 *  Some code taken from i386 version.
 *    Copyright (C) 1991, 1992, 1995  Linus Torvalds
 */
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/profile.h>
16 #include <linux/timex.h>
17 #include <linux/sched.h>
18 #include <linux/clockchips.h>
19 #include <linux/mc146818rtc.h> /* for rtc_lock */
20 #include <linux/smp.h>
21 #include <asm/clock.h>
23 #include <asm/timer.h>
26 struct sys_timer *sys_timer;
28 /* Move this somewhere more sensible.. */
29 DEFINE_SPINLOCK(rtc_lock);
30 EXPORT_SYMBOL(rtc_lock);
33 static void null_rtc_get_time(struct timespec *tv)
35 tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
39 static int null_rtc_set_time(const time_t secs)
45 * Null high precision timer functions for systems lacking one.
47 static cycle_t null_hpt_read(void)
52 void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
53 int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
55 #ifndef CONFIG_GENERIC_TIME
56 void do_gettimeofday(struct timeval *tv)
60 unsigned long usec, sec;
64 * Turn off IRQs when grabbing xtime_lock, so that
65 * the sys_timer get_offset code doesn't have to handle it.
67 seq = read_seqbegin_irqsave(&xtime_lock, flags);
68 usec = get_timer_offset();
70 usec += xtime.tv_nsec / NSEC_PER_USEC;
71 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
73 while (usec >= 1000000) {
81 EXPORT_SYMBOL(do_gettimeofday);
83 int do_settimeofday(struct timespec *tv)
85 time_t wtm_sec, sec = tv->tv_sec;
86 long wtm_nsec, nsec = tv->tv_nsec;
88 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
91 write_seqlock_irq(&xtime_lock);
93 * This is revolting. We need to set "xtime" correctly. However, the
94 * value in this location is the value at the most recent update of
95 * wall time. Discover what correction gettimeofday() would have
96 * made, and then undo it!
98 nsec -= get_timer_offset() * NSEC_PER_USEC;
100 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
101 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
103 set_normalized_timespec(&xtime, sec, nsec);
104 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
107 write_sequnlock_irq(&xtime_lock);
112 EXPORT_SYMBOL(do_settimeofday);
113 #endif /* !CONFIG_GENERIC_TIME */
115 #ifndef CONFIG_GENERIC_CLOCKEVENTS
116 /* last time the RTC clock got updated */
117 static long last_rtc_update;
120 * handle_timer_tick() needs to keep up the real-time clock,
121 * as well as call the "do_timer()" routine every clocktick
123 void handle_timer_tick(void)
126 profile_tick(CPU_PROFILING);
129 * Here we are in the timer irq handler. We just have irqs locally
130 * disabled but we don't know if the timer_bh is running on the other
131 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
132 * the irq version of write_lock because as just said we have irq
133 * locally disabled. -arca
135 write_seqlock(&xtime_lock);
139 * If we have an externally synchronized Linux clock, then update
140 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
141 * called as close as possible to 500 ms before the new second starts.
144 xtime.tv_sec > last_rtc_update + 660 &&
145 (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
146 (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
147 if (rtc_sh_set_time(xtime.tv_sec) == 0)
148 last_rtc_update = xtime.tv_sec;
150 /* do it again in 60s */
151 last_rtc_update = xtime.tv_sec - 600;
153 write_sequnlock(&xtime_lock);
156 update_process_times(user_mode(get_irq_regs()));
159 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
#ifdef CONFIG_PM
/* Stop the system timer across a suspend transition. */
int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);

	sys_timer->ops->stop();

	return 0;
}

/* Restart the system timer on resume. */
int timer_resume(struct sys_device *dev)
{
	struct sys_timer *sys_timer = container_of(dev, struct sys_timer, dev);

	sys_timer->ops->start();

	return 0;
}
#else
#define timer_suspend NULL
#define timer_resume NULL
#endif
184 static struct sysdev_class timer_sysclass = {
186 .suspend = timer_suspend,
187 .resume = timer_resume,
190 static int __init timer_init_sysfs(void)
192 int ret = sysdev_class_register(&timer_sysclass);
196 sys_timer->dev.cls = &timer_sysclass;
197 return sysdev_register(&sys_timer->dev);
199 device_initcall(timer_init_sysfs);
201 void (*board_time_init)(void);
204 * Shamelessly based on the MIPS and Sparc64 work.
206 static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
207 unsigned long sh_hpt_frequency = 0;
209 #define NSEC_PER_CYC_SHIFT 10
211 static struct clocksource clocksource_sh = {
214 .mask = CLOCKSOURCE_MASK(32),
215 .read = null_hpt_read,
217 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
220 static void __init init_sh_clocksource(void)
222 if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
225 clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
226 clocksource_sh.shift);
228 timer_ticks_per_nsec_quotient =
229 clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
231 clocksource_register(&clocksource_sh);
#ifdef CONFIG_GENERIC_TIME
/*
 * Scheduler clock in nanoseconds: raw clocksource cycles scaled by the
 * precomputed quotient (see init_sh_clocksource()).
 */
unsigned long long sched_clock(void)
{
	unsigned long long ticks = clocksource_sh.read();
	return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
}
#endif
242 void __init time_init(void)
249 rtc_sh_get_time(&xtime);
250 set_normalized_timespec(&wall_to_monotonic,
251 -xtime.tv_sec, -xtime.tv_nsec);
253 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
254 local_timer_setup(smp_processor_id());
258 * Find the timer to use as the system timer, it will be
259 * initialized for us.
261 sys_timer = get_sys_timer();
262 printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
265 if (sys_timer->ops->read)
266 clocksource_sh.read = sys_timer->ops->read;
268 init_sh_clocksource();
270 if (sh_hpt_frequency)
271 printk("Using %lu.%03lu MHz high precision timer.\n",
272 ((sh_hpt_frequency + 500) / 1000) / 1000,
273 ((sh_hpt_frequency + 500) / 1000) % 1000);