/*
 * arch/blackfin/kernel/time.c
 *
 * This file contains the Blackfin-specific time handling details.
 * Most of the stuff is located in the machine specific files.
 *
 * Copyright 2004-2008 Analog Devices Inc.
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/irq.h>
#include <linux/delay.h>

#include <asm/blackfin.h>
#include <asm/time.h>
#include <asm/gptimers.h>

/* This is an NTP setting */
#define TICK_SIZE (tick_nsec / 1000)

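/*
 * Rough numbers for orientation (config dependent; HZ=250 is only an
 * example): tick_nsec is the NTP-maintained tick length in nanoseconds,
 * about 4,000,000 at HZ=250, so TICK_SIZE works out to roughly 4000,
 * i.e. the tick length in microseconds.
 */
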
static struct irqaction bfin_timer_irq = {
	.name = "Blackfin Timer Tick",
#ifdef CONFIG_IRQ_PER_CPU
	.flags = IRQF_DISABLED | IRQF_PERCPU,
#else
	.flags = IRQF_DISABLED
#endif
};

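/*
 * Tick source overview (a sketch; the clock rates below are examples only,
 * the real values come from get_sclk()/get_cclk() and the board setup):
 * with CONFIG_TICK_SOURCE_SYSTMR0 the tick comes from general-purpose
 * timer 0, clocked by SCLK, so one jiffy is get_sclk() / HZ timer clocks
 * (e.g. SCLK = 133 MHz, HZ = 250 -> 532,000 clocks = 4 ms).  Otherwise the
 * core timer is used, clocked by CCLK and prescaled by TIME_SCALE, so one
 * jiffy is get_cclk() / (HZ * TIME_SCALE) prescaled clocks.
 */
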
#ifdef CONFIG_TICK_SOURCE_SYSTMR0
void __init setup_system_timer0(void)
{
	/* Power down the core timer, just to play safe. */
	bfin_write_TCNTL(0);

	disable_gptimers(TIMER0bit);
	set_gptimer_status(0, TIMER_STATUS_TRUN0);
	while (get_gptimer_status(0) & TIMER_STATUS_TRUN0)
		udelay(10);

	set_gptimer_config(0, 0x59); /* IRQ enable, periodic, PWM_OUT, SCLKed, OUT PAD disabled */
	set_gptimer_period(TIMER0_id, get_sclk() / HZ);
	set_gptimer_pwidth(TIMER0_id, 1);
	SSYNC();
	enable_gptimers(TIMER0bit);
}
#else
void __init setup_core_timer(void)
{
	u32 tcount;

	/* power up the timer, but don't enable it just yet */
	bfin_write_TCNTL(1);
	CSYNC();

	/* the TSCALE prescaler counter */
	bfin_write_TSCALE(TIME_SCALE - 1);

	tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
	bfin_write_TPERIOD(tcount);
	bfin_write_TCOUNT(tcount);

	/* now enable the timer */
	CSYNC();

	bfin_write_TCNTL(7);
}
#endif

static void __init
time_sched_init(irqreturn_t (*timer_routine)(int, void *))
{
#ifdef CONFIG_TICK_SOURCE_SYSTMR0
	setup_system_timer0();
	bfin_timer_irq.handler = timer_routine;
	setup_irq(IRQ_TIMER0, &bfin_timer_irq);
#else
	setup_core_timer();
	bfin_timer_irq.handler = timer_routine;
	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
#endif
}

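/*
 * Worked example for the arithmetic in gettimeoffset() below (numbers are
 * illustrative only): ((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC is the
 * number of timer clocks per microsecond, so dividing the elapsed count by
 * it gives microseconds since the last tick.  With a 133 MHz timer clock
 * the divisor is about 133, so a count of 266,000 reads as roughly 2000
 * usec.  If a tick is already pending but not yet handled and the offset
 * is small (the counter just wrapped), a full jiffy of microseconds is
 * added so the returned time does not step backwards.
 */
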
/*
 * Should return useconds since last timer tick
 */
#ifndef CONFIG_GENERIC_TIME
static unsigned long gettimeoffset(void)
{
	unsigned long offset;
	unsigned long clocks_per_jiffy;

#ifdef CONFIG_TICK_SOURCE_SYSTMR0
	clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
	offset = bfin_read_TIMER0_COUNTER() /
		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);

	if ((get_gptimer_status(0) & TIMER_STATUS_TIMIL0) && offset < (100000 / HZ / 2))
		offset += (USEC_PER_SEC / HZ);
#else
	clocks_per_jiffy = bfin_read_TPERIOD();
	offset = (clocks_per_jiffy - bfin_read_TCOUNT()) /
		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);

	/* Check if we just wrapped the counters and maybe missed a tick */
	if ((bfin_read_ILAT() & (1 << IRQ_CORETMR))
	    && (offset < (100000 / HZ / 2)))
		offset += (USEC_PER_SEC / HZ);
#endif
	return offset;
}
#endif

static inline int set_rtc_mmss(unsigned long nowtime)
{
	return 0;
}

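/*
 * Note on the RTC update window in timer_interrupt() below (numbers assume
 * HZ=250, i.e. TICK_SIZE of about 4000 usec): when NTP reports the clock
 * as synchronized, the RTC is rewritten at most every 660 seconds, and
 * only while the time of day lies within half a tick of 500 ms into the
 * current second, i.e. roughly between 498,000 and 502,000 usec, so the
 * RTC second boundary lands close to the wall-clock one.
 */
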
/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
#ifdef CONFIG_CORE_TIMER_IRQ_L1
__attribute__((l1_text))
#endif
irqreturn_t timer_interrupt(int irq, void *dummy)
{
	/* last time the cmos clock got updated */
	static long last_rtc_update;

	write_seqlock(&xtime_lock);
#ifdef CONFIG_TICK_SOURCE_SYSTMR0
	if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
#endif
		do_timer(1);

		/*
		 * If we have an externally synchronized Linux clock, then update
		 * the CMOS clock accordingly every ~11 minutes. set_rtc_mmss() has to
		 * be called as close as possible to 500 ms before the new second starts.
		 */
		if (ntp_synced() &&
		    xtime.tv_sec > last_rtc_update + 660 &&
		    (xtime.tv_nsec / NSEC_PER_USEC) >=
		    500000 - ((unsigned)TICK_SIZE) / 2 &&
		    (xtime.tv_nsec / NSEC_PER_USEC) <=
		    500000 + ((unsigned)TICK_SIZE) / 2) {
			if (set_rtc_mmss(xtime.tv_sec) == 0)
				last_rtc_update = xtime.tv_sec;
			else
				/* Do it again in 60s. */
				last_rtc_update = xtime.tv_sec - 600;
		}
#ifdef CONFIG_TICK_SOURCE_SYSTMR0
		set_gptimer_status(0, TIMER_STATUS_TIMIL0);
	}
#endif
	write_sequnlock(&xtime_lock);

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);

	return IRQ_HANDLED;
}

void __init time_init(void)
{
	time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60;	/* 1 Jan 2007 */
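	/*
	 * Checking the constant above: the 37 years from 1970 to 2007 contain
	 * 9 leap days (1972, 1976, ..., 2004), so 365 * 37 + 9 = 13,514 days,
	 * i.e. 1,167,609,600 seconds, which is 00:00:00 UTC on 1 Jan 2007.
	 */
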
#ifdef CONFIG_RTC_DRV_BFIN
	/* [#2663] hack to filter junk RTC values that would cause
	 * userspace to have to deal with time values greater than
	 * 2^31 seconds (which uClibc cannot cope with yet)
	 */
	if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) {
		printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n");
		bfin_write_RTC_STAT(0);
	}
#endif

	/* Initialize xtime. From now on, xtime is updated with timer interrupts */
	xtime.tv_sec = secs_since_1970;
	xtime.tv_nsec = 0;

	wall_to_monotonic.tv_sec = -xtime.tv_sec;

	time_sched_init(timer_interrupt);
}

#ifndef CONFIG_GENERIC_TIME
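/*
 * A short note on do_gettimeofday() below: the seqlock loop samples xtime
 * plus the sub-tick offset from gettimeoffset() and retries if a timer
 * tick updates xtime mid-read; the loop that follows carries any overflow
 * of the microsecond value into whole seconds.
 */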
void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = gettimeoffset();
		sec = xtime.tv_sec;
		usec += (xtime.tv_nsec / NSEC_PER_USEC);
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= USEC_PER_SEC) {
		usec -= USEC_PER_SEC;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
EXPORT_SYMBOL(do_gettimeofday);

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting.  We need to set xtime correctly.  However, the
	 * value in this location is the value at the last tick.  Discover
	 * what correction gettimeofday() would have done, and then undo it!
	 */
	nsec -= (gettimeoffset() * NSEC_PER_USEC);

	wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();

	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
#endif /* !CONFIG_GENERIC_TIME */

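/*
 * sched_clock() below is jiffies based, so its resolution is one tick;
 * assuming HZ=250 it advances in steps of NSEC_PER_SEC / HZ = 4,000,000 ns.
 */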
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies * (NSEC_PER_SEC / HZ);
}