/*
 * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
 * Licensed under the GPL
 */
#include "linux/kernel.h"
#include "linux/module.h"
#include "linux/unistd.h"
#include "linux/stddef.h"
#include "linux/spinlock.h"
#include "linux/time.h"
#include "linux/sched.h"
#include "linux/interrupt.h"
#include "linux/init.h"
#include "linux/delay.h"
#include "linux/hrtimer.h"
#include "asm/irq.h"
#include "asm/param.h"
#include "asm/current.h"
#include "kern_util.h"
#include "user_util.h"
#include "mode.h"
#include "os.h"
/*
 * Scheduler clock - returns current time in nanosec units.
 */
unsigned long long sched_clock(void)
{
	return (unsigned long long)jiffies_64 * (1000000000 / HZ);
}
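/*
 * Note: this only advances in whole-jiffy steps (1/HZ s, i.e. 10 ms at
 * e.g. HZ=100), so it is coarse but monotonic as long as jiffies_64 is.
 */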
/* Changed at early boot */
int timer_irq_inited = 0;

static int first_tick;
static unsigned long long prev_nsecs;
#ifdef CONFIG_UML_REAL_TIME_CLOCK
static long long delta;			/* Deviation per interval */
#endif
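/*
 * Runs on every host timer tick.  With CONFIG_UML_REAL_TIME_CLOCK the
 * handler measures how much host time (os_nsecs()) actually elapsed since
 * the previous tick and converts the accumulated deviation into however
 * many jiffies are owed, so virtual time keeps tracking the host even when
 * ticks arrive late.  Without it, each tick simply advances jiffies by one.
 */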
void timer_irq(union uml_pt_regs *regs)
{
	unsigned long long ticks = 0;

	if(!timer_irq_inited){
		/* This is to ensure that ticks don't pile up when
		 * the timer handler is suspended */
		first_tick = 0;
		return;
	}

	if(first_tick){
#ifdef CONFIG_UML_REAL_TIME_CLOCK
		/* We've had 1 tick */
		unsigned long long nsecs = os_nsecs();

		delta += nsecs - prev_nsecs;
		prev_nsecs = nsecs;

		/* Protect against the host clock being set backwards */
		if(delta < 0)
			delta = 0;

		ticks += (delta * HZ) / BILLION;
		delta -= (ticks * BILLION) / HZ;
#else
		ticks = 1;
#endif
	}
	else {
		first_tick = 1;
		prev_nsecs = os_nsecs();
	}

	while(ticks > 0){
		do_IRQ(TIMER_IRQ, regs);
		ticks--;
	}
}
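/*
 * Boot-time setup: seed wall_to_monotonic with the negated current host
 * time so that the monotonic clock starts near zero rather than at host
 * uptime.
 */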
void time_init_kern(void)
{
	unsigned long long nsecs;

	nsecs = os_nsecs();
	set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION,
				-nsecs % BILLION);
}
void do_boot_timer_handler(struct sigcontext * sc)
{
	struct pt_regs regs;

	CHOOSE_MODE((void) (UPT_SC(&regs.regs) = sc),
		    (void) (regs.regs.skas.is_user = 0));

	do_timer(&regs);
}
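/*
 * local_offset is the difference between UML's idea of wall-clock time and
 * the host's os_nsecs(); settimeofday()/stime() adjust it instead of ever
 * touching the host clock.  timer_spinlock protects it.
 */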
static DEFINE_SPINLOCK(timer_spinlock);

static unsigned long long local_offset = 0;

static inline unsigned long long get_time(void)
{
	unsigned long long nsecs;
	unsigned long flags;

	spin_lock_irqsave(&timer_spinlock, flags);
	nsecs = os_nsecs();
	nsecs += local_offset;
	spin_unlock_irqrestore(&timer_spinlock, flags);

	return nsecs;
}
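/*
 * Interrupt handler for TIMER_IRQ: advance jiffies via do_timer() and
 * refresh xtime from the host clock under xtime_lock.
 */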
irqreturn_t um_timer(int irq, void *dev, struct pt_regs *regs)
{
	unsigned long long nsecs;
	unsigned long flags;

	do_timer(regs);

	write_seqlock_irqsave(&xtime_lock, flags);
	/* get_time() already folds local_offset in; adding it again here
	 * would skew xtime forward by the settimeofday() offset. */
	nsecs = get_time();
	xtime.tv_sec = nsecs / NSEC_PER_SEC;
	xtime.tv_nsec = nsecs - xtime.tv_sec * NSEC_PER_SEC;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	return IRQ_HANDLED;
}
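/*
 * um_time() and um_stime() back UML's time()/stime() system calls; they
 * read and set wall-clock time directly through get_time()/set_time()
 * rather than going through the tick-maintained xtime.
 */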
long um_time(int __user *tloc)
{
	long ret = get_time() / NSEC_PER_SEC;

	if((tloc != NULL) && put_user(ret, tloc))
		return -EFAULT;

	return ret;
}
void do_gettimeofday(struct timeval *tv)
{
	unsigned long long nsecs = get_time();

	tv->tv_sec = nsecs / NSEC_PER_SEC;
	/* Careful about calculations here - this was originally done as
	 * (nsecs - tv->tv_sec * NSEC_PER_SEC) / NSEC_PER_USEC
	 * which gave bogus (> 1000000) values.  Dunno why, suspect gcc
	 * (4.0.0) miscompiled it, or there's a subtle 64/32-bit conversion
	 * problem that I missed.
	 */
	nsecs -= tv->tv_sec * NSEC_PER_SEC;
	tv->tv_usec = (unsigned long) nsecs / NSEC_PER_USEC;
}
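/*
 * Point UML's wall clock at an absolute time by recomputing local_offset
 * against the current host clock; the host clock itself is never changed.
 */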
static inline void set_time(unsigned long long nsecs)
{
	unsigned long long now;
	unsigned long flags;

	spin_lock_irqsave(&timer_spinlock, flags);
	now = os_nsecs();
	local_offset = nsecs - now;
	spin_unlock_irqrestore(&timer_spinlock, flags);

	/* Tell the hrtimer code that the wall clock jumped */
	clock_was_set();
}
long um_stime(int __user *tptr)
{
	int value;

	if (get_user(value, tptr))
		return -EFAULT;

	set_time((unsigned long long) value * NSEC_PER_SEC);

	return 0;
}
int do_settimeofday(struct timespec *tv)
{
	set_time((unsigned long long) tv->tv_sec * NSEC_PER_SEC + tv->tv_nsec);

	return 0;
}
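/*
 * Entry point from the host timer-signal handler.  Every CPU accounts its
 * own process time, but only CPU 0 advances the global tick via timer_irq().
 */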
void timer_handler(int sig, union uml_pt_regs *regs)
{
	local_irq_disable();
	irq_enter();
	update_process_times(CHOOSE_MODE(
	                     (UPT_SC(regs) && user_context(UPT_SP(regs))),
			     (regs)->skas.is_user));
	irq_exit();
	local_irq_enable();
	if(current_thread->cpu == 0)
		timer_irq(regs);
}
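/*
 * Register um_timer() for TIMER_IRQ at boot, then mark the timer interrupt
 * as initialized so timer_irq() starts accounting ticks.
 */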
int __init timer_init(void)
{
	int err;

	err = request_irq(TIMER_IRQ, um_timer, SA_INTERRUPT, "timer", NULL);
	if(err != 0)
		printk(KERN_ERR "timer_init : request_irq failed - "
		       "errno = %d\n", -err);
	timer_irq_inited = 1;
	return 0;
}

arch_initcall(timer_init);