/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h>	/* for PM timer frequency */
#endif
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif
#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif

extern void i8254_timer_resume(void);
extern int using_apic_timer;

static char *time_init_gtod(void);
DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS
unsigned int cpu_khz;			/* TSC clocks / usec, not used here */
static unsigned long hpet_period;	/* fsecs / HPET clock */
unsigned long hpet_tick;		/* HPET clocks / interrupt */
int hpet_use_timer;			/* Use counter of hpet for time keeping, otherwise PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;			/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */
static inline unsigned int do_gettimeoffset_tsc(void)
{
	unsigned long t;
	unsigned long x;
	t = get_cycles_sync();
	if (t < vxtime.last_tsc)
		t = vxtime.last_tsc; /* hack */
	x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
	return x;
}
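/*
 * Note on the shift above: vxtime.tsc_quot is a 32.32 fixed-point factor,
 * set up in time_init() as (1000L << 32) / cpu_khz, i.e. microseconds per
 * TSC cycle scaled by 2^32, so the >> 32 converts elapsed cycles straight
 * to microseconds.
 */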
static inline unsigned int do_gettimeoffset_hpet(void)
{
	/* cap counter read to one tick to avoid inconsistencies */
	unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
	return (min(counter, hpet_tick) * vxtime.quot) >> 32;
}
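/*
 * Likewise, vxtime.quot is (1000000L << 32) / vxtime_hz: microseconds per
 * wall-timer tick in 32.32 fixed point. Capping the counter delta at
 * hpet_tick bounds the returned offset to one timer interval.
 */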
unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */
void do_gettimeofday(struct timeval *tv)
{
	unsigned long seq, t;
	unsigned int sec, usec;

	do {
		seq = read_seqbegin(&xtime_lock);

		sec = xtime.tv_sec;
		usec = xtime.tv_nsec / 1000;

		/* i386 does some correction here to keep the clock
		   monotonic even when ntpd is fixing drift.
		   But that didn't work for me: the clock was non-monotonic
		   with ntp anyway.
		   I dropped all corrections now until a real solution can
		   be found. Note when you fix it here you need to do the same
		   in arch/x86_64/kernel/vsyscall.c and export all needed
		   variables in vmlinux.lds. -AK */

		t = (jiffies - wall_jiffies) * (1000000L / HZ) +
			do_gettimeoffset();
		usec += t;

	} while (read_seqretry(&xtime_lock, seq));

	tv->tv_sec = sec + usec / 1000000;
	tv->tv_usec = usec % 1000000;
}
EXPORT_SYMBOL(do_gettimeofday);
/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * ages already.
 */
int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);

	nsec -= do_gettimeoffset() * 1000 +
		(jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();

	write_sequnlock_irq(&xtime_lock);

	clock_was_set();
	return 0;
}

EXPORT_SYMBOL(do_settimeofday);
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or only a
	   single word. This checks if the address on the stack looks like
	   a kernel text address. There is a small window for false hits,
	   but in that case the tick is just accounted to the spinlock
	   function. Better would be to write these functions in assembler
	   again and check exactly. */
	if (in_lock_functions(pc)) {
		char *v = *(char **)regs->rsp;
		if ((v >= _stext && v <= _etext) ||
		    (v >= _sinittext && v <= _einittext) ||
		    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
			return (unsigned long)v;
		return ((unsigned long *)regs->rsp)[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */
static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

	/*
	 * IRQs are disabled when we're called from the timer interrupt,
	 * no need for spin_lock_irqsave()
	 */

	spin_lock(&rtc_lock);

	/*
	 * Tell the clock it's being set and stop it.
	 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

	/*
	 * since we're only adjusting minutes and seconds, don't interfere with hour
	 * overflow. This avoids messing with unknown time zones but requires your RTC
	 * not to be off by more than 15 minutes. Since we're calling it only when
	 * our clock is externally synchronized using NTP, this shouldn't be a problem.
	 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;
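	/*
	 * Worked example of the rounding above: with the RTC in a half-hour
	 * time zone, 29 minutes behind, (29 + 15) / 30 = 1 is odd, so 30
	 * minutes are added; a mere 10-minute drift gives (10 + 15) / 30 = 0,
	 * which is even, so no half-hour correction is applied.
	 */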
	/* AMD 8111 is a really bad time keeper and hits this regularly.
	   It probably was an attempt to avoid screwing up DST, but ignore
	   that for now. */
	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	} else {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	}
	/*
	 * The following flags have to be released exactly in this order, otherwise the
	 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
	 * not reset the oscillator and will not update precisely 500 ms later. You
	 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
	 * believes data sheets anyway ... -- Markus Kuhn
	 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}
/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 *		Note: This function is required to return accurate
 *		time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
	unsigned long seq;
	u32 last_offset, this_offset, offset;
	unsigned long long base;

	if (vxtime.mode == VXTIME_HPET) {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last;
			base = monotonic_base;
			this_offset = hpet_readl(HPET_COUNTER);
		} while (read_seqretry(&xtime_lock, seq));
		offset = (this_offset - last_offset);
		offset *= (NSEC_PER_SEC / HZ) / hpet_tick;
		return base + offset;
	} else {
		do {
			seq = read_seqbegin(&xtime_lock);

			last_offset = vxtime.last_tsc;
			base = monotonic_base;
		} while (read_seqretry(&xtime_lock, seq));
		this_offset = get_cycles_sync();
		offset = (this_offset - last_offset) * 1000 / cpu_khz;
		return base + offset;
	}
}
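/*
 * Unit check for monotonic_clock(): in HPET mode, (NSEC_PER_SEC/HZ) /
 * hpet_tick is nanoseconds per HPET clock, consistent with the nanosecond
 * monotonic_base. In the TSC branch, cycles * 1000 / cpu_khz is
 * dimensionally microseconds (cpu_khz is cycles per millisecond), while
 * monotonic_base advances in nanoseconds (note the * 1000000 / cpu_khz
 * scaling in main_timer_handler), so the factor of 1000 here looks
 * suspect; the original source shipped this expression as-is.
 */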
EXPORT_SYMBOL(monotonic_clock);
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
	static long lost_count;
	static int warned;

	if (report_lost_ticks) {
		printk(KERN_WARNING "time.c: Lost %d timer tick(s)! ", lost);
		print_symbol("(rip %s)\n", regs->rip);
	}

	if (lost_count == 1000 && !warned) {
		printk(KERN_WARNING
		       "warning: many lost ticks.\n"
		       KERN_WARNING "Your time source seems to be unstable or "
				"some driver is hogging interrupts\n");
		print_symbol("rip %s\n", regs->rip);
		if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
			printk(KERN_WARNING "Falling back to HPET\n");
			if (hpet_use_timer)
				vxtime.last = hpet_readl(HPET_T0_CMP) -
							hpet_tick;
			else
				vxtime.last = hpet_readl(HPET_COUNTER);
			vxtime.mode = VXTIME_HPET;
			do_gettimeoffset = do_gettimeoffset_hpet;
		}
		/* else should fall back to PIT, but code missing. */
		warned = 1;
	} else
		lost_count++;
#ifdef CONFIG_CPU_FREQ
	/* In some cases the CPU can change frequency without us noticing
	   (like going into thermal throttle).
	   Give cpufreq a chance to catch up. */
	if ((lost_count+1) % 25 == 0) {
		cpufreq_delayed_get();
	}
#endif
}
void main_timer_handler(struct pt_regs *regs)
{
	static unsigned long rtc_update = 0;
	unsigned long tsc;
	int delay = 0, offset = 0, lost = 0;

	/*
	 * Here we are in the timer irq handler. We have irqs locally disabled (so we
	 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
	 * on the other CPU, so we need a lock. We also need to lock the vsyscall
	 * variables, because both do_timer() and us change them -arca+vojtech
	 */

	write_seqlock(&xtime_lock);

	if (vxtime.hpet_address)
		offset = hpet_readl(HPET_COUNTER);

	if (hpet_use_timer) {
		/* if we're using the hpet timer functionality,
		 * we can more accurately know the counter value
		 * when the timer interrupt occurred.
		 */
		offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
		delay = hpet_readl(HPET_COUNTER) - offset;
	} else {
		spin_lock(&i8253_lock);
		outb_p(0x00, 0x43);
		delay = inb_p(0x40);
		delay |= inb(0x40) << 8;
		spin_unlock(&i8253_lock);
		delay = LATCH - 1 - delay;
	}
	tsc = get_cycles_sync();

	if (vxtime.mode == VXTIME_HPET) {
		if (offset - vxtime.last > hpet_tick) {
			lost = (offset - vxtime.last) / hpet_tick - 1;
		}

		monotonic_base +=
			(offset - vxtime.last) * (NSEC_PER_SEC/HZ) / hpet_tick;

		vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
	} else if (vxtime.mode == VXTIME_PMTMR) {
		lost = pmtimer_mark_offset();
#endif
	} else {
		offset = (((tsc - vxtime.last_tsc) *
			   vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

		if (offset < 0)
			offset = 0;

		if (offset > (USEC_PER_SEC / HZ)) {
			lost = offset / (USEC_PER_SEC / HZ);
			offset %= (USEC_PER_SEC / HZ);
		}

		monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

		vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

		if ((((tsc - vxtime.last_tsc) *
		      vxtime.tsc_quot) >> 32) < offset)
			vxtime.last_tsc = tsc -
				(((long) offset << 32) / vxtime.tsc_quot) - 1;
	}
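	/*
	 * Note on the TSC bookkeeping above: 'delay' is how far into the
	 * timer period the interrupt was actually handled, in wall-timer
	 * (HPET or PIT) ticks; vxtime.quot * delay / vxtime.tsc_quot
	 * converts that to TSC cycles, so last_tsc is backdated to the
	 * moment the interrupt was raised. The final check appears intended
	 * to keep last_tsc old enough that readers still see the leftover
	 * 'offset' microseconds, preserving gettimeofday monotonicity
	 * across the update.
	 */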
	if (lost > 0) {
		handle_lost_ticks(lost, regs);
		jiffies += lost;
	}

/*
 * Do the timer stuff.
 */

	do_timer(regs);
#ifndef CONFIG_SMP
	update_process_times(user_mode(regs));
#endif
/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in which case
 * we have to call the local interrupt handler ourselves.
 */

#ifndef CONFIG_X86_LOCAL_APIC
	profile_tick(CPU_PROFILING, regs);
#else
	if (!using_apic_timer)
		smp_local_timer_interrupt(regs);
#endif
/*
 * If we have an externally synchronized Linux clock, then update the CMOS
 * clock accordingly every ~11 minutes. set_rtc_mmss() will be called in the
 * jiffy closest to exactly 500 ms before the next second. If the update
 * fails, we don't care, as it'll be updated on the next turn, and the problem
 * (time way off) isn't likely to go away much sooner anyway.
 */

	if (ntp_synced() && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;
	}

	write_sequnlock(&xtime_lock);
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	if (apic_runs_main_timer > 1)
		return IRQ_HANDLED;
	main_timer_handler(regs);
#ifdef CONFIG_X86_LOCAL_APIC
	if (using_apic_timer)
		smp_send_timer_broadcast_ipi();
#endif
	return IRQ_HANDLED;
}
static unsigned int cyc2ns_scale __read_mostly;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
	cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
	return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
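/*
 * Sanity check on the scale: cyc2ns_scale = (1000000 << 10) / cpu_khz is
 * nanoseconds per cycle in 22.10 fixed point (1000000 ns per ms over
 * cycles per ms). For a 2 GHz CPU (cpu_khz = 2000000) it is 512, i.e.
 * 0.5 ns per cycle once the 10-bit shift is applied.
 */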
unsigned long long sched_clock(void)
{
	unsigned long a = 0;

#if 0
	/* Don't do a HPET read here. Using TSC always is much faster
	   and HPET may not be mapped yet when the scheduler first runs.
	   Disadvantage is a small drift between CPUs in some configurations,
	   but that should be tolerable. */
	if (__vxtime.mode == VXTIME_HPET)
		return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

	/* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
	   which means it is not completely exact and may not be monotonic between
	   CPUs. But the errors should be too small to matter for scheduling
	   purposes. */

	rdtscll(a);
	return cycles_2_ns(a);
}
static unsigned long get_cmos_time(void)
{
	unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
	unsigned char uip = 0, this = 0;
	unsigned long flags;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second, we time out approximately after 2.4 seconds on a machine with
 * standard 8.3 MHz ISA bus.
 */

	spin_lock_irqsave(&rtc_lock, flags);

	while (timeout && (!uip || this)) {
		uip |= this;
		this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
		timeout--;
	}

	/*
	 * Here we are safe to assume the registers won't change for a whole
	 * second, so we just go ahead and read them.
	 */
	sec = CMOS_READ(RTC_SECONDS);
	min = CMOS_READ(RTC_MINUTES);
	hour = CMOS_READ(RTC_HOURS);
	day = CMOS_READ(RTC_DAY_OF_MONTH);
	mon = CMOS_READ(RTC_MONTH);
	year = CMOS_READ(RTC_YEAR);

	spin_unlock_irqrestore(&rtc_lock, flags);

	/*
	 * We know that x86-64 always uses BCD format, no need to check the
	 * config register.
	 */

	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);

	/*
	 * x86-64 systems only exist since 2002.
	 * This will work up to Dec 31, 2100.
	 */
	year += 2000;

	return mktime(year, mon, day, hour, min, sec);
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the CPU frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */
#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
	unsigned int cpu;
	for_each_online_cpu(cpu) {
		cpufreq_get(cpu);
	}
	cpufreq_delayed_issched = 0;
}
/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
 * to verify the CPU frequency the timing core thinks the CPU is running
 * at is still correct.
 */
static void cpufreq_delayed_get(void)
{
	static int warned;
	if (cpufreq_init && !cpufreq_delayed_issched) {
		cpufreq_delayed_issched = 1;
		if (!warned) {
			warned = 1;
			printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
		}
		schedule_work(&cpufreq_delayed_get_work);
	}
}
static unsigned int  ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;
static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

	if (!ref_freq) {
		ref_freq = freq->old;
		loops_per_jiffy_ref = *lpj;
		cpu_khz_ref = cpu_khz;
	}
	if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		*lpj =
		cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

		cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	}

	set_cyc2ns_scale(cpu_khz_ref);

	return 0;
}
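/*
 * cpufreq_scale(old, div, mult) rescales 'old' by mult/div, so the handler
 * above keeps loops_per_jiffy and cpu_khz in step with the new core
 * frequency and recomputes tsc_quot exactly as time_init() does. Note that
 * set_cyc2ns_scale() is passed cpu_khz_ref (the boot-time reference), not
 * the rescaled cpu_khz, which looks questionable but matches the original
 * source.
 */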
static struct notifier_block time_cpufreq_notifier_block = {
	.notifier_call	= time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
	INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
	if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
				       CPUFREQ_TRANSITION_NOTIFIER))
		cpufreq_init = 1;
	return 0;
}

core_initcall(cpufreq_tsc);

#endif
/*
 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
 * it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000
static unsigned int __init hpet_calibrate_tsc(void)
{
	int tsc_start, hpet_start;
	int tsc_now, hpet_now;
	unsigned long flags;

	local_irq_save(flags);
	local_irq_disable();

	hpet_start = hpet_readl(HPET_COUNTER);
	rdtscl(tsc_start);

	do {
		local_irq_disable();
		hpet_now = hpet_readl(HPET_COUNTER);
		tsc_now = get_cycles_sync();
		local_irq_restore(flags);
	} while ((tsc_now - tsc_start) < TICK_COUNT &&
		 (hpet_now - hpet_start) < TICK_COUNT);

	return (tsc_now - tsc_start) * 1000000000L
		/ ((hpet_now - hpet_start) * hpet_period / 1000);
}
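/*
 * Units of the expression above: hpet_period is femtoseconds per HPET
 * tick, so (hpet_now - hpet_start) * hpet_period / 1000 is the elapsed
 * time in picoseconds. TSC cycles * 10^9 / picoseconds is cycles per
 * millisecond, i.e. the TSC frequency in kHz.
 */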
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */
static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);

	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	outb(0xb0, 0x43);
	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
	start = get_cycles_sync();
	while ((inb(0x61) & 0x20) == 0);
	end = get_cycles_sync();

	spin_unlock_irqrestore(&i8253_lock, flags);

	return (end - start) / 50;
}
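/*
 * (end - start) is the number of TSC cycles elapsed in 50 ms, so dividing
 * by 50 gives cycles per millisecond, i.e. the TSC frequency in kHz. The
 * count loaded into channel 2 above is PIT_TICK_RATE / 20, the number of
 * 1.193182 MHz PIT ticks in 50 ms.
 */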
static __init int late_hpet_init(void)
{
	struct hpet_data	hd;
	unsigned int		ntimer;

	if (!vxtime.hpet_address)
		return 0;

	memset(&hd, 0, sizeof(hd));

	ntimer = hpet_readl(HPET_ID);
	ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
	ntimer++;

	/*
	 * Register with driver.
	 * Timer0 and Timer1 are used by platform.
	 */
	hd.hd_phys_address = vxtime.hpet_address;
	hd.hd_address = (void __iomem *)fix_to_virt(FIX_HPET_BASE);
	hd.hd_nirqs = ntimer;
	hd.hd_flags = HPET_DATA_PLATFORM;
	hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
	hpet_reserve_timer(&hd, 1);
#endif
	hd.hd_irq[0] = HPET_LEGACY_8254;
	hd.hd_irq[1] = HPET_LEGACY_RTC;
	if (ntimer > 2) {
		struct hpet		*hpet;
		struct hpet_timer	*timer;
		int			i;

		hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

		for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
		     timer++, i++)
			hd.hd_irq[i] = (timer->hpet_config &
					Tn_INT_ROUTE_CNF_MASK) >>
						Tn_INT_ROUTE_CNF_SHIFT;
	}

	hpet_alloc(&hd);

	return 0;
}
fs_initcall(late_hpet_init);
static int hpet_timer_stop_set_go(unsigned long tick)
{
	unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

	cfg = hpet_readl(HPET_CFG);
	cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
	hpet_writel(cfg, HPET_CFG);
	hpet_writel(0, HPET_COUNTER);
	hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */
	if (hpet_use_timer) {
		hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
		    HPET_TN_32BIT, HPET_T0_CFG);
		hpet_writel(hpet_tick, HPET_T0_CMP);
		hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
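		/*
		 * Answer to the question above: per the HPET specification,
		 * while HPET_TN_SETVAL is set the first comparator write
		 * programs the initial match value and the second write
		 * programs the periodic accumulator, hence the double write.
		 */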
		cfg |= HPET_CFG_LEGACY;
	}

/*
 * Go!
 */

	cfg |= HPET_CFG_ENABLE;
	hpet_writel(cfg, HPET_CFG);

	return 0;
}
static int hpet_init(void)
{
	unsigned int id;

	if (!vxtime.hpet_address)
		return -1;
	set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
	__set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */

	id = hpet_readl(HPET_ID);

	if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
		return -1;

	hpet_period = hpet_readl(HPET_PERIOD);
	if (hpet_period < 100000 || hpet_period > 100000000)
		return -1;

	hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
		hpet_period;
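	/*
	 * Derivation: one timer interval is USEC_PER_SEC / HZ microseconds
	 * and 10^9 femtoseconds make a microsecond, so the numerator is the
	 * interval length in femtoseconds; dividing by hpet_period
	 * (femtoseconds per HPET tick), with hpet_period / 2 added for
	 * rounding, yields HPET ticks per timer interrupt.
	 */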
	hpet_use_timer = (id & HPET_ID_LEGSUP);

	return hpet_timer_stop_set_go(hpet_tick);
}

static int hpet_reenable(void)
{
	return hpet_timer_stop_set_go(hpet_tick);
}
#define PIT_MODE 0x43
#define PIT_CH0  0x40

static void __init __pit_init(int val, u8 mode)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(mode, PIT_MODE);
	outb_p(val & 0xff, PIT_CH0);	/* LSB */
	outb_p(val >> 8, PIT_CH0);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}

void __init pit_init(void)
{
	__pit_init(LATCH, 0x34); /* binary, mode 2, LSB/MSB, ch 0 */
}

void __init pit_stop_interrupt(void)
{
	__pit_init(0, 0x30); /* mode 0 */
}
void __init stop_timer_interrupt(void)
{
	char *name;
	if (vxtime.hpet_address) {
		name = "HPET";
		hpet_timer_stop_set_go(0);
	} else {
		name = "PIT";
		pit_stop_interrupt();
	}
	printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
}
int __init time_setup(char *str)
{
	report_lost_ticks = 1;
	return 1;
}

static struct irqaction irq0 = {
	timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
void __init time_init(void)
{
	char *timename;
	char *gtod;

#ifdef HPET_HACK_ENABLE_DANGEROUS
	if (!vxtime.hpet_address) {
		printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
		       "manually!\n");
		outl(0x800038a0, 0xcf8);
		outl(0xff000001, 0xcfc);
		outl(0x800038a0, 0xcf8);
		vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
		printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
		       "at %#lx.\n", vxtime.hpet_address);
	}
#endif
	if (nohpet)
		vxtime.hpet_address = 0;
	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (!hpet_init())
		vxtime_hz = (1000000000000000L + hpet_period / 2) /
			hpet_period;
	else
		vxtime.hpet_address = 0;

	if (hpet_use_timer) {
		cpu_khz = hpet_calibrate_tsc();
		timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
	} else if (pmtmr_ioport && !vxtime.hpet_address) {
		vxtime_hz = PM_TIMER_FREQUENCY;
		timename = "PM";
		pit_init();
		cpu_khz = pit_calibrate_tsc();
#endif
	} else {
		pit_init();
		cpu_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	vxtime.mode = VXTIME_TSC;
	gtod = time_init_gtod();

	printk(KERN_INFO "time.c: Using %ld.%06ld MHz WALL %s GTOD %s timer.\n",
	       vxtime_hz / 1000000, vxtime_hz % 1000000, timename, gtod);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
	       cpu_khz / 1000, cpu_khz % 1000);
	vxtime.quot = (1000000L << 32) / vxtime_hz;
	vxtime.tsc_quot = (1000L << 32) / cpu_khz;
	vxtime.last_tsc = get_cycles_sync();
	setup_irq(0, &irq0);

	set_cyc2ns_scale(cpu_khz);
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
	if (oem_force_hpet_timer())
		return 1;
	/* Intel systems are normally all synchronized. Exceptions
	   are handled in the OEM check above. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return 0;
#endif
	/* Assume multi socket systems are not synchronized */
	return num_present_cpus() > 1;
}
/*
 * Decide what mode gettimeofday should use.
 */
__init static char *time_init_gtod(void)
{
	char *timetype;

	if (unsynchronized_tsc())
		notsc = 1;
	if (vxtime.hpet_address && notsc) {
		timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
		if (hpet_use_timer)
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		else
			vxtime.last = hpet_readl(HPET_COUNTER);
		vxtime.mode = VXTIME_HPET;
		do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
	/* Using PM for gettimeofday is quite slow, but we have no other
	   choice because the TSC is too unreliable on some systems. */
	} else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
		timetype = "PM";
		do_gettimeoffset = do_gettimeoffset_pm;
		vxtime.mode = VXTIME_PMTMR;
		sysctl_vsyscall = 0;
		printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
	} else {
		timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
		vxtime.mode = VXTIME_TSC;
	}
	return timetype;
}
__setup("report_lost_ticks", time_setup);

static long clock_cmos_diff;
static unsigned long sleep_start;
/*
 * sysfs support for the timer.
 */

static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	/*
	 * Estimate time zone so that set_time can update the clock
	 * accordingly.
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}
static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	unsigned long sleep_length = (ctime - sleep_start) * HZ;

	if (vxtime.hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	if (vxtime.mode == VXTIME_HPET) {
		if (hpet_use_timer)
			vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
		else
			vxtime.last = hpet_readl(HPET_COUNTER);
#ifdef CONFIG_X86_PM_TIMER
	} else if (vxtime.mode == VXTIME_PMTMR) {
		pmtimer_resume();
#endif
	} else
		vxtime.last_tsc = get_cycles_sync();
	write_sequnlock_irqrestore(&xtime_lock, flags);
	jiffies += sleep_length;
	wall_jiffies += sleep_length;
	monotonic_base += sleep_length * (NSEC_PER_SEC/HZ);
	touch_softlockup_watchdog();
	return 0;
}
static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every second, when the RTC
 *    clock is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all frequencies in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */
static unsigned int hpet_t1_cmp;	/* cached comparator register */

int is_hpet_enabled(void)
{
	return vxtime.hpet_address != 0;
}
/*
 * Timer 1 is used for the RTC; we do not use the periodic interrupt feature,
 * even if HPET supports periodic interrupts on Timer 1.
 * The reason being, to set up a periodic interrupt in HPET, we need to
 * stop the main counter. And if we do that every time someone disables/enables
 * the RTC, we will have an adverse effect on the main kernel timer running
 * on Timer 0.
 * So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called for the first time; during subsequent
 * interrupts, reinit happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
	unsigned int cfg, cnt;
	unsigned long flags;

	if (!is_hpet_enabled())
		return 0;
	/*
	 * Set the counter 1 and enable the interrupts.
	 */
	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	local_irq_save(flags);
	cnt = hpet_readl(HPET_COUNTER);
	cnt += ((hpet_tick*HZ)/hpet_rtc_int_freq);
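	/*
	 * The increment above: hpet_tick is HPET ticks per kernel timer
	 * interrupt (1/HZ s), so hpet_tick * HZ is HPET ticks per second;
	 * dividing by the RTC interrupt frequency gives the HPET delta
	 * between emulated RTC interrupts.
	 */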
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;
	local_irq_restore(flags);

	cfg = hpet_readl(HPET_T1_CFG);
	cfg &= ~HPET_TN_PERIODIC;
	cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_writel(cfg, HPET_T1_CFG);

	return 1;
}
static void hpet_rtc_timer_reinit(void)
{
	unsigned int cfg, cnt;

	if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
		cfg = hpet_readl(HPET_T1_CFG);
		cfg &= ~HPET_TN_ENABLE;
		hpet_writel(cfg, HPET_T1_CFG);
		return;
	}

	if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
		hpet_rtc_int_freq = PIE_freq;
	else
		hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

	/* It is more accurate to use the comparator value than current count. */
	cnt = hpet_t1_cmp;
	cnt += hpet_tick*HZ/hpet_rtc_int_freq;
	hpet_writel(cnt, HPET_T1_CMP);
	hpet_t1_cmp = cnt;
}
/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
	if (!is_hpet_enabled())
		return 0;

	if (bit_mask & RTC_UIE)
		UIE_on = 0;
	if (bit_mask & RTC_PIE)
		PIE_on = 0;
	if (bit_mask & RTC_AIE)
		AIE_on = 0;

	return 1;
}
int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
	int timer_init_reqd = 0;

	if (!is_hpet_enabled())
		return 0;

	if (!(PIE_on | AIE_on | UIE_on))
		timer_init_reqd = 1;

	if (bit_mask & RTC_UIE) {
		UIE_on = 1;
	}
	if (bit_mask & RTC_PIE) {
		PIE_on = 1;
		PIE_count = 0;
	}
	if (bit_mask & RTC_AIE) {
		AIE_on = 1;
	}

	if (timer_init_reqd)
		hpet_rtc_timer_init();

	return 1;
}
int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
	if (!is_hpet_enabled())
		return 0;

	alarm_time.tm_hour = hrs;
	alarm_time.tm_min = min;
	alarm_time.tm_sec = sec;

	return 1;
}

int hpet_set_periodic_freq(unsigned long freq)
{
	if (!is_hpet_enabled())
		return 0;

	PIE_freq = freq;
	PIE_count = 0;

	return 1;
}
int hpet_rtc_dropped_irq(void)
{
	if (!is_hpet_enabled())
		return 0;

	return 1;
}
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rtc_time curr_time;
	unsigned long rtc_int_flag = 0;
	int call_rtc_interrupt = 0;

	hpet_rtc_timer_reinit();

	if (UIE_on | AIE_on) {
		rtc_get_rtc_time(&curr_time);
	}
	if (UIE_on) {
		if (curr_time.tm_sec != prev_update_sec) {
			/* Set update int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag = RTC_UF;
			prev_update_sec = curr_time.tm_sec;
		}
	}
	if (PIE_on) {
		PIE_count++;
		if (PIE_count >= hpet_rtc_int_freq/PIE_freq) {
			/* Set periodic int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_PF;
			PIE_count = 0;
		}
	}
	if (AIE_on) {
		if ((curr_time.tm_sec == alarm_time.tm_sec) &&
		    (curr_time.tm_min == alarm_time.tm_min) &&
		    (curr_time.tm_hour == alarm_time.tm_hour)) {
			/* Set alarm int info, call real rtc int routine */
			call_rtc_interrupt = 1;
			rtc_int_flag |= RTC_AF;
		}
	}
	if (call_rtc_interrupt) {
		rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
		rtc_interrupt(rtc_int_flag, dev_id, regs);
	}
	return IRQ_HANDLED;
}
#endif
static int __init nohpet_setup(char *s)
{
	nohpet = 1;
	return 1;
}

__setup("nohpet", nohpet_setup);

int __init notsc_setup(char *s)
{
	notsc = 1;
	return 1;
}

__setup("notsc", notsc_setup);