/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h>	/* for PM timer frequency */
#endif
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/cpufreq.h>
#include <linux/hpet.h>
#ifdef CONFIG_X86_LOCAL_APIC
#include <asm/apic.h>
#endif

#ifdef CONFIG_CPU_FREQ
static void cpufreq_delayed_get(void);
#endif

extern void i8254_timer_resume(void);
extern int using_apic_timer;
DEFINE_SPINLOCK(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

static int nohpet __initdata = 0;
static int notsc __initdata = 0;

#undef HPET_HACK_ENABLE_DANGEROUS

unsigned int cpu_khz;			/* TSC clocks / usec, not used here */
static unsigned long hpet_period;	/* fsecs / HPET clock */
unsigned long hpet_tick;		/* HPET clocks / interrupt */
static int hpet_use_timer;		/* Use counter of hpet for time keeping, otherwise PIT */
unsigned long vxtime_hz = PIT_TICK_RATE;
int report_lost_ticks;			/* command line option */
unsigned long long monotonic_base;

struct vxtime_data __vxtime __section_vxtime;	/* for vsyscalls */

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
struct timespec __xtime __section_xtime;
struct timezone __sys_tz __section_sys_tz;
/*
 * do_gettimeoffset() returns microseconds since last timer interrupt was
 * triggered by hardware. A memory read of HPET is slower than a register read
 * of TSC, but much more reliable. It's also synchronized to the timer
 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick, if a
 * timer interrupt has happened already, but vxtime.trigger wasn't updated yet.
 * This is not a problem, because jiffies hasn't updated either. They are bound
 * together by xtime_lock.
 */
static inline unsigned int do_gettimeoffset_tsc(void)
{
        unsigned long t;
        unsigned long x;

        t = get_cycles_sync();
        if (t < vxtime.last_tsc)
                t = vxtime.last_tsc; /* hack */
        x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
        return x;
}
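/*
 * Illustrative example (added, not in the original source): the shift above
 * is fixed-point arithmetic for delta_tsc * 1000 / cpu_khz without a divide.
 * Assuming a hypothetical 2 GHz CPU, cpu_khz = 2000000 and
 * tsc_quot = (1000UL << 32) / 2000000 = 2147483, so a delta of 2000000
 * cycles (one millisecond) yields (2000000 * 2147483) >> 32 = 999 usec.
 * The small truncation error does not accumulate: last_tsc is reloaded at
 * every timer interrupt.
 */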
static inline unsigned int do_gettimeoffset_hpet(void)
{
        /* cap counter read to one tick to avoid inconsistencies */
        unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
        return (min(counter, hpet_tick) * vxtime.quot) >> 32;
}
unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
/*
 * This version of gettimeofday() has microsecond resolution and better than
 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818
 * MHz) HPET timer.
 */
void do_gettimeofday(struct timeval *tv)
{
        unsigned long seq, t;
        unsigned int sec, usec;

        do {
                seq = read_seqbegin(&xtime_lock);

                sec = xtime.tv_sec;
                usec = xtime.tv_nsec / 1000;

                /* i386 does some correction here to keep the clock
                   monotonic even when ntpd is fixing drift.
                   But they didn't work for me, there is a non-monotonic
                   clock anyway with ntp.
                   I dropped all corrections now until a real solution can
                   be found. Note when you fix it here you need to do the same
                   in arch/x86_64/kernel/vsyscall.c and export all needed
                   variables in vmlinux.lds. -AK */

                t = (jiffies - wall_jiffies) * (1000000L / HZ) +
                        do_gettimeoffset();
                usec += t;

        } while (read_seqretry(&xtime_lock, seq));

        tv->tv_sec = sec + usec / 1000000;
        tv->tv_usec = usec % 1000000;
}
EXPORT_SYMBOL(do_gettimeofday);
/*
 * settimeofday() first undoes the correction that gettimeofday would do
 * on the time, and then saves it. This is ugly, but has been like this for
 * a long time already.
 */
int do_settimeofday(struct timespec *tv)
{
        time_t wtm_sec, sec = tv->tv_sec;
        long wtm_nsec, nsec = tv->tv_nsec;

        if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
                return -EINVAL;

        write_seqlock_irq(&xtime_lock);

        nsec -= do_gettimeoffset() * 1000 +
                (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);

        wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
        wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

        set_normalized_timespec(&xtime, sec, nsec);
        set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

        ntp_clear();

        write_sequnlock_irq(&xtime_lock);

        clock_was_set();

        return 0;
}

EXPORT_SYMBOL(do_settimeofday);
unsigned long profile_pc(struct pt_regs *regs)
{
        unsigned long pc = instruction_pointer(regs);

        /* Assume the lock function has either no stack frame or only a single word.
           This checks if the address on the stack looks like a kernel text address.
           There is a small window for false hits, but in that case the tick
           is just accounted to the spinlock function.
           It would be better to write these functions in assembler again
           and check exactly. */
        if (in_lock_functions(pc)) {
                char *v = *(char **)regs->rsp;
                if ((v >= _stext && v <= _etext) ||
                    (v >= _sinittext && v <= _einittext) ||
                    (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
                        return (unsigned long)v;
                return ((unsigned long *)regs->rsp)[1];
        }
        return pc;
}
EXPORT_SYMBOL(profile_pc);
/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */
static void set_rtc_mmss(unsigned long nowtime)
{
        int real_seconds, real_minutes, cmos_minutes;
        unsigned char control, freq_select;

        /*
         * IRQs are disabled when we're called from the timer interrupt,
         * no need for spin_lock_irqsave()
         */

        spin_lock(&rtc_lock);

        /*
         * Tell the clock it's being set and stop it.
         */

        control = CMOS_READ(RTC_CONTROL);
        CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

        freq_select = CMOS_READ(RTC_FREQ_SELECT);
        CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

        cmos_minutes = CMOS_READ(RTC_MINUTES);
        BCD_TO_BIN(cmos_minutes);

        /*
         * since we're only adjusting minutes and seconds, don't interfere with hour
         * overflow. This avoids messing with unknown time zones but requires your RTC
         * not to be off by more than 15 minutes. Since we're calling it only when
         * our clock is externally synchronized using NTP, this shouldn't be a problem.
         */

        real_seconds = nowtime % 60;
        real_minutes = nowtime / 60;
        if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
                real_minutes += 30;	/* correct for half hour time zone */
        real_minutes %= 60;
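        /*
         * Worked example (added for illustration): in a half-hour time zone
         * the CMOS clock runs ~30 minutes apart from UTC-based nowtime. With
         * cmos_minutes = 40 and real_minutes = 10, abs diff = 30 and
         * (30 + 15) / 30 = 1 is odd, so 30 is added and real_minutes becomes
         * 40, matching the CMOS clock again. A clock that is only slightly
         * off (say diff = 2) gives (2 + 15) / 30 = 0 and no correction.
         */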
        /* AMD 8111 is a really bad time keeper and hits this regularly.
           It probably was an attempt to avoid screwing up DST, but ignore
           that for now. */
        if (abs(real_minutes - cmos_minutes) >= 30) {
                printk(KERN_WARNING "time.c: can't update CMOS clock "
                       "from %d to %d\n", cmos_minutes, real_minutes);
        } else {
                BIN_TO_BCD(real_seconds);
                BIN_TO_BCD(real_minutes);
                CMOS_WRITE(real_seconds, RTC_SECONDS);
                CMOS_WRITE(real_minutes, RTC_MINUTES);
        }

        /*
         * The following flags have to be released exactly in this order, otherwise the
         * DS12887 (popular MC146818A clone with integrated battery and quartz) will
         * not reset the oscillator and will not update precisely 500 ms later. You
         * won't find this mentioned in the Dallas Semiconductor data sheets, but who
         * believes data sheets anyway ... -- Markus Kuhn
         */

        CMOS_WRITE(control, RTC_CONTROL);
        CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

        spin_unlock(&rtc_lock);
}
/* monotonic_clock(): returns # of nanoseconds passed since time_init()
 *	Note: This function is required to return accurate
 *	time even in the absence of multiple timer ticks.
 */
unsigned long long monotonic_clock(void)
{
        unsigned long seq;
        u32 last_offset, this_offset, offset;
        unsigned long long base;

        if (vxtime.mode == VXTIME_HPET) {
                do {
                        seq = read_seqbegin(&xtime_lock);

                        last_offset = vxtime.last;
                        base = monotonic_base;
                        this_offset = hpet_readl(HPET_COUNTER);
                } while (read_seqretry(&xtime_lock, seq));
                offset = (this_offset - last_offset);
                offset *= (NSEC_PER_SEC/HZ) / hpet_tick;
                return base + offset;
        } else {
                do {
                        seq = read_seqbegin(&xtime_lock);

                        last_offset = vxtime.last_tsc;
                        base = monotonic_base;
                } while (read_seqretry(&xtime_lock, seq));
                this_offset = get_cycles_sync();
                offset = (this_offset - last_offset) * 1000 / cpu_khz;
                return base + offset;
        }
}
EXPORT_SYMBOL(monotonic_clock);
static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
{
        static long lost_count;
        static int warned;

        if (report_lost_ticks) {
                printk(KERN_WARNING "time.c: Lost %d timer "
                       "tick(s)! ", lost);
                print_symbol("rip %s)\n", regs->rip);
        }

        if (lost_count == 1000 && !warned) {
                printk(KERN_WARNING
                       "warning: many lost ticks.\n"
                       KERN_WARNING "Your time source seems to be unstable or "
                       "some driver is hogging interrupts\n");
                print_symbol("rip %s\n", regs->rip);
                if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
                        printk(KERN_WARNING "Falling back to HPET\n");
                        vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
                        vxtime.mode = VXTIME_HPET;
                        do_gettimeoffset = do_gettimeoffset_hpet;
                }
                /* else should fall back to PIT, but code missing. */
                warned = 1;
        } else
                lost_count++;

#ifdef CONFIG_CPU_FREQ
        /* In some cases the CPU can change frequency without us noticing
           (like going into thermal throttle).
           Give cpufreq a chance to catch up. */
        if ((lost_count+1) % 25 == 0) {
                cpufreq_delayed_get();
        }
#endif
}
static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        static unsigned long rtc_update = 0;
        unsigned long tsc;
        int delay, offset = 0, lost = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and we change them -arca+vojtech
 */

        write_seqlock(&xtime_lock);

        if (vxtime.hpet_address)
                offset = hpet_readl(HPET_COUNTER);

        if (hpet_use_timer) {
                /* if we're using the hpet timer functionality,
                 * we can more accurately know the counter value
                 * when the timer interrupt occurred.
                 */
                offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
                delay = hpet_readl(HPET_COUNTER) - offset;
        } else {
                spin_lock(&i8253_lock);
                outb_p(0x00, 0x43);
                delay = inb_p(0x40);
                delay |= inb(0x40) << 8;
                spin_unlock(&i8253_lock);
                delay = LATCH - 1 - delay;
        }

        tsc = get_cycles_sync();

        if (vxtime.mode == VXTIME_HPET) {
                if (offset - vxtime.last > hpet_tick) {
                        lost = (offset - vxtime.last) / hpet_tick - 1;
                }

                monotonic_base +=
                        (offset - vxtime.last) * (NSEC_PER_SEC/HZ) / hpet_tick;

                vxtime.last = offset;
#ifdef CONFIG_X86_PM_TIMER
        } else if (vxtime.mode == VXTIME_PMTMR) {
                lost = pmtimer_mark_offset();
#endif
        } else {
                offset = (((tsc - vxtime.last_tsc) *
                           vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);

                if (offset < 0)
                        offset = 0;

                if (offset > (USEC_PER_SEC / HZ)) {
                        lost = offset / (USEC_PER_SEC / HZ);
                        offset %= (USEC_PER_SEC / HZ);
                }

                monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;

                vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;

                if ((((tsc - vxtime.last_tsc) *
                      vxtime.tsc_quot) >> 32) < offset)
                        vxtime.last_tsc = tsc -
                                (((long) offset << 32) / vxtime.tsc_quot) - 1;
        }
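/*
 * Note added for clarity (not in the original): the last_tsc update above
 * backdates the TSC reference to the moment the timer interrupt was raised,
 * not serviced. 'delay' is in timer clocks, so delay * quot >> 32 gives the
 * elapsed microseconds and dividing by tsc_quot >> 32 converts those to TSC
 * cycles; the two shifts cancel, leaving delay * quot / tsc_quot. The clamp
 * that follows it keeps do_gettimeoffset_tsc() from stepping backwards
 * across the tick boundary.
 */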
        if (lost > 0)
                handle_lost_ticks(lost, regs);

/*
 * Do the timer stuff.
 */

        do_timer(regs);
#ifndef CONFIG_SMP
        update_process_times(user_mode(regs));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in that case we
 * have to call the local interrupt handler.
 */

#ifndef CONFIG_X86_LOCAL_APIC
        profile_tick(CPU_PROFILING, regs);
#else
        if (!using_apic_timer)
                smp_local_timer_interrupt(regs);
#endif

/*
 * If we have an externally synchronized Linux clock, then update CMOS clock
 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
 * closest to exactly 500 ms before the next second. If the update fails, we
 * don't care, as it'll be updated on the next turn, and the problem (time way
 * off) isn't likely to go away much sooner anyway.
 */

        if (ntp_synced() && xtime.tv_sec > rtc_update &&
            abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
                set_rtc_mmss(xtime.tv_sec);
                rtc_update = xtime.tv_sec + 660;
        }

        write_sequnlock(&xtime_lock);

#ifdef CONFIG_X86_LOCAL_APIC
        if (using_apic_timer)
                smp_send_timer_broadcast_ipi();
#endif

        return IRQ_HANDLED;
}
static unsigned int cyc2ns_scale;
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

static inline void set_cyc2ns_scale(unsigned long cpu_khz)
{
        cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
}

static inline unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}
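/*
 * Worked example (illustrative, not from the original file): with a
 * hypothetical cpu_khz of 2000000 (2 GHz), cyc2ns_scale becomes
 * (1000000 << 10) / 2000000 = 512, so cycles_2_ns(1000) =
 * (1000 * 512) >> 10 = 500 ns -- exactly what 1000 cycles at 2 GHz take.
 * The 2^10 fixed-point factor trades one divide at calibration time for
 * a cheap multiply-and-shift on every sched_clock() call.
 */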
unsigned long long sched_clock(void)
{
        unsigned long a = 0;

#if 0
        /* Don't do a HPET read here. Using TSC always is much faster
           and HPET may not be mapped yet when the scheduler first runs.
           Disadvantage is a small drift between CPUs in some configurations,
           but that should be tolerable. */
        if (__vxtime.mode == VXTIME_HPET)
                return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
#endif

        /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
           which means it is not completely exact and may not be monotonic between
           CPUs. But the errors should be too small to matter for scheduling
           purposes. */

        rdtscll(a);
        return cycles_2_ns(a);
}
static unsigned long get_cmos_time(void)
{
        unsigned int timeout = 1000000, year, mon, day, hour, min, sec;
        unsigned char uip = 0, this = 0;
        unsigned long flags;

/*
 * The Linux interpretation of the CMOS clock register contents: When the
 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
 * second which has precisely just started. Waiting for this can take up to 1
 * second, we time out after approximately 2.4 seconds on a machine with
 * standard 8.3 MHz ISA bus.
 */

        spin_lock_irqsave(&rtc_lock, flags);

        while (timeout && (!uip || this)) {
                uip |= this;
                this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
                timeout--;
        }

        /*
         * Here we are safe to assume the registers won't change for a whole
         * second, so we just go ahead and read them.
         */
        sec = CMOS_READ(RTC_SECONDS);
        min = CMOS_READ(RTC_MINUTES);
        hour = CMOS_READ(RTC_HOURS);
        day = CMOS_READ(RTC_DAY_OF_MONTH);
        mon = CMOS_READ(RTC_MONTH);
        year = CMOS_READ(RTC_YEAR);

        spin_unlock_irqrestore(&rtc_lock, flags);

        /*
         * We know that x86-64 always uses BCD format, no need to check the
         * config register.
         */

        BCD_TO_BIN(sec);
        BCD_TO_BIN(min);
        BCD_TO_BIN(hour);
        BCD_TO_BIN(day);
        BCD_TO_BIN(mon);
        BCD_TO_BIN(year);

        /*
         * x86-64 systems have only existed since 2002.
         * This will work up to Dec 31, 2100.
         */
        year += 2000;

        return mktime(year, mon, day, hour, min, sec);
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
   changes.

   RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
   not that important because current Opteron setups do not support
   scaling on SMP anyway.

   Should fix up last_tsc too. Currently gettimeofday in the
   first tick after the change will be slightly wrong. */
#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

static void handle_cpufreq_delayed_get(void *v)
{
        unsigned int cpu;
        for_each_online_cpu(cpu) {
                cpufreq_get(cpu);
        }
        cpufreq_delayed_issched = 0;
}

/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
 * to verify the CPU frequency the timing core thinks the CPU is running
 * at is still correct.
 */
static void cpufreq_delayed_get(void)
{
        static int warned;
        if (cpufreq_init && !cpufreq_delayed_issched) {
                cpufreq_delayed_issched = 1;
                if (!warned) {
                        warned = 1;
                        printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
                }
                schedule_work(&cpufreq_delayed_get_work);
        }
}
static unsigned int ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
        struct cpufreq_freqs *freq = data;
        unsigned long *lpj, dummy;

        if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
                return 0;

        lpj = &dummy;
        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
                lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
                lpj = &boot_cpu_data.loops_per_jiffy;
#endif

        if (!ref_freq) {
                ref_freq = freq->old;
                loops_per_jiffy_ref = *lpj;
                cpu_khz_ref = cpu_khz;
        }
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                *lpj =
                cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

                cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        vxtime.tsc_quot = (1000L << 32) / cpu_khz;
        }

        set_cyc2ns_scale(cpu_khz_ref);

        return 0;
}

static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
        if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                       CPUFREQ_TRANSITION_NOTIFIER))
                cpufreq_init = 1;
        return 0;
}

core_initcall(cpufreq_tsc);

#endif
/*
 * calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
 * it to the HPET timer of known frequency.
 */

#define TICK_COUNT 100000000
static unsigned int __init hpet_calibrate_tsc(void)
{
        int tsc_start, hpet_start;
        int tsc_now, hpet_now;
        unsigned long flags;

        local_irq_save(flags);

        hpet_start = hpet_readl(HPET_COUNTER);
        tsc_start = get_cycles_sync();

        do {
                local_irq_disable();
                hpet_now = hpet_readl(HPET_COUNTER);
                tsc_now = get_cycles_sync();
                local_irq_restore(flags);
        } while ((tsc_now - tsc_start) < TICK_COUNT &&
                 (hpet_now - hpet_start) < TICK_COUNT);

        return (tsc_now - tsc_start) * 1000000000L
                / ((hpet_now - hpet_start) * hpet_period / 1000);
}
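/*
 * Unit check, added for illustration: hpet_period is in femtoseconds per
 * HPET clock, so (hpet_now - hpet_start) * hpet_period / 1000 is the
 * elapsed time in picoseconds, and tsc_ticks * 10^9 / elapsed_ps comes
 * out in kHz. E.g. assuming a 14.31818 MHz HPET (hpet_period = 69841279 fs),
 * 100000000 TSC ticks counted against 50 ms of HPET time give
 * 100000000 * 10^9 / (50 * 10^9) = 2000000 kHz, i.e. a 2 GHz CPU.
 */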
/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */
static unsigned int __init pit_calibrate_tsc(void)
{
        unsigned long start, end;
        unsigned long flags;

        spin_lock_irqsave(&i8253_lock, flags);

        outb((inb(0x61) & ~0x02) | 0x01, 0x61);

        outb(0xb0, 0x43);
        outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
        outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
        start = get_cycles_sync();
        while ((inb(0x61) & 0x20) == 0);
        end = get_cycles_sync();

        spin_unlock_irqrestore(&i8253_lock, flags);

        return (end - start) / 50;
}
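/*
 * Added note: the divide by 50 is the whole calibration -- the code above
 * counted TSC cycles across a 50 ms PIT countdown, and cycles per 50 ms
 * divided by 50 is cycles per millisecond, which is exactly cpu_khz.
 * A hypothetical 2 GHz part would measure roughly 100000000 cycles and
 * return 2000000.
 */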
#ifdef CONFIG_HPET
static __init int late_hpet_init(void)
{
        struct hpet_data hd;
        unsigned int ntimer;

        if (!vxtime.hpet_address)
                return -1;

        memset(&hd, 0, sizeof(hd));

        ntimer = hpet_readl(HPET_ID);
        ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
        ntimer++;

        /*
         * Register with driver.
         * Timer0 and Timer1 are used by the platform.
         */
        hd.hd_phys_address = vxtime.hpet_address;
        hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
        hd.hd_nirqs = ntimer;
        hd.hd_flags = HPET_DATA_PLATFORM;
        hpet_reserve_timer(&hd, 0);
#ifdef CONFIG_HPET_EMULATE_RTC
        hpet_reserve_timer(&hd, 1);
#endif
        hd.hd_irq[0] = HPET_LEGACY_8254;
        hd.hd_irq[1] = HPET_LEGACY_RTC;
        if (ntimer > 2) {
                struct hpet *hpet;
                struct hpet_timer *timer;
                int i;

                hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);

                for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
                     timer++, i++)
                        hd.hd_irq[i] = (timer->hpet_config &
                                        Tn_INT_ROUTE_CNF_MASK) >>
                                Tn_INT_ROUTE_CNF_SHIFT;
        }

        hpet_alloc(&hd);

        return 0;
}
fs_initcall(late_hpet_init);
#endif
static int hpet_timer_stop_set_go(unsigned long tick)
{
        unsigned int cfg;

/*
 * Stop the timers and reset the main counter.
 */

        cfg = hpet_readl(HPET_CFG);
        cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
        hpet_writel(cfg, HPET_CFG);
        hpet_writel(0, HPET_COUNTER);
        hpet_writel(0, HPET_COUNTER + 4);

/*
 * Set up timer 0, as periodic with first interrupt to happen at hpet_tick,
 * and period also hpet_tick.
 */
        if (hpet_use_timer) {
                hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
                    HPET_TN_32BIT, HPET_T0_CFG);
                hpet_writel(hpet_tick, HPET_T0_CMP);
                hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
                cfg |= HPET_CFG_LEGACY;
        }

/*
 * Go!
 */

        cfg |= HPET_CFG_ENABLE;
        hpet_writel(cfg, HPET_CFG);

        return 0;
}
static int hpet_init(void)
{
        unsigned int id;

        if (!vxtime.hpet_address)
                return -1;
        set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
        __set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);

/*
 * Read the period, compute tick and quotient.
 */

        id = hpet_readl(HPET_ID);

        if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
                return -1;

        hpet_period = hpet_readl(HPET_PERIOD);
        if (hpet_period < 100000 || hpet_period > 100000000)
                return -1;

        hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
                hpet_period;

        hpet_use_timer = (id & HPET_ID_LEGSUP);

        return hpet_timer_stop_set_go(hpet_tick);
}
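/*
 * Worked example (illustrative): hpet_tick is the number of HPET clocks
 * per kernel tick, rounded to nearest. 1000000000L * (USEC_PER_SEC / HZ)
 * is the tick length converted to femtoseconds; with HZ = 1000 that is
 * 10^12 fs (1 ms), and an assumed 14.31818 MHz HPET (hpet_period =
 * 69841279 fs) gives hpet_tick = 10^12 / 69841279 ~= 14318.
 */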
static int hpet_reenable(void)
{
        return hpet_timer_stop_set_go(hpet_tick);
}
void __init pit_init(void)
{
        unsigned long flags;

        spin_lock_irqsave(&i8253_lock, flags);
        outb_p(0x34, 0x43);		/* binary, mode 2, LSB/MSB, ch 0 */
        outb_p(LATCH & 0xff, 0x40);	/* LSB */
        outb_p(LATCH >> 8, 0x40);	/* MSB */
        spin_unlock_irqrestore(&i8253_lock, flags);
}
int __init time_setup(char *str)
{
        report_lost_ticks = 1;
        return 1;
}
static struct irqaction irq0 = {
        timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
};
void __init time_init(void)
{
        char *timename;

#ifdef HPET_HACK_ENABLE_DANGEROUS
        if (!vxtime.hpet_address) {
                printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
                       "manually!\n");
                outl(0x800038a0, 0xcf8);
                outl(0xff000001, 0xcfc);
                outl(0x800038a0, 0xcf8);
                vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
                printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
                       "at %#lx.\n", vxtime.hpet_address);
        }
#endif
        if (nohpet)
                vxtime.hpet_address = 0;

        xtime.tv_sec = get_cmos_time();
        xtime.tv_nsec = 0;

        set_normalized_timespec(&wall_to_monotonic,
                                -xtime.tv_sec, -xtime.tv_nsec);

        if (!hpet_init())
                vxtime_hz = (1000000000000000L + hpet_period / 2) /
                        hpet_period;
        else
                vxtime.hpet_address = 0;

        if (hpet_use_timer) {
                cpu_khz = hpet_calibrate_tsc();
                timename = "HPET";
#ifdef CONFIG_X86_PM_TIMER
        } else if (pmtmr_ioport && !vxtime.hpet_address) {
                vxtime_hz = PM_TIMER_FREQUENCY;
                timename = "PM";
                pit_init();
                cpu_khz = pit_calibrate_tsc();
#endif
        } else {
                pit_init();
                cpu_khz = pit_calibrate_tsc();
                timename = "PIT";
        }

        printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
               vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
        printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
               cpu_khz / 1000, cpu_khz % 1000);
        vxtime.mode = VXTIME_TSC;
        vxtime.quot = (1000000L << 32) / vxtime_hz;
        vxtime.tsc_quot = (1000L << 32) / cpu_khz;
        vxtime.last_tsc = get_cycles_sync();
        setup_irq(0, &irq0);

        set_cyc2ns_scale(cpu_khz);
#ifndef CONFIG_SMP
        time_init_gtod();
#endif
}
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__init int unsynchronized_tsc(void)
{
#ifdef CONFIG_SMP
        if (oem_force_hpet_timer())
                return 1;
        /* Intel systems are normally all synchronized. Exceptions
           are handled in the OEM check above. */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                return 0;
#endif
        /* Assume multi socket systems are not synchronized */
        return num_present_cpus() > 1;
}
/*
 * Decide after all CPUs are booted what mode gettimeofday should use.
 */
void __init time_init_gtod(void)
{
        char *timetype;

        if (unsynchronized_tsc())
                notsc = 1;
        if (vxtime.hpet_address && notsc) {
                timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
                vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
                vxtime.mode = VXTIME_HPET;
                do_gettimeoffset = do_gettimeoffset_hpet;
#ifdef CONFIG_X86_PM_TIMER
        /* Using PM for gettimeofday is quite slow, but we have no other
           choice because the TSC is too unreliable on some systems. */
        } else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
                timetype = "PM";
                do_gettimeoffset = do_gettimeoffset_pm;
                vxtime.mode = VXTIME_PMTMR;
                sysctl_vsyscall = 0;
                printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
#endif
        } else {
                timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
                vxtime.mode = VXTIME_TSC;
        }

        printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
}
__setup("report_lost_ticks", time_setup);

static long clock_cmos_diff;
static unsigned long sleep_start;
/*
 * sysfs support for the timer.
 */

static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
        /*
         * Estimate time zone so that set_time can update the clock
         */
        long cmos_time = get_cmos_time();

        clock_cmos_diff = -cmos_time;
        clock_cmos_diff += get_seconds();
        sleep_start = cmos_time;
        return 0;
}
static int timer_resume(struct sys_device *dev)
{
        unsigned long flags;
        unsigned long sec;
        unsigned long ctime = get_cmos_time();
        unsigned long sleep_length = (ctime - sleep_start) * HZ;

        if (vxtime.hpet_address)
                hpet_reenable();
        else
                i8254_timer_resume();

        sec = ctime + clock_cmos_diff;
        write_seqlock_irqsave(&xtime_lock, flags);
        xtime.tv_sec = sec;
        xtime.tv_nsec = 0;
        write_sequnlock_irqrestore(&xtime_lock, flags);
        jiffies += sleep_length;
        wall_jiffies += sleep_length;
        touch_softlockup_watchdog();
        return 0;
}
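/*
 * Added note: get_cmos_time() has full-second resolution, so the sleep
 * length above is computed in whole seconds and converted to jiffies by
 * multiplying with HZ; e.g. a 5 second suspend at HZ = 1000 advances
 * jiffies and wall_jiffies by 5000. Sub-second suspend time is lost.
 */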
static struct sysdev_class timer_sysclass = {
        .resume = timer_resume,
        .suspend = timer_suspend,
        set_kset_name("timer"),
};

/* XXX this driverfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
        .id = 0,
        .cls = &timer_sysclass,
};

static int time_init_device(void)
{
        int error = sysdev_class_register(&timer_sysclass);
        if (!error)
                error = sysdev_register(&device_timer);
        return error;
}

device_initcall(time_init_device);
#ifdef CONFIG_HPET_EMULATE_RTC
/* HPET in LegacyReplacement Mode eats up the RTC interrupt line. When HPET
 * is enabled, we support RTC interrupt functionality in software.
 * RTC has 3 kinds of interrupts:
 * 1) Update Interrupt - generate an interrupt, every sec, when RTC clock
 *    is updated
 * 2) Alarm Interrupt - generate an interrupt at a specific time of day
 * 3) Periodic Interrupt - generate periodic interrupt, with frequencies
 *    2Hz-8192Hz (2Hz-64Hz for non-root user) (all freqs in powers of 2)
 * (1) and (2) above are implemented using polling at a frequency of
 * 64 Hz. The exact frequency is a tradeoff between accuracy and interrupt
 * overhead. (DEFAULT_RTC_INT_FREQ)
 * For (3), we use interrupts at 64 Hz or the user specified periodic
 * frequency, whichever is higher.
 */
#include <linux/rtc.h>

#define DEFAULT_RTC_INT_FREQ	64
#define RTC_NUM_INTS		1

static unsigned long UIE_on;
static unsigned long prev_update_sec;

static unsigned long AIE_on;
static struct rtc_time alarm_time;

static unsigned long PIE_on;
static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
static unsigned long PIE_count;

static unsigned long hpet_rtc_int_freq;	/* RTC interrupt frequency */
static unsigned int hpet_t1_cmp;	/* cached comparator register */
int is_hpet_enabled(void)
{
        return vxtime.hpet_address != 0;
}
/*
 * Timer 1 is used for the RTC; we do not use its periodic interrupt feature,
 * even if HPET supports periodic interrupts on Timer 1.
 * The reason being, to set up a periodic interrupt in HPET, we need to
 * stop the main counter. And if we do that every time someone disables/enables
 * RTC, we will have an adverse effect on the main kernel timer running on
 * Timer 0. So, for the time being, simulate the periodic interrupt in software.
 *
 * hpet_rtc_timer_init() is called for the first time and during subsequent
 * interrupts reinit happens through hpet_rtc_timer_reinit().
 */
int hpet_rtc_timer_init(void)
{
        unsigned int cfg, cnt;
        unsigned long flags;

        if (!is_hpet_enabled())
                return 0;
        /*
         * Set the counter 1 and enable the interrupts.
         */
        if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
                hpet_rtc_int_freq = PIE_freq;
        else
                hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

        local_irq_save(flags);
        cnt = hpet_readl(HPET_COUNTER);
        cnt += ((hpet_tick * HZ) / hpet_rtc_int_freq);
        hpet_writel(cnt, HPET_T1_CMP);
        hpet_t1_cmp = cnt;
        local_irq_restore(flags);

        cfg = hpet_readl(HPET_T1_CFG);
        cfg &= ~HPET_TN_PERIODIC;
        cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
        hpet_writel(cfg, HPET_T1_CFG);

        return 1;
}
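/*
 * Worked example (not in the original): hpet_tick * HZ is the number of
 * HPET clocks per second, so dividing by hpet_rtc_int_freq spaces the
 * comparator hpet_rtc_int_freq times per second. Assuming hpet_tick =
 * 14318 (a 14.31818 MHz HPET at HZ = 1000) and the default 64 Hz rate,
 * the comparator is advanced by 14318000 / 64 = 223718 clocks, i.e.
 * 1/64 of a second (15.625 ms) per RTC interrupt.
 */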
static void hpet_rtc_timer_reinit(void)
{
        unsigned int cfg, cnt;

        if (unlikely(!(PIE_on | AIE_on | UIE_on))) {
                cfg = hpet_readl(HPET_T1_CFG);
                cfg &= ~HPET_TN_ENABLE;
                hpet_writel(cfg, HPET_T1_CFG);
                return;
        }

        if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
                hpet_rtc_int_freq = PIE_freq;
        else
                hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;

        /* It is more accurate to use the comparator value than current count.*/
        cnt = hpet_t1_cmp;
        cnt += hpet_tick * HZ / hpet_rtc_int_freq;
        hpet_writel(cnt, HPET_T1_CMP);
        hpet_t1_cmp = cnt;
}
/*
 * The functions below are called from the rtc driver.
 * Return 0 if HPET is not being used.
 * Otherwise do the necessary changes and return 1.
 */
int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
{
        if (!is_hpet_enabled())
                return 0;

        if (bit_mask & RTC_UIE)
                UIE_on = 0;
        if (bit_mask & RTC_PIE)
                PIE_on = 0;
        if (bit_mask & RTC_AIE)
                AIE_on = 0;

        return 1;
}
int hpet_set_rtc_irq_bit(unsigned long bit_mask)
{
        int timer_init_reqd = 0;

        if (!is_hpet_enabled())
                return 0;

        if (!(PIE_on | AIE_on | UIE_on))
                timer_init_reqd = 1;

        if (bit_mask & RTC_UIE) {
                UIE_on = 1;
        }
        if (bit_mask & RTC_PIE) {
                PIE_on = 1;
                PIE_count = 0;
        }
        if (bit_mask & RTC_AIE) {
                AIE_on = 1;
        }

        if (timer_init_reqd)
                hpet_rtc_timer_init();

        return 1;
}
int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
{
        if (!is_hpet_enabled())
                return 0;

        alarm_time.tm_hour = hrs;
        alarm_time.tm_min = min;
        alarm_time.tm_sec = sec;

        return 1;
}
int hpet_set_periodic_freq(unsigned long freq)
{
        if (!is_hpet_enabled())
                return 0;

        PIE_freq = freq;
        PIE_count = 0;

        return 1;
}
int hpet_rtc_dropped_irq(void)
{
        if (!is_hpet_enabled())
                return 0;

        return 1;
}
irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct rtc_time curr_time;
        unsigned long rtc_int_flag = 0;
        int call_rtc_interrupt = 0;

        hpet_rtc_timer_reinit();

        if (UIE_on | AIE_on) {
                rtc_get_rtc_time(&curr_time);
        }
        if (UIE_on) {
                if (curr_time.tm_sec != prev_update_sec) {
                        /* Set update int info, call real rtc int routine */
                        call_rtc_interrupt = 1;
                        rtc_int_flag = RTC_UF;
                        prev_update_sec = curr_time.tm_sec;
                }
        }
        if (PIE_on) {
                PIE_count++;
                if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
                        /* Set periodic int info, call real rtc int routine */
                        call_rtc_interrupt = 1;
                        rtc_int_flag |= RTC_PF;
                        PIE_count = 0;
                }
        }
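        /*
         * Added note: the check above is software frequency division. The
         * timer fires at hpet_rtc_int_freq and only every
         * (hpet_rtc_int_freq / PIE_freq)-th interrupt is forwarded as a
         * periodic RTC interrupt; e.g. with the 64 Hz base rate and
         * PIE_freq = 2, every 32nd interrupt reaches the rtc driver.
         */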
        if (AIE_on) {
                if ((curr_time.tm_sec == alarm_time.tm_sec) &&
                    (curr_time.tm_min == alarm_time.tm_min) &&
                    (curr_time.tm_hour == alarm_time.tm_hour)) {
                        /* Set alarm int info, call real rtc int routine */
                        call_rtc_interrupt = 1;
                        rtc_int_flag |= RTC_AF;
                }
        }

        if (call_rtc_interrupt) {
                rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
                rtc_interrupt(rtc_int_flag, dev_id, regs);
        }

        return IRQ_HANDLED;
}
#endif
static int __init nohpet_setup(char *s)
{
        nohpet = 1;
        return 1;
}

__setup("nohpet", nohpet_setup);
static int __init notsc_setup(char *s)
{
        notsc = 1;
        return 1;
}

__setup("notsc", notsc_setup);