2 * linux/arch/x86-64/kernel/time.c
4 * "High Precision Event Timer" based timekeeping.
6 * Copyright (c) 1991,1992,1995 Linus Torvalds
7 * Copyright (c) 1994 Alan Modra
8 * Copyright (c) 1995 Markus Kuhn
9 * Copyright (c) 1996 Ingo Molnar
10 * Copyright (c) 1998 Andrea Arcangeli
11 * Copyright (c) 2002 Vojtech Pavlik
12 * Copyright (c) 2003 Andi Kleen
13 * RTC support code taken from arch/i386/kernel/timers/time_hpet.c
16 #include <linux/kernel.h>
17 #include <linux/sched.h>
18 #include <linux/interrupt.h>
19 #include <linux/init.h>
20 #include <linux/mc146818rtc.h>
21 #include <linux/irq.h>
22 #include <linux/time.h>
23 #include <linux/ioport.h>
24 #include <linux/module.h>
25 #include <linux/device.h>
26 #include <linux/sysdev.h>
27 #include <linux/bcd.h>
28 #include <linux/kallsyms.h>
29 #include <linux/acpi.h>
31 #include <acpi/achware.h> /* for PM timer frequency */
33 #include <asm/8253pit.h>
34 #include <asm/pgtable.h>
35 #include <asm/vsyscall.h>
36 #include <asm/timex.h>
37 #include <asm/proto.h>
39 #include <asm/sections.h>
40 #include <linux/cpufreq.h>
41 #include <linux/hpet.h>
42 #ifdef CONFIG_X86_LOCAL_APIC
46 u64 jiffies_64 = INITIAL_JIFFIES;
48 EXPORT_SYMBOL(jiffies_64);
50 #ifdef CONFIG_CPU_FREQ
51 static void cpufreq_delayed_get(void);
53 extern void i8254_timer_resume(void);
54 extern int using_apic_timer;
56 DEFINE_SPINLOCK(rtc_lock);
57 DEFINE_SPINLOCK(i8253_lock);
59 static int nohpet __initdata = 0;
60 static int notsc __initdata = 0;
62 #undef HPET_HACK_ENABLE_DANGEROUS
64 unsigned int cpu_khz; /* CPU clock frequency in kHz (TSC clocks / msec), not used here */
65 static unsigned long hpet_period; /* fsecs / HPET clock */
66 unsigned long hpet_tick; /* HPET clocks / interrupt */
67 static int hpet_use_timer;
68 unsigned long vxtime_hz = PIT_TICK_RATE;
69 int report_lost_ticks; /* command line option */
70 unsigned long long monotonic_base;
72 struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
74 volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
75 unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
76 struct timespec __xtime __section_xtime;
77 struct timezone __sys_tz __section_sys_tz;
79 static inline void rdtscll_sync(unsigned long *tsc)
88 * do_gettimeoffset() returns microseconds since the last timer interrupt was
89 * triggered by hardware. A memory read of the HPET is slower than a register read
90 * of the TSC, but much more reliable. It's also synchronized to the timer
91 * interrupt. Note that do_gettimeoffset() may return more than hpet_tick if a
92 * timer interrupt has already happened but vxtime.trigger wasn't updated yet.
93 * This is not a problem, because jiffies hasn't been updated either. They are bound
94 * together by xtime_lock.
97 static inline unsigned int do_gettimeoffset_tsc(void)
102 if (t < vxtime.last_tsc) t = vxtime.last_tsc; /* hack */
103 x = ((t - vxtime.last_tsc) * vxtime.tsc_quot) >> 32;
107 static inline unsigned int do_gettimeoffset_hpet(void)
109 /* cap counter read to one tick to avoid inconsistencies */
110 unsigned long counter = hpet_readl(HPET_COUNTER) - vxtime.last;
111 return (min(counter, hpet_tick) * vxtime.quot) >> 32;
114 unsigned int (*do_gettimeoffset)(void) = do_gettimeoffset_tsc;
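/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * conversions above are 32.32 fixed point: vxtime.tsc_quot caches
 * (1000 << 32) / cpu_khz, i.e. microseconds per TSC tick scaled by 2^32,
 * so a 64-bit multiply plus a shift replaces a divide on every call.
 * A minimal user-space demo with a hypothetical 2 GHz CPU:
 */
#if 0 /* standalone example, builds with any C99 compiler */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned long cpu_khz = 2000000;	/* hypothetical 2 GHz CPU */
	uint64_t tsc_quot = (1000ULL << 32) / cpu_khz;	/* as in vxtime */
	uint64_t delta = 3000000;	/* TSC ticks since last interrupt */
	unsigned int usec = (unsigned int)((delta * tsc_quot) >> 32);
	printf("%u\n", usec);	/* prints 1499: ~1500 us, rounded down */
	return 0;
}
#endif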
117 * This version of gettimeofday() has microsecond resolution and better than
118 * microsecond precision, as we're using at least a 10 MHz (usually 14.31818 MHz) HPET timer.
122 void do_gettimeofday(struct timeval *tv)
124 unsigned long seq, t;
125 unsigned int sec, usec;
128 seq = read_seqbegin(&xtime_lock);
131 usec = xtime.tv_nsec / 1000;
133 /* i386 does some correction here to keep the clock
134 monotonic even when ntpd is fixing drift.
135 But that didn't work for me; there is a non-monotonic
136 clock with ntp anyway.
137 I dropped all corrections now until a real solution can
138 be found. Note that when you fix it here you need to do the same
139 in arch/x86_64/kernel/vsyscall.c and export all needed
140 variables in vmlinux.lds. -AK */
142 t = (jiffies - wall_jiffies) * (1000000L / HZ) +
146 } while (read_seqretry(&xtime_lock, seq));
148 tv->tv_sec = sec + usec / 1000000;
149 tv->tv_usec = usec % 1000000;
152 EXPORT_SYMBOL(do_gettimeofday);
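/*
 * [Editor's illustrative sketch, not part of the original file.] The loop
 * above is the canonical seqlock reader: sample the sequence, read the
 * protected values, and retry if a writer ran in between. Reduced to its
 * core (hypothetical helper, same xtime_lock):
 */
#if 0
static inline unsigned long read_jiffies_consistent(void)
{
	unsigned long seq, j;

	do {
		seq = read_seqbegin(&xtime_lock);	/* snapshot sequence */
		j = jiffies;				/* read protected data */
	} while (read_seqretry(&xtime_lock, seq));	/* retry if writer ran */
	return j;
}
#endif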
155 * settimeofday() first undoes the correction that gettimeofday would do
156 * on the time, and then saves it. This is ugly, but has been like this for a long time.
160 int do_settimeofday(struct timespec *tv)
162 time_t wtm_sec, sec = tv->tv_sec;
163 long wtm_nsec, nsec = tv->tv_nsec;
165 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
168 write_seqlock_irq(&xtime_lock);
170 nsec -= do_gettimeoffset() * 1000 +
171 (jiffies - wall_jiffies) * (NSEC_PER_SEC/HZ);
173 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
174 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
176 set_normalized_timespec(&xtime, sec, nsec);
177 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
179 time_adjust = 0; /* stop active adjtime() */
180 time_status |= STA_UNSYNC;
181 time_maxerror = NTP_PHASE_LIMIT;
182 time_esterror = NTP_PHASE_LIMIT;
184 write_sequnlock_irq(&xtime_lock);
189 EXPORT_SYMBOL(do_settimeofday);
191 unsigned long profile_pc(struct pt_regs *regs)
193 unsigned long pc = instruction_pointer(regs);
195 /* Assume the lock function has either no stack frame or only a single word.
196 This checks whether the address on the stack looks like a kernel text address.
197 There is a small window for false hits, but in that case the tick
198 is just accounted to the spinlock function.
199 It would be better to write these functions in assembler again
200 and do the check exactly. */
201 if (in_lock_functions(pc)) {
202 char *v = *(char **)regs->rsp;
203 if ((v >= _stext && v <= _etext) ||
204 (v >= _sinittext && v <= _einittext) ||
205 (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
206 return (unsigned long)v;
207 return ((unsigned long *)regs->rsp)[1];
211 EXPORT_SYMBOL(profile_pc);
214 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
215 * ms after the second nowtime has started, because when nowtime is written
216 * into the registers of the CMOS clock, it will jump to the next second
217 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data sheets for details.
221 static void set_rtc_mmss(unsigned long nowtime)
223 int real_seconds, real_minutes, cmos_minutes;
224 unsigned char control, freq_select;
227 * IRQs are disabled when we're called from the timer interrupt,
228 * no need for spin_lock_irqsave()
231 spin_lock(&rtc_lock);
234 * Tell the clock it's being set and stop it.
237 control = CMOS_READ(RTC_CONTROL);
238 CMOS_WRITE(control | RTC_SET, RTC_CONTROL);
240 freq_select = CMOS_READ(RTC_FREQ_SELECT);
241 CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);
243 cmos_minutes = CMOS_READ(RTC_MINUTES);
244 BCD_TO_BIN(cmos_minutes);
247 * since we're only adjusting minutes and seconds, don't interfere with hour
248 * overflow. This avoids messing with unknown time zones but requires your RTC
249 * not to be off by more than 15 minutes. Since we're calling it only when
250 * our clock is externally synchronized using NTP, this shouldn't be a problem.
253 real_seconds = nowtime % 60;
254 real_minutes = nowtime / 60;
255 if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
256 real_minutes += 30; /* correct for half hour time zone */
260 /* AMD 8111 is a really bad time keeper and hits this regularly.
261 It probably was an attempt to avoid screwing up DST, but ignore that for now. */
263 if (abs(real_minutes - cmos_minutes) >= 30) {
264 printk(KERN_WARNING "time.c: can't update CMOS clock "
265 "from %d to %d\n", cmos_minutes, real_minutes);
270 BIN_TO_BCD(real_seconds);
271 BIN_TO_BCD(real_minutes);
272 CMOS_WRITE(real_seconds, RTC_SECONDS);
273 CMOS_WRITE(real_minutes, RTC_MINUTES);
277 * The following flags have to be released exactly in this order, otherwise the
278 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
279 * not reset the oscillator and will not update precisely 500 ms later. You
280 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
281 * believes data sheets anyway ... -- Markus Kuhn
284 CMOS_WRITE(control, RTC_CONTROL);
285 CMOS_WRITE(freq_select, RTC_FREQ_SELECT);
287 spin_unlock(&rtc_lock);
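/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * expression ((abs(diff) + 15) / 30) & 1 above rounds the minute
 * difference to the nearest multiple of 30 and tests whether that
 * multiple is odd, i.e. whether the RTC looks shifted by a half-hour
 * time zone. A standalone demo of the same test:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int diffs[] = { 0, 10, 20, 29, 30, 45, 59 };
	int i;

	for (i = 0; i < 7; i++) {
		int d = diffs[i];
		/* same test as in set_rtc_mmss() above */
		int half_hour = ((abs(d) + 15) / 30) & 1;
		printf("diff %2d -> %s\n", d, half_hour ? "add 30 min" : "leave");
	}
	return 0;	/* 20, 29, 30 trigger the correction; the rest don't */
}
#endif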
291 /* monotonic_clock(): returns # of nanoseconds passed since time_init()
292 * Note: This function is required to return accurate
293 * time even in the absence of multiple timer ticks.
295 unsigned long long monotonic_clock(void)
298 u32 last_offset, this_offset, offset;
299 unsigned long long base;
301 if (vxtime.mode == VXTIME_HPET) {
303 seq = read_seqbegin(&xtime_lock);
305 last_offset = vxtime.last;
306 base = monotonic_base;
307 this_offset = hpet_readl(HPET_COUNTER);
309 } while (read_seqretry(&xtime_lock, seq));
310 offset = (this_offset - last_offset);
311 offset *= (NSEC_PER_SEC / HZ) / hpet_tick;
312 return base + offset;
315 seq = read_seqbegin(&xtime_lock);
317 last_offset = vxtime.last_tsc;
318 base = monotonic_base;
319 } while (read_seqretry(&xtime_lock, seq));
321 rdtscll(this_offset);
322 offset = (this_offset - last_offset) * 1000 / cpu_khz;
323 return base + offset;
328 EXPORT_SYMBOL(monotonic_clock);
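/*
 * [Editor's illustrative sketch, not part of the original file.] Keeping
 * last_offset and this_offset in u32 above makes the HPET delta wrap-safe:
 * unsigned subtraction is modulo 2^32, so a counter rollover between the
 * two samples still yields the small positive difference. Demo:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t last = 0xfffffff0u;	/* sample taken just before wrap */
	uint32_t now  = 0x00000010u;	/* sample taken just after wrap */
	uint32_t delta = now - last;	/* modulo 2^32 arithmetic */
	printf("%u\n", delta);		/* prints 32, not ~4 billion */
	return 0;
}
#endif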
330 static noinline void handle_lost_ticks(int lost, struct pt_regs *regs)
332 static long lost_count;
335 if (report_lost_ticks) {
336 printk(KERN_WARNING "time.c: Lost %d timer "
338 print_symbol("rip %s)\n", regs->rip);
341 if (lost_count == 1000 && !warned) {
343 "warning: many lost ticks.\n"
344 KERN_WARNING "Your time source seems to be unstable or "
345 "some driver is hogging interrupts\n");
346 print_symbol("rip %s\n", regs->rip);
347 if (vxtime.mode == VXTIME_TSC && vxtime.hpet_address) {
348 printk(KERN_WARNING "Falling back to HPET\n");
349 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
350 vxtime.mode = VXTIME_HPET;
351 do_gettimeoffset = do_gettimeoffset_hpet;
353 /* else should fall back to PIT, but code missing. */
358 #ifdef CONFIG_CPU_FREQ
359 /* In some cases the CPU can change frequency without us noticing
360 (like going into thermal throttling).
361 Give cpufreq a chance to catch up. */
362 if ((lost_count+1) % 25 == 0) {
363 cpufreq_delayed_get();
368 static irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
370 static unsigned long rtc_update = 0;
372 int delay, offset = 0, lost = 0;
375 * Here we are in the timer irq handler. We have irqs locally disabled (so we
376 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
377 * on the other CPU, so we need a lock. We also need to lock the vsyscall
378 * variables, because both do_timer() and we change them. -arca+vojtech
381 write_seqlock(&xtime_lock);
383 if (vxtime.hpet_address)
384 offset = hpet_readl(HPET_COUNTER);
386 if (hpet_use_timer) {
387 /* if we're using the hpet timer functionality,
388 * we can more accurately know the counter value
389 * when the timer interrupt occurred.
391 offset = hpet_readl(HPET_T0_CMP) - hpet_tick;
392 delay = hpet_readl(HPET_COUNTER) - offset;
394 spin_lock(&i8253_lock);
397 delay |= inb(0x40) << 8;
398 spin_unlock(&i8253_lock);
399 delay = LATCH - 1 - delay;
404 if (vxtime.mode == VXTIME_HPET) {
405 if (offset - vxtime.last > hpet_tick) {
406 lost = (offset - vxtime.last) / hpet_tick - 1;
410 (offset - vxtime.last)*(NSEC_PER_SEC/HZ) / hpet_tick;
412 vxtime.last = offset;
413 #ifdef CONFIG_X86_PM_TIMER
414 } else if (vxtime.mode == VXTIME_PMTMR) {
415 lost = pmtimer_mark_offset();
418 offset = (((tsc - vxtime.last_tsc) *
419 vxtime.tsc_quot) >> 32) - (USEC_PER_SEC / HZ);
424 if (offset > (USEC_PER_SEC / HZ)) {
425 lost = offset / (USEC_PER_SEC / HZ);
426 offset %= (USEC_PER_SEC / HZ);
429 monotonic_base += (tsc - vxtime.last_tsc) * 1000000 / cpu_khz;
431 vxtime.last_tsc = tsc - vxtime.quot * delay / vxtime.tsc_quot;
433 if ((((tsc - vxtime.last_tsc) *
434 vxtime.tsc_quot) >> 32) < offset)
435 vxtime.last_tsc = tsc -
436 (((long) offset << 32) / vxtime.tsc_quot) - 1;
440 handle_lost_ticks(lost, regs);
445 * Do the timer stuff.
450 update_process_times(user_mode(regs));
454 * In the SMP case we use the local APIC timer interrupt to do the profiling,
455 * except when we simulate SMP mode on a uniprocessor system, in that case we
456 * have to call the local interrupt handler.
459 #ifndef CONFIG_X86_LOCAL_APIC
460 profile_tick(CPU_PROFILING, regs);
462 if (!using_apic_timer)
463 smp_local_timer_interrupt(regs);
467 * If we have an externally synchronized Linux clock, then update CMOS clock
468 * accordingly every ~11 minutes. set_rtc_mmss() will be called in the jiffy
469 * closest to exactly 500 ms before the next second. If the update fails, we
470 * don't care, as it'll be updated on the next turn, and the problem (time way
471 * off) isn't likely to go away much sooner anyway.
474 if ((~time_status & STA_UNSYNC) && xtime.tv_sec > rtc_update &&
475 abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
476 set_rtc_mmss(xtime.tv_sec);
477 rtc_update = xtime.tv_sec + 660;
480 write_sequnlock(&xtime_lock);
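/*
 * [Editor's illustrative sketch, not part of the original file.] In TSC
 * mode above, "offset" is the microseconds elapsed beyond the expected
 * tick length; whole missed ticks are divided out and the remainder is
 * carried forward. The same arithmetic with hypothetical numbers:
 */
#if 0
#include <stdio.h>

#define HZ 1000
#define USEC_PER_SEC 1000000

int main(void)
{
	/* Hypothetical: 3500 us elapsed since the last tick at HZ=1000. */
	int offset = 3500 - (USEC_PER_SEC / HZ);
	int lost = 0;

	if (offset > (USEC_PER_SEC / HZ)) {	/* as in timer_interrupt() */
		lost = offset / (USEC_PER_SEC / HZ);
		offset %= (USEC_PER_SEC / HZ);
	}
	printf("lost=%d offset=%d\n", lost, offset);	/* lost=2 offset=500 */
	return 0;
}
#endif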
485 static unsigned int cyc2ns_scale;
486 #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
488 static inline void set_cyc2ns_scale(unsigned long cpu_mhz)
490 cyc2ns_scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz;
493 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
495 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
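/*
 * [Editor's illustrative sketch, not part of the original file.]
 * cyc2ns_scale stores nanoseconds-per-cycle times 2^10, so cycles_2_ns()
 * is a multiply and a shift instead of a divide. With a hypothetical
 * 2200 MHz CPU, the 2^10 quantization costs about 0.1% accuracy:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10

int main(void)
{
	unsigned long cpu_mhz = 2200;	/* hypothetical CPU */
	unsigned int scale = (1000 << CYC2NS_SCALE_FACTOR) / cpu_mhz; /* 465 */
	uint64_t cyc = 2200000000ULL;	/* one second worth of cycles */
	uint64_t ns = (cyc * scale) >> CYC2NS_SCALE_FACTOR;
	printf("%llu\n", (unsigned long long)ns); /* ~999023437, not 1e9 */
	return 0;
}
#endif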
498 unsigned long long sched_clock(void)
503 /* Don't do an HPET read here. Using the TSC is always much faster,
504 and the HPET may not be mapped yet when the scheduler first runs.
505 The disadvantage is a small drift between CPUs in some configurations,
506 but that should be tolerable. */
507 if (__vxtime.mode == VXTIME_HPET)
508 return (hpet_readl(HPET_COUNTER) * vxtime.quot) >> 32;
511 /* Could do CPU core sync here. Opteron can execute rdtsc speculatively,
512 which means it is not completely exact and may not be monotonic between
513 CPUs. But the errors should be too small to matter for scheduling purposes. */
517 return cycles_2_ns(a);
520 unsigned long get_cmos_time(void)
522 unsigned int timeout, year, mon, day, hour, min, sec;
523 unsigned char last, this;
527 * The Linux interpretation of the CMOS clock register contents: When the
528 * Update-In-Progress (UIP) flag goes from 1 to 0, the RTC registers show the
529 * second which has precisely just started. Waiting for this can take up to 1
530 * second, we time out after approximately 2.4 seconds on a machine with
531 * standard 8.3 MHz ISA bus.
534 spin_lock_irqsave(&rtc_lock, flags);
539 while (timeout && last && !this) {
541 this = CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP;
546 * Here we are safe to assume the registers won't change for a whole second, so
547 * we just go ahead and read them.
550 sec = CMOS_READ(RTC_SECONDS);
551 min = CMOS_READ(RTC_MINUTES);
552 hour = CMOS_READ(RTC_HOURS);
553 day = CMOS_READ(RTC_DAY_OF_MONTH);
554 mon = CMOS_READ(RTC_MONTH);
555 year = CMOS_READ(RTC_YEAR);
557 spin_unlock_irqrestore(&rtc_lock, flags);
560 * We know that x86-64 always uses BCD format, no need to check the config register.
572 * x86-64 systems have only existed since 2002.
573 * This will work up to Dec 31, 2100
577 return mktime(year, mon, day, hour, min, sec);
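/*
 * [Editor's illustrative sketch, not part of the original file.] The CMOS
 * registers read above hold BCD: the low nibble is the ones digit and the
 * high nibble the tens digit, which is all BCD_TO_BIN has to undo:
 */
#if 0
#include <stdio.h>

static unsigned int bcd_to_bin(unsigned int val)
{
	return (val & 0x0f) + (val >> 4) * 10;	/* 0x59 -> 59 */
}

int main(void)
{
	printf("%u %u\n", bcd_to_bin(0x59), bcd_to_bin(0x07)); /* 59 7 */
	return 0;
}
#endif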
580 #ifdef CONFIG_CPU_FREQ
582 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency changes.
585 RED-PEN: On SMP we assume all CPUs run with the same frequency. It's
586 not that important because current Opteron setups do not support
587 scaling on SMP anyway.
589 Should fix up last_tsc too. Currently gettimeofday in the
590 first tick after the change will be slightly wrong. */
592 #include <linux/workqueue.h>
594 static unsigned int cpufreq_delayed_issched = 0;
595 static unsigned int cpufreq_init = 0;
596 static struct work_struct cpufreq_delayed_get_work;
598 static void handle_cpufreq_delayed_get(void *v)
601 for_each_online_cpu(cpu) {
604 cpufreq_delayed_issched = 0;
607 /* If we notice lost ticks, schedule a call to cpufreq_get(), which tries
608 * to verify that the CPU frequency the timing core thinks the CPU is running
609 * at is still correct.
611 static void cpufreq_delayed_get(void)
614 if (cpufreq_init && !cpufreq_delayed_issched) {
615 cpufreq_delayed_issched = 1;
618 printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
620 schedule_work(&cpufreq_delayed_get_work);
624 static unsigned int ref_freq = 0;
625 static unsigned long loops_per_jiffy_ref = 0;
627 static unsigned long cpu_khz_ref = 0;
629 static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
632 struct cpufreq_freqs *freq = data;
633 unsigned long *lpj, dummy;
635 if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
639 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
641 lpj = &cpu_data[freq->cpu].loops_per_jiffy;
643 lpj = &boot_cpu_data.loops_per_jiffy;
647 ref_freq = freq->old;
648 loops_per_jiffy_ref = *lpj;
649 cpu_khz_ref = cpu_khz;
651 if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
652 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
653 (val == CPUFREQ_RESUMECHANGE)) {
655 cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
657 cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
658 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
659 vxtime.tsc_quot = (1000L << 32) / cpu_khz;
662 set_cyc2ns_scale(cpu_khz_ref / 1000);
667 static struct notifier_block time_cpufreq_notifier_block = {
668 .notifier_call = time_cpufreq_notifier
671 static int __init cpufreq_tsc(void)
673 INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get, NULL);
674 if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
675 CPUFREQ_TRANSITION_NOTIFIER))
680 core_initcall(cpufreq_tsc);
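/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * notifier above rescales the calibrated reference values proportionally
 * to the new frequency and then rebuilds tsc_quot from the new cpu_khz.
 * A sketch of the ratio being applied (not the kernel's cpufreq_scale()
 * helper itself; just the proportional math):
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static unsigned long scale(unsigned long ref, unsigned int ref_freq,
			   unsigned int new_freq)
{
	return (unsigned long)((uint64_t)ref * new_freq / ref_freq);
}

int main(void)
{
	/* Hypothetical: calibrated at 2.2 GHz, throttled to 1.1 GHz. */
	unsigned long cpu_khz = scale(2200000, 2200000, 1100000);
	uint64_t tsc_quot = (1000ULL << 32) / cpu_khz;	/* as in the notifier */
	printf("cpu_khz=%lu tsc_quot=%llu\n",
	       cpu_khz, (unsigned long long)tsc_quot);
	return 0;
}
#endif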
685 * hpet_calibrate_tsc() calibrates the processor TSC in a very simple way, comparing
686 * it to the HPET timer of known frequency.
689 #define TICK_COUNT 100000000
691 static unsigned int __init hpet_calibrate_tsc(void)
693 int tsc_start, hpet_start;
694 int tsc_now, hpet_now;
697 local_irq_save(flags);
700 hpet_start = hpet_readl(HPET_COUNTER);
705 hpet_now = hpet_readl(HPET_COUNTER);
708 local_irq_restore(flags);
709 } while ((tsc_now - tsc_start) < TICK_COUNT &&
710 (hpet_now - hpet_start) < TICK_COUNT);
712 return (tsc_now - tsc_start) * 1000000000L
713 / ((hpet_now - hpet_start) * hpet_period / 1000);
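/*
 * [Editor's illustrative sketch, not part of the original file.] A units
 * check of the return expression above: hpet_period is femtoseconds per
 * HPET tick, so hpet_ticks * hpet_period / 1000 is elapsed picoseconds,
 * and tsc_ticks * 1e9 / picoseconds comes out in kHz. With hypothetical
 * numbers for a 14.318 MHz HPET and a 2.2 GHz TSC:
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t hpet_period = 69841279;	/* fs per tick, ~14.318 MHz */
	uint64_t hpet_ticks = 1431800;		/* ~100 ms of HPET ticks */
	uint64_t tsc_ticks = 220000000;		/* 2.2 GHz over the same 100 ms */
	uint64_t ps = hpet_ticks * hpet_period / 1000;	/* elapsed picoseconds */
	uint64_t cpu_khz = tsc_ticks * 1000000000ULL / ps;
	printf("cpu_khz ~= %llu\n", (unsigned long long)cpu_khz); /* ~2200000 */
	return 0;
}
#endif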
718 * pit_calibrate_tsc() uses the speaker output (channel 2) of
719 * the PIT. This is better than using the timer interrupt output,
720 * because we can read the value of the speaker with just one inb(),
721 * whereas we would need three I/O operations for the interrupt channel.
722 * We count how many ticks the TSC does in 50 ms.
725 static unsigned int __init pit_calibrate_tsc(void)
727 unsigned long start, end;
730 spin_lock_irqsave(&i8253_lock, flags);
732 outb((inb(0x61) & ~0x02) | 0x01, 0x61);
735 outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
736 outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
739 while ((inb(0x61) & 0x20) == 0); /* busy-wait until channel 2 output goes high: 50 ms elapsed */
743 spin_unlock_irqrestore(&i8253_lock, flags);
745 return (end - start) / 50;
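/*
 * [Editor's illustrative sketch, not part of the original file.] The PIT
 * input clock is PIT_TICK_RATE Hz, so loading channel 2 with
 * PIT_TICK_RATE / 20 gates exactly 50 ms; the TSC delta over that gate
 * divided by 50 is ticks per millisecond, i.e. kHz:
 */
#if 0
#include <stdio.h>

#define PIT_TICK_RATE 1193182	/* standard i8254 input clock, Hz */

int main(void)
{
	unsigned int latch = PIT_TICK_RATE / (1000 / 50);	/* 59659 */
	unsigned long tsc_delta = 110000000UL;	/* hypothetical 2.2 GHz, 50 ms */
	unsigned long cpu_khz = tsc_delta / 50;	/* 2200000 kHz */
	printf("latch=%u cpu_khz=%lu\n", latch, cpu_khz);
	return 0;
}
#endif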
749 static __init int late_hpet_init(void)
754 if (!vxtime.hpet_address)
757 memset(&hd, 0, sizeof (hd));
759 ntimer = hpet_readl(HPET_ID);
760 ntimer = (ntimer & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT;
764 * Register with the driver.
765 * Timer 0 and Timer 1 are used by the platform.
767 hd.hd_phys_address = vxtime.hpet_address;
768 hd.hd_address = (void *)fix_to_virt(FIX_HPET_BASE);
769 hd.hd_nirqs = ntimer;
770 hd.hd_flags = HPET_DATA_PLATFORM;
771 hpet_reserve_timer(&hd, 0);
772 #ifdef CONFIG_HPET_EMULATE_RTC
773 hpet_reserve_timer(&hd, 1);
775 hd.hd_irq[0] = HPET_LEGACY_8254;
776 hd.hd_irq[1] = HPET_LEGACY_RTC;
779 struct hpet_timer *timer;
782 hpet = (struct hpet *) fix_to_virt(FIX_HPET_BASE);
784 for (i = 2, timer = &hpet->hpet_timers[2]; i < ntimer;
786 hd.hd_irq[i] = (timer->hpet_config &
787 Tn_INT_ROUTE_CNF_MASK) >>
788 Tn_INT_ROUTE_CNF_SHIFT;
795 fs_initcall(late_hpet_init);
798 static int hpet_timer_stop_set_go(unsigned long tick)
803 * Stop the timers and reset the main counter.
806 cfg = hpet_readl(HPET_CFG);
807 cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
808 hpet_writel(cfg, HPET_CFG);
809 hpet_writel(0, HPET_COUNTER);
810 hpet_writel(0, HPET_COUNTER + 4);
813 * Set up timer 0 as periodic, with the first interrupt to happen at hpet_tick,
814 * and with a period of hpet_tick as well.
816 if (hpet_use_timer) {
817 hpet_writel(HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
818 HPET_TN_32BIT, HPET_T0_CFG);
819 hpet_writel(hpet_tick, HPET_T0_CMP);
820 hpet_writel(hpet_tick, HPET_T0_CMP); /* AK: why twice? */
821 cfg |= HPET_CFG_LEGACY;
827 cfg |= HPET_CFG_ENABLE;
828 hpet_writel(cfg, HPET_CFG);
833 static int hpet_init(void)
837 if (!vxtime.hpet_address)
839 set_fixmap_nocache(FIX_HPET_BASE, vxtime.hpet_address);
840 __set_fixmap(VSYSCALL_HPET, vxtime.hpet_address, PAGE_KERNEL_VSYSCALL_NOCACHE);
843 * Read the period, compute tick and quotient.
846 id = hpet_readl(HPET_ID);
848 if (!(id & HPET_ID_VENDOR) || !(id & HPET_ID_NUMBER))
851 hpet_period = hpet_readl(HPET_PERIOD);
852 if (hpet_period < 100000 || hpet_period > 100000000)
855 hpet_tick = (1000000000L * (USEC_PER_SEC / HZ) + hpet_period / 2) /
858 hpet_use_timer = (id & HPET_ID_LEGSUP);
860 return hpet_timer_stop_set_go(hpet_tick);
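/*
 * [Editor's illustrative sketch, not part of the original file.] The
 * hpet_tick computation above turns the tick length (USEC_PER_SEC / HZ
 * microseconds, at 1e9 femtoseconds each) into HPET ticks, adding
 * hpet_period / 2 to round to nearest. Worked numbers for a ~14.318 MHz
 * HPET with HZ=1000 (assumed here):
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* 64-bit long assumed, as on x86-64. */
	unsigned long hpet_period = 69841279;	/* fs per HPET tick */
	unsigned long usec_per_tick = 1000;	/* USEC_PER_SEC / HZ, HZ=1000 */
	unsigned long hpet_tick =
		(1000000000UL * usec_per_tick + hpet_period / 2) / hpet_period;
	printf("hpet_tick=%lu\n", hpet_tick);	/* ~14318 HPET ticks per 1/HZ */
	return 0;
}
#endif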
863 static int hpet_reenable(void)
865 return hpet_timer_stop_set_go(hpet_tick);
868 void __init pit_init(void)
872 spin_lock_irqsave(&i8253_lock, flags);
873 outb_p(0x34, 0x43); /* binary, mode 2, LSB/MSB, ch 0 */
874 outb_p(LATCH & 0xff, 0x40); /* LSB */
875 outb_p(LATCH >> 8, 0x40); /* MSB */
876 spin_unlock_irqrestore(&i8253_lock, flags);
879 int __init time_setup(char *str)
881 report_lost_ticks = 1;
885 static struct irqaction irq0 = {
886 timer_interrupt, SA_INTERRUPT, CPU_MASK_NONE, "timer", NULL, NULL
889 extern void __init config_acpi_tables(void);
891 void __init time_init(void)
895 #ifdef HPET_HACK_ENABLE_DANGEROUS
896 if (!vxtime.hpet_address) {
897 printk(KERN_WARNING "time.c: WARNING: Enabling HPET base "
899 outl(0x800038a0, 0xcf8);
900 outl(0xff000001, 0xcfc);
901 outl(0x800038a0, 0xcf8);
902 vxtime.hpet_address = inl(0xcfc) & 0xfffffffe;
903 printk(KERN_WARNING "time.c: WARNING: Enabled HPET "
904 "at %#lx.\n", vxtime.hpet_address);
908 vxtime.hpet_address = 0;
910 xtime.tv_sec = get_cmos_time();
913 set_normalized_timespec(&wall_to_monotonic,
914 -xtime.tv_sec, -xtime.tv_nsec);
917 vxtime_hz = (1000000000000000L + hpet_period / 2) /
920 if (hpet_use_timer) {
921 cpu_khz = hpet_calibrate_tsc();
923 #ifdef CONFIG_X86_PM_TIMER
924 } else if (pmtmr_ioport) {
925 vxtime_hz = PM_TIMER_FREQUENCY;
928 cpu_khz = pit_calibrate_tsc();
932 cpu_khz = pit_calibrate_tsc();
936 printk(KERN_INFO "time.c: Using %ld.%06ld MHz %s timer.\n",
937 vxtime_hz / 1000000, vxtime_hz % 1000000, timename);
938 printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
939 cpu_khz / 1000, cpu_khz % 1000);
940 vxtime.mode = VXTIME_TSC;
941 vxtime.quot = (1000000L << 32) / vxtime_hz;
942 vxtime.tsc_quot = (1000L << 32) / cpu_khz;
943 vxtime.hz = vxtime_hz;
944 rdtscll_sync(&vxtime.last_tsc);
947 set_cyc2ns_scale(cpu_khz / 1000);
955 * Make an educated guess whether the TSC is trustworthy and synchronized
958 static __init int unsynchronized_tsc(void)
961 if (oem_force_hpet_timer())
963 /* Intel systems are normally all synchronized. Exceptions
964 are handled in the OEM check above. */
965 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
967 /* All in a single socket - should be synchronized */
968 if (cpus_weight(cpu_core_map[0]) == num_online_cpus())
971 /* Assume multi socket systems are not synchronized */
972 return num_online_cpus() > 1;
976 * Decide what mode gettimeofday should use after all CPUs are booted.
978 void __init time_init_gtod(void)
982 if (unsynchronized_tsc())
984 if (vxtime.hpet_address && notsc) {
985 timetype = hpet_use_timer ? "HPET" : "PIT/HPET";
986 vxtime.last = hpet_readl(HPET_T0_CMP) - hpet_tick;
987 vxtime.mode = VXTIME_HPET;
988 do_gettimeoffset = do_gettimeoffset_hpet;
989 #ifdef CONFIG_X86_PM_TIMER
990 /* Using the PM timer for gettimeofday is quite slow, but we have no other
991 choice because the TSC is too unreliable on some systems. */
992 } else if (pmtmr_ioport && !vxtime.hpet_address && notsc) {
994 do_gettimeoffset = do_gettimeoffset_pm;
995 vxtime.mode = VXTIME_PMTMR;
997 printk(KERN_INFO "Disabling vsyscall due to use of PM timer\n");
1000 timetype = hpet_use_timer ? "HPET/TSC" : "PIT/TSC";
1001 vxtime.mode = VXTIME_TSC;
1004 printk(KERN_INFO "time.c: Using %s based timekeeping.\n", timetype);
1007 __setup("report_lost_ticks", time_setup);
1009 static long clock_cmos_diff;
1010 static unsigned long sleep_start;
1012 static int timer_suspend(struct sys_device *dev, pm_message_t state)
1015 * Estimate time zone so that set_time can update the clock
1017 long cmos_time = get_cmos_time();
1019 clock_cmos_diff = -cmos_time;
1020 clock_cmos_diff += get_seconds();
1021 sleep_start = cmos_time;
1025 static int timer_resume(struct sys_device *dev)
1027 unsigned long flags;
1029 unsigned long ctime = get_cmos_time();
1030 unsigned long sleep_length = (ctime - sleep_start) * HZ;
1032 if (vxtime.hpet_address)
1035 i8254_timer_resume();
1037 sec = ctime + clock_cmos_diff;
1038 write_seqlock_irqsave(&xtime_lock, flags);
1041 write_sequnlock_irqrestore(&xtime_lock, flags);
1042 jiffies += sleep_length;
1043 wall_jiffies += sleep_length;
1047 static struct sysdev_class timer_sysclass = {
1048 .resume = timer_resume,
1049 .suspend = timer_suspend,
1050 set_kset_name("timer"),
1054 /* XXX this driverfs stuff should probably go elsewhere later -john */
1055 static struct sys_device device_timer = {
1057 .cls = &timer_sysclass,
1060 static int time_init_device(void)
1062 int error = sysdev_class_register(&timer_sysclass);
1064 error = sysdev_register(&device_timer);
1068 device_initcall(time_init_device);
1070 #ifdef CONFIG_HPET_EMULATE_RTC
1071 /* HPET in LegacyReplacement mode eats up the RTC interrupt line. When HPET
1072 * is enabled, we support RTC interrupt functionality in software.
1073 * The RTC has 3 kinds of interrupts:
1074 * 1) Update Interrupt - generates an interrupt every second, when the RTC clock is updated
1076 * 2) Alarm Interrupt - generates an interrupt at a specific time of day
1077 * 3) Periodic Interrupt - generates a periodic interrupt at frequencies of
1078 * 2 Hz-8192 Hz (2 Hz-64 Hz for non-root users); all frequencies are powers of 2
1079 * (1) and (2) above are implemented using polling at a frequency of
1080 * 64 Hz (DEFAULT_RTC_INT_FREQ). The exact frequency is a tradeoff between
1081 * accuracy and interrupt overhead.
1082 * For (3), we use interrupts at 64 Hz or the user-specified periodic
1083 * frequency, whichever is higher.
1085 #include <linux/rtc.h>
1087 extern irqreturn_t rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs);
1089 #define DEFAULT_RTC_INT_FREQ 64
1090 #define RTC_NUM_INTS 1
1092 static unsigned long UIE_on;
1093 static unsigned long prev_update_sec;
1095 static unsigned long AIE_on;
1096 static struct rtc_time alarm_time;
1098 static unsigned long PIE_on;
1099 static unsigned long PIE_freq = DEFAULT_RTC_INT_FREQ;
1100 static unsigned long PIE_count;
1102 static unsigned long hpet_rtc_int_freq; /* RTC interrupt frequency */
1104 int is_hpet_enabled(void)
1106 return vxtime.hpet_address != 0;
1110 * Timer 1 is used for the RTC; we do not use the periodic interrupt feature,
1111 * even though HPET supports periodic interrupts on Timer 1.
1112 * The reason is that to set up a periodic interrupt in HPET, we need to
1113 * stop the main counter. And if we did that every time someone disables/enables
1114 * the RTC, we would have an adverse effect on the main kernel timer running on Timer 0.
1115 * So, for the time being, simulate the periodic interrupt in software.
1117 * hpet_rtc_timer_init() is called the first time; during subsequent
1118 * interrupts, reinit happens through hpet_rtc_timer_reinit().
1120 int hpet_rtc_timer_init(void)
1122 unsigned int cfg, cnt;
1123 unsigned long flags;
1125 if (!is_hpet_enabled())
1128 * Set up timer 1 and enable its interrupt.
1130 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
1131 hpet_rtc_int_freq = PIE_freq;
1133 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
1135 local_irq_save(flags);
1136 cnt = hpet_readl(HPET_COUNTER);
1137 cnt += (hpet_tick * HZ) / hpet_rtc_int_freq;
1138 hpet_writel(cnt, HPET_T1_CMP);
1139 local_irq_restore(flags);
1141 cfg = hpet_readl(HPET_T1_CFG);
1142 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
1143 hpet_writel(cfg, HPET_T1_CFG);
1148 static void hpet_rtc_timer_reinit(void)
1150 unsigned int cfg, cnt;
1152 if (!(PIE_on | AIE_on | UIE_on))
1155 if (PIE_on && (PIE_freq > DEFAULT_RTC_INT_FREQ))
1156 hpet_rtc_int_freq = PIE_freq;
1158 hpet_rtc_int_freq = DEFAULT_RTC_INT_FREQ;
1160 /* It is more accurate to use the comparator value than the current count. */
1161 cnt = hpet_readl(HPET_T1_CMP);
1162 cnt += hpet_tick * HZ / hpet_rtc_int_freq;
1163 hpet_writel(cnt, HPET_T1_CMP);
1165 cfg = hpet_readl(HPET_T1_CFG);
1166 cfg |= HPET_TN_ENABLE | HPET_TN_SETVAL | HPET_TN_32BIT;
1167 hpet_writel(cfg, HPET_T1_CFG);
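/*
 * [Editor's illustrative sketch, not part of the original file.] Both
 * init and reinit above advance the comparator by hpet_tick * HZ /
 * hpet_rtc_int_freq: hpet_tick * HZ is HPET ticks per second, so the
 * quotient is the tick spacing for the emulated RTC rate. Worked numbers
 * (HZ=1000 and a ~14.318 MHz HPET assumed):
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long hpet_tick = 14318;	/* HPET ticks per 1/HZ */
	unsigned long hz = 1000;
	unsigned long freq = 64;		/* DEFAULT_RTC_INT_FREQ */
	unsigned long delta = hpet_tick * hz / freq;
	printf("delta=%lu\n", delta);	/* 223718 HPET ticks = 1/64 s */
	return 0;
}
#endif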
1173 * The functions below are called from the rtc driver.
1174 * Return 0 if HPET is not being used.
1175 * Otherwise, make the necessary changes and return 1.
1177 int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
1179 if (!is_hpet_enabled())
1182 if (bit_mask & RTC_UIE)
1184 if (bit_mask & RTC_PIE)
1186 if (bit_mask & RTC_AIE)
1192 int hpet_set_rtc_irq_bit(unsigned long bit_mask)
1194 int timer_init_reqd = 0;
1196 if (!is_hpet_enabled())
1199 if (!(PIE_on | AIE_on | UIE_on))
1200 timer_init_reqd = 1;
1202 if (bit_mask & RTC_UIE) {
1205 if (bit_mask & RTC_PIE) {
1209 if (bit_mask & RTC_AIE) {
1213 if (timer_init_reqd)
1214 hpet_rtc_timer_init();
1219 int hpet_set_alarm_time(unsigned char hrs, unsigned char min, unsigned char sec)
1221 if (!is_hpet_enabled())
1224 alarm_time.tm_hour = hrs;
1225 alarm_time.tm_min = min;
1226 alarm_time.tm_sec = sec;
1231 int hpet_set_periodic_freq(unsigned long freq)
1233 if (!is_hpet_enabled())
1242 int hpet_rtc_dropped_irq(void)
1244 if (!is_hpet_enabled())
1250 irqreturn_t hpet_rtc_interrupt(int irq, void *dev_id, struct pt_regs *regs)
1252 struct rtc_time curr_time;
1253 unsigned long rtc_int_flag = 0;
1254 int call_rtc_interrupt = 0;
1256 hpet_rtc_timer_reinit();
1258 if (UIE_on | AIE_on) {
1259 rtc_get_rtc_time(&curr_time);
1262 if (curr_time.tm_sec != prev_update_sec) {
1263 /* Set update int info, call real rtc int routine */
1264 call_rtc_interrupt = 1;
1265 rtc_int_flag = RTC_UF;
1266 prev_update_sec = curr_time.tm_sec;
1271 if (PIE_count >= hpet_rtc_int_freq / PIE_freq) {
1272 /* Set periodic int info, call real rtc int routine */
1273 call_rtc_interrupt = 1;
1274 rtc_int_flag |= RTC_PF;
1279 if ((curr_time.tm_sec == alarm_time.tm_sec) &&
1280 (curr_time.tm_min == alarm_time.tm_min) &&
1281 (curr_time.tm_hour == alarm_time.tm_hour)) {
1282 /* Set alarm int info, call real rtc int routine */
1283 call_rtc_interrupt = 1;
1284 rtc_int_flag |= RTC_AF;
1287 if (call_rtc_interrupt) {
1288 rtc_int_flag |= (RTC_IRQF | (RTC_NUM_INTS << 8));
1289 rtc_interrupt(rtc_int_flag, dev_id, regs);
1297 static int __init nohpet_setup(char *s)
1303 __setup("nohpet", nohpet_setup);
1306 static int __init notsc_setup(char *s)
1312 __setup("notsc", notsc_setup);