/*
 *  linux/arch/x86-64/kernel/time.c
 *
 *  "High Precision Event Timer" based timekeeping.
 *
 *  Copyright (c) 1991,1992,1995  Linus Torvalds
 *  Copyright (c) 1994  Alan Modra
 *  Copyright (c) 1995  Markus Kuhn
 *  Copyright (c) 1996  Ingo Molnar
 *  Copyright (c) 1998  Andrea Arcangeli
 *  Copyright (c) 2002,2006  Vojtech Pavlik
 *  Copyright (c) 2003  Andi Kleen
 *  RTC support code taken from arch/i386/kernel/timers/time_hpet.c
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/time.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sysdev.h>
#include <linux/bcd.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/kallsyms.h>
#include <linux/acpi.h>
#ifdef CONFIG_ACPI
#include <acpi/achware.h>	/* for PM timer frequency */
#include <acpi/acpi_bus.h>
#endif
#include <asm/8253pit.h>
#include <asm/pgtable.h>
#include <asm/vsyscall.h>
#include <asm/timex.h>
#include <asm/proto.h>
#include <asm/hpet.h>
#include <asm/sections.h>
#include <linux/hpet.h>
#include <asm/apic.h>
#include <asm/mpspec.h>
#include <asm/nmi.h>

static char *timename = NULL;

DEFINE_SPINLOCK(rtc_lock);
EXPORT_SYMBOL(rtc_lock);
DEFINE_SPINLOCK(i8253_lock);

volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;

unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* Assume the lock function has either no stack frame or a copy
	   of eflags from PUSHF.
	   Eflags always has bits 22 and up cleared unlike kernel addresses. */
	if (!user_mode(regs) && in_lock_functions(pc)) {
		unsigned long *sp = (unsigned long *)regs->rsp;
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);

/*
 * In order to set the CMOS clock precisely, set_rtc_mmss has to be called 500
 * ms after the second nowtime has started, because when nowtime is written
 * into the registers of the CMOS clock, it will jump to the next second
 * precisely 500 ms later. Check the Motorola MC146818A or Dallas DS12887 data
 * sheet for details.
 */

static void set_rtc_mmss(unsigned long nowtime)
{
	int real_seconds, real_minutes, cmos_minutes;
	unsigned char control, freq_select;

/*
 * IRQs are disabled when we're called from the timer interrupt,
 * no need for spin_lock_irqsave().
 */

	spin_lock(&rtc_lock);

/*
 * Tell the clock it's being set and stop it.
 */

	control = CMOS_READ(RTC_CONTROL);
	CMOS_WRITE(control | RTC_SET, RTC_CONTROL);

	freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE(freq_select | RTC_DIV_RESET2, RTC_FREQ_SELECT);

	cmos_minutes = CMOS_READ(RTC_MINUTES);
	BCD_TO_BIN(cmos_minutes);

/*
 * Since we're only adjusting minutes and seconds, don't interfere with hour
 * overflow. This avoids messing with unknown time zones but requires your RTC
 * not to be off by more than 15 minutes. Since we're calling it only when
 * our clock is externally synchronized using NTP, this shouldn't be a problem.
 */

	real_seconds = nowtime % 60;
	real_minutes = nowtime / 60;
	if (((abs(real_minutes - cmos_minutes) + 15) / 30) & 1)
		real_minutes += 30;	/* correct for half hour time zone */
	real_minutes %= 60;
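
	/*
	 * Illustrative example: with the RTC kept in a half-hour-offset zone
	 * (say it reads minute 40 while nowtime is at minute 10), the minute
	 * difference rounds to an odd multiple of 30, so 30 is added and
	 * real_minutes becomes 40 as well; the sanity check below then sees
	 * a difference under 30 and lets the update proceed.
	 */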

	if (abs(real_minutes - cmos_minutes) >= 30) {
		printk(KERN_WARNING "time.c: can't update CMOS clock "
		       "from %d to %d\n", cmos_minutes, real_minutes);
	} else {
		BIN_TO_BCD(real_seconds);
		BIN_TO_BCD(real_minutes);
		CMOS_WRITE(real_seconds, RTC_SECONDS);
		CMOS_WRITE(real_minutes, RTC_MINUTES);
	}

/*
 * The following flags have to be released exactly in this order, otherwise the
 * DS12887 (popular MC146818A clone with integrated battery and quartz) will
 * not reset the oscillator and will not update precisely 500 ms later. You
 * won't find this mentioned in the Dallas Semiconductor data sheets, but who
 * believes data sheets anyway ... -- Markus Kuhn
 */

	CMOS_WRITE(control, RTC_CONTROL);
	CMOS_WRITE(freq_select, RTC_FREQ_SELECT);

	spin_unlock(&rtc_lock);
}

void main_timer_handler(void)
{
	static unsigned long rtc_update = 0;

/*
 * Here we are in the timer irq handler. We have irqs locally disabled (so we
 * don't need spin_lock_irqsave()) but we don't know if the timer_bh is running
 * on the other CPU, so we need a lock. We also need to lock the vsyscall
 * variables, because both do_timer() and we change them -arca+vojtech
 */

	write_seqlock(&xtime_lock);

/*
 * Do the timer stuff.
 */

	do_timer(1);
#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif

/*
 * In the SMP case we use the local APIC timer interrupt to do the profiling,
 * except when we simulate SMP mode on a uniprocessor system, in that case we
 * have to call the local interrupt handler.
 */

	if (!using_apic_timer)
		smp_local_timer_interrupt();

/*
 * If we have an externally synchronized Linux clock, then update the CMOS
 * clock accordingly every ~11 minutes. set_rtc_mmss() will be called in the
 * jiffy closest to exactly 500 ms before the next second. If the update fails,
 * we don't care, as it'll be updated on the next turn, and the problem (time
 * way off) isn't likely to go away much sooner anyway.
 */
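
	/*
	 * Illustration (assuming HZ=1000, so tick_nsec is roughly 1,000,000
	 * ns): the test below matches at most one tick per second, namely the
	 * one whose tv_nsec lies within half a tick of 500,000,000 ns, and
	 * the 660 s added to rtc_update gives the ~11 minute interval.
	 */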

	if (ntp_synced() && xtime.tv_sec > rtc_update &&
	    abs(xtime.tv_nsec - 500000000) <= tick_nsec / 2) {
		set_rtc_mmss(xtime.tv_sec);
		rtc_update = xtime.tv_sec + 660;	/* defer the next update by ~11 min */
	}

	write_sequnlock(&xtime_lock);
}

static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	if (apic_runs_main_timer > 1)
		return IRQ_HANDLED;
	main_timer_handler();
	if (using_apic_timer)
		smp_send_timer_broadcast_ipi();
	return IRQ_HANDLED;
}

static unsigned long get_cmos_time(void)
{
	unsigned int year, mon, day, hour, min, sec;
	unsigned long flags;
	unsigned century = 0;

	spin_lock_irqsave(&rtc_lock, flags);

	do {
		sec = CMOS_READ(RTC_SECONDS);
		min = CMOS_READ(RTC_MINUTES);
		hour = CMOS_READ(RTC_HOURS);
		day = CMOS_READ(RTC_DAY_OF_MONTH);
		mon = CMOS_READ(RTC_MONTH);
		year = CMOS_READ(RTC_YEAR);
#ifdef CONFIG_ACPI
		if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
		    acpi_gbl_FADT.century)
			century = CMOS_READ(acpi_gbl_FADT.century);
#endif
	} while (sec != CMOS_READ(RTC_SECONDS));

	spin_unlock_irqrestore(&rtc_lock, flags);

	/*
	 * We know that x86-64 always uses BCD format, no need to check the
	 * configuration register.
	 */
	BCD_TO_BIN(sec);
	BCD_TO_BIN(min);
	BCD_TO_BIN(hour);
	BCD_TO_BIN(day);
	BCD_TO_BIN(mon);
	BCD_TO_BIN(year);

	if (century) {
		BCD_TO_BIN(century);
		year += century * 100;
		printk(KERN_INFO "Extended CMOS year: %d\n", century * 100);
	} else {
		/*
		 * x86-64 systems have only existed since 2002.
		 * This will work up to Dec 31, 2100.
		 */
		year += 2000;
	}

	return mktime(year, mon, day, hour, min, sec);
}

/* calibrate_cpu is used on systems with fixed rate TSCs to determine
 * processor frequency */
#define TICK_COUNT 100000000

static unsigned int __init tsc_calibrate_cpu_khz(void)
{
	int tsc_start, tsc_now;
	int i, no_ctr_free;
	unsigned long evntsel3 = 0, pmc3 = 0, pmc_now = 0;
	unsigned long flags;

	for (i = 0; i < 4; i++)
		if (avail_to_resrv_perfctr_nmi_bit(i))
			break;
	no_ctr_free = (i == 4);
	if (no_ctr_free) {
		i = 3;
		rdmsrl(MSR_K7_EVNTSEL3, evntsel3);
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		rdmsrl(MSR_K7_PERFCTR3, pmc3);
	} else {
		reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
	local_irq_save(flags);
	/* start measuring cycles, incrementing from 0 */
	wrmsrl(MSR_K7_PERFCTR0 + i, 0);
	/* enable (bit 22), count in user+kernel mode (bits 16-17),
	 * event 0x76: CPU clocks not halted */
	wrmsrl(MSR_K7_EVNTSEL0 + i, 1 << 22 | 3 << 16 | 0x76);
	rdtscl(tsc_start);
	do {
		rdmsrl(MSR_K7_PERFCTR0 + i, pmc_now);
		tsc_now = get_cycles_sync();
	} while ((tsc_now - tsc_start) < TICK_COUNT);

	local_irq_restore(flags);
	if (no_ctr_free) {
		wrmsrl(MSR_K7_EVNTSEL3, 0);
		wrmsrl(MSR_K7_PERFCTR3, pmc3);
		wrmsrl(MSR_K7_EVNTSEL3, evntsel3);
	} else {
		release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
		release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
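
	/*
	 * pmc_now counted core cycles while (tsc_now - tsc_start) counted
	 * TSC ticks over the same interval, so scaling the previously
	 * measured tsc_khz by their ratio yields the core clock in kHz.
	 */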
	return pmc_now * tsc_khz / (tsc_now - tsc_start);
}

/*
 * pit_calibrate_tsc() uses the speaker output (channel 2) of
 * the PIT. This is better than using the timer interrupt output,
 * because we can read the value of the speaker with just one inb(),
 * where we need three i/o operations for the interrupt channel.
 * We count how many ticks the TSC does in 50 ms.
 */

static unsigned int __init pit_calibrate_tsc(void)
{
	unsigned long start, end;
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);

	/* Gate PIT channel 2 (speaker) on, speaker data output off. */
	outb((inb(0x61) & ~0x02) | 0x01, 0x61);

	/* Channel 2, mode 0, binary, LSB then MSB. */
	outb(0xb0, 0x43);
	outb((PIT_TICK_RATE / (1000 / 50)) & 0xff, 0x42);
	outb((PIT_TICK_RATE / (1000 / 50)) >> 8, 0x42);
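	/*
	 * PIT_TICK_RATE / (1000 / 50) = 1193182 / 20, i.e. roughly 59659 PIT
	 * ticks, so the channel 2 counter just loaded runs out after about
	 * 50 ms.
	 */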
	start = get_cycles_sync();
	while ((inb(0x61) & 0x20) == 0);
	end = get_cycles_sync();

	spin_unlock_irqrestore(&i8253_lock, flags);
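
	/*
	 * (end - start) is the number of TSC increments that occurred in the
	 * ~50 ms window above; dividing by 50 yields increments per
	 * millisecond, which is the TSC frequency in kHz.
	 */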
	return (end - start) / 50;
}

#define PIT_MODE 0x43
#define PIT_CH0  0x40

static void __pit_init(int val, u8 mode)
{
	unsigned long flags;

	spin_lock_irqsave(&i8253_lock, flags);
	outb_p(mode, PIT_MODE);
	outb_p(val & 0xff, PIT_CH0);	/* LSB */
	outb_p(val >> 8, PIT_CH0);	/* MSB */
	spin_unlock_irqrestore(&i8253_lock, flags);
}
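
/*
 * LATCH is (CLOCK_TICK_RATE + HZ/2) / HZ, i.e. the PIT input clock divided
 * by HZ rounded to the nearest count, so programming channel 0 with it in
 * mode 2 (rate generator) makes IRQ 0 fire HZ times per second.
 */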

void __init pit_init(void)
{
	__pit_init(LATCH, 0x34);	/* binary, mode 2, LSB/MSB, ch 0 */
}

void pit_stop_interrupt(void)
{
	__pit_init(0, 0x30);	/* mode 0 */
}

void stop_timer_interrupt(void)
{
	char *name;

	if (hpet_address) {
		name = "HPET";
		hpet_timer_stop_set_go(0);
	} else {
		name = "PIT";
		pit_stop_interrupt();
	}
	printk(KERN_INFO "timer: %s interrupt stopped.\n", name);
}

static struct irqaction irq0 = {
	.handler	= timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_IRQPOLL,
	.mask		= CPU_MASK_NONE,
	.name		= "timer"
};

void __init time_init(void)
{
	xtime.tv_sec = get_cmos_time();
	xtime.tv_nsec = 0;

	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	if (hpet_arch_init())
		hpet_address = 0;

	if (hpet_use_timer) {
		/* set tick_nsec to use the proper rate for HPET */
		tick_nsec = TICK_NSEC_HPET;
		tsc_khz = hpet_calibrate_tsc();
		timename = "HPET";
	} else {
		pit_init();
		tsc_khz = pit_calibrate_tsc();
		timename = "PIT";
	}

	cpu_khz = tsc_khz;
	if (cpu_has(&boot_cpu_data, X86_FEATURE_CONSTANT_TSC) &&
	    boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
	    boot_cpu_data.x86 == 16)
		cpu_khz = tsc_calibrate_cpu_khz();

	if (unsynchronized_tsc())
		mark_tsc_unstable("TSCs unsynchronized");

	if (cpu_has(&boot_cpu_data, X86_FEATURE_RDTSCP))
		vgetcpu_mode = VGETCPU_RDTSCP;
	else
		vgetcpu_mode = VGETCPU_LSL;

	set_cyc2ns_scale(tsc_khz);
	printk(KERN_INFO "time.c: Detected %d.%03d MHz processor.\n",
		cpu_khz / 1000, cpu_khz % 1000);
	init_tsc_clocksource();

	setup_irq(0, &irq0);
}

static long clock_cmos_diff;
static unsigned long sleep_start;

/*
 * sysfs support for the timer.
 */

static int timer_suspend(struct sys_device *dev, pm_message_t state)
{
	/*
	 * Estimate time zone so that set_time can update the clock.
	 */
	long cmos_time = get_cmos_time();

	clock_cmos_diff = -cmos_time;
	clock_cmos_diff += get_seconds();
	sleep_start = cmos_time;
	return 0;
}
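
/*
 * clock_cmos_diff holds (system time - CMOS time) captured at suspend;
 * adding it to a fresh CMOS reading in timer_resume() reconstructs the wall
 * clock, while sleep_start lets the resume path work out how long we slept.
 */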

static int timer_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long sec;
	unsigned long ctime = get_cmos_time();
	long sleep_length = (ctime - sleep_start) * HZ;
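
	/*
	 * sleep_start and ctime are in CMOS seconds, so multiplying the
	 * elapsed suspend time by HZ converts it to jiffies before it is
	 * added to the jiffies counter further down.
	 */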
	if (sleep_length < 0) {
		printk(KERN_WARNING "Time skew detected in timer resume!\n");
		/* The time after the resume must not be earlier than the time
		 * before the suspend or some nasty things will happen.
		 */
		sleep_length = 0;
		ctime = sleep_start;
	}
	if (hpet_address)
		hpet_reenable();
	else
		i8254_timer_resume();

	sec = ctime + clock_cmos_diff;
	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	jiffies += sleep_length;
	write_sequnlock_irqrestore(&xtime_lock, flags);
	touch_softlockup_watchdog();
	return 0;
}

static struct sysdev_class timer_sysclass = {
	.resume = timer_resume,
	.suspend = timer_suspend,
	set_kset_name("timer"),
};

/* XXX this sysfs stuff should probably go elsewhere later -john */
static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timer_sysclass,
};

static int time_init_device(void)
{
	int error = sysdev_class_register(&timer_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(time_init_device);