arch/x86/lib/delay_64.c
/*
 *      Precise Delay Loops for x86-64
 *
 *      Copyright (C) 1993 Linus Torvalds
 *      Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 *      The __delay function must _NOT_ be inlined as its execution time
 *      depends wildly on alignment on many x86 processors.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/msr.h>

#ifdef CONFIG_SMP
#include <asm/smp.h>
#endif

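/*
 * Return the current TSC value.  This gives the boot-time delay-loop
 * calibration code a high-resolution time reference, so loops_per_jiffy
 * ends up expressed in TSC ticks per jiffy for this implementation.
 */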
int __devinit read_current_timer(unsigned long *timer_value)
{
        rdtscll(*timer_value);
        return 0;
}

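/*
 * Busy-wait until at least 'loops' TSC ticks have elapsed.  Preemption
 * is briefly re-enabled on every pass so RT tasks can run, and the
 * baseline is re-read if the task migrates to another CPU, since TSCs
 * are per-CPU and need not be synchronized between CPUs.
 */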
void __delay(unsigned long loops)
{
        unsigned bclock, now;
        int cpu;

        preempt_disable();
        cpu = smp_processor_id();
        rdtscl(bclock);
        for (;;) {
                rdtscl(now);
                if ((now - bclock) >= loops)
                        break;

                /* Allow RT tasks to run */
                preempt_enable();
                rep_nop();
                preempt_disable();

                /*
                 * It is possible that we moved to another CPU, and
                 * since TSC's are per-cpu we need to calculate
                 * that. The delay must guarantee that we wait "at
                 * least" the amount of time. Being moved to another
                 * CPU could make the wait longer but we just need to
                 * make sure we waited long enough. Rebalance the
                 * counter for this CPU.
                 */
                if (unlikely(cpu != smp_processor_id())) {
                        loops -= (now - bclock);
                        cpu = smp_processor_id();
                        rdtscl(bclock);
                }
        }
        preempt_enable();
}
EXPORT_SYMBOL(__delay);

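/*
 * 'xloops' is the requested delay as a 2^32-scaled fraction of a second
 * (see the constants in __udelay/__ndelay below).  Multiplying by
 * HZ * loops_per_jiffy and shifting right by 32 converts that into the
 * TSC-tick count that __delay() expects; the "+ 1" compensates for the
 * truncation of the shift so we never wait less than requested.
 *
 * Example: __udelay(10) passes xloops = 10 * 0x10c7 ~= 10 * 2^32 / 10^6,
 * which after the multiply-and-shift is roughly 10 microseconds' worth
 * of TSC ticks.
 */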
inline void __const_udelay(unsigned long xloops)
{
        __delay(((xloops * HZ *
                cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
}
EXPORT_SYMBOL(__const_udelay);

void __udelay(unsigned long usecs)
{
        __const_udelay(usecs * 0x000010c7);  /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);

void __ndelay(unsigned long nsecs)
{
        __const_udelay(nsecs * 0x00005);  /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
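
/*
 * Note: callers normally use the udelay()/ndelay() wrappers from
 * <linux/delay.h> rather than calling these functions directly; for
 * compile-time-constant arguments the wrapper typically folds the
 * 2^32-scaled multiply straight into a __const_udelay() call.
 * Illustrative driver usage, waiting ~10us for a device register
 * to settle:
 *
 *      writel(val, reg);
 *      udelay(10);
 *      status = readl(reg);
 */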