/*
 *	Precise Delay Loops for x86-64
 *
 *	Copyright (C) 1993 Linus Torvalds
 *	Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
 *
 *	The __delay function must _NOT_ be inlined as its execution time
 *	depends wildly on alignment on many x86 processors.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <linux/preempt.h>
#include <linux/delay.h>
#include <linux/init.h>

#include <asm/delay.h>
#include <asm/msr.h>

#ifdef CONFIG_SMP
#include <asm/smp.h>
#endif
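
/*
 * Note: read_current_timer() is what allows the generic boot-time
 * calibration code (calibrate_delay_direct()) to derive loops_per_jiffy
 * straight from the TSC instead of timing a software loop; the caller
 * lives outside this file.
 */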
int __devinit read_current_timer(unsigned long *timer_value)
{
	rdtscll(*timer_value);
	return 0;
}
void __delay(unsigned long loops)
{
	unsigned bclock, now;
	int cpu;

	preempt_disable();		/* TSC's are per-cpu */
	cpu = smp_processor_id();
	rdtscl(bclock);
	for (;;) {
		rdtscl(now);
		if ((now - bclock) >= loops)
			break;
		/* Allow RT tasks to run */
		preempt_enable();
		rep_nop();
		preempt_disable();
		/*
		 * It is possible that we moved to another CPU, and
		 * since TSC's are per-cpu we need to calculate
		 * that. The delay must guarantee that we wait "at
		 * least" the amount of time. Being moved to another
		 * CPU could make the wait longer but we just need to
		 * make sure we waited long enough. Rebalance the
		 * counter for this CPU.
		 */
		if (unlikely(cpu != smp_processor_id())) {
			loops -= (now - bclock);
			cpu = smp_processor_id();
			rdtscl(bclock);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(__delay);
inline void __const_udelay(unsigned long xloops)
{
	__delay(((xloops * HZ *
		cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
}
EXPORT_SYMBOL(__const_udelay);
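
/*
 * Illustrative numbers only (not taken from this file): with HZ=1000 and
 * loops_per_jiffy=4000000 (roughly a 4 GHz TSC), __udelay(10) below passes
 * xloops = 10 * 0x10c7 ~= 10 * 2^32/10^6, so
 *
 *	(xloops * HZ * loops_per_jiffy) >> 32
 *	~= 10 us * 1000 jiffies/s * 4000000 ticks/jiffy / 10^6 = 40000,
 *
 * i.e. about 40000 TSC ticks (plus 1 for rounding) handed to __delay().
 */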
void __udelay(unsigned long usecs)
{
	__const_udelay(usecs * 0x000010c7); /* 2**32 / 1000000 (rounded up) */
}
EXPORT_SYMBOL(__udelay);
void __ndelay(unsigned long nsecs)
{
	__const_udelay(nsecs * 0x00005); /* 2**32 / 1000000000 (rounded up) */
}
EXPORT_SYMBOL(__ndelay);
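
/*
 * Illustrative usage from driver code, via the udelay()/ndelay() wrappers
 * declared in <asm/delay.h>: constant arguments are routed straight to
 * __const_udelay(), variable ones come through __udelay()/__ndelay():
 *
 *	udelay(10);	spin for at least 10 microseconds
 *	ndelay(300);	spin for at least 300 nanoseconds
 */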