linux-2.6: arch/x86_64/kernel/tsc.c

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/clocksource.h>
#include <linux/time.h>
#include <linux/acpi.h>
#include <linux/cpufreq.h>

#include <asm/timex.h>

static int notsc __initdata = 0;

unsigned int cpu_khz;           /* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

static unsigned int cyc2ns_scale __read_mostly;

void set_cyc2ns_scale(unsigned long khz)
{
        cyc2ns_scale = (NSEC_PER_MSEC << NS_SCALE) / khz;
}

static unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> NS_SCALE;
}

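/*
 * Worked example of the fixed-point math above (illustrative only,
 * assuming NS_SCALE == 10 as in <asm-x86_64/timex.h> and a 2 GHz CPU,
 * i.e. khz == 2000000):
 *
 *      cyc2ns_scale = (NSEC_PER_MSEC << 10) / 2000000
 *                   = (1000000 * 1024) / 2000000 = 512
 *
 *      cycles_2_ns(cyc) = (cyc * 512) >> 10 = cyc / 2
 *
 * i.e. 0.5 ns per cycle, as expected for a 2 GHz TSC.
 */
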
unsigned long long sched_clock(void)
{
        unsigned long a = 0;

        /* Could do CPU core sync here. Opteron can execute rdtsc
         * speculatively, which means it is not completely exact and may
         * not be monotonic between CPUs. But the errors should be too
         * small to matter for scheduling purposes.
         */

        rdtscll(a);
        return cycles_2_ns(a);
}
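
/*
 * Illustrative sketch only (not part of this file): if exactness mattered
 * here, the speculative rdtsc could be fenced first, along the lines of:
 *
 *      unsigned long long serialized_rdtsc(void)
 *      {
 *              unsigned long long t;
 *              asm volatile("lfence" ::: "memory");    /. bar speculation ./
 *              rdtscll(t);
 *              return t;
 *      }
 *
 * The clocksource path below gets its synchronization from
 * get_cycles_sync() instead.
 */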

static int tsc_unstable;

static inline int check_tsc_unstable(void)
{
        return tsc_unstable;
}
#ifdef CONFIG_CPU_FREQ

/* Frequency scaling support. Adjust the TSC based timer when the cpu
 * frequency changes.
 *
 * RED-PEN: On SMP we assume all CPUs run with the same frequency.  It's
 * not that important because current Opteron setups do not support
 * scaling on SMP anyway.
 *
 * Should fix up last_tsc too. Currently gettimeofday in the
 * first tick after the change will be slightly wrong.
 */

#include <linux/workqueue.h>

static unsigned int cpufreq_delayed_issched = 0;
static unsigned int cpufreq_init = 0;
static struct work_struct cpufreq_delayed_get_work;

/* Re-read the current frequency of every online CPU from process context. */
static void handle_cpufreq_delayed_get(struct work_struct *v)
{
        unsigned int cpu;
        for_each_online_cpu(cpu) {
                cpufreq_get(cpu);
        }
        cpufreq_delayed_issched = 0;
}

static unsigned int  ref_freq = 0;
static unsigned long loops_per_jiffy_ref = 0;

static unsigned long cpu_khz_ref = 0;

static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                                 void *data)
{
        struct cpufreq_freqs *freq = data;
        unsigned long *lpj, dummy;

        if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
                return 0;

        lpj = &dummy;
        if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
                lpj = &cpu_data[freq->cpu].loops_per_jiffy;
#else
                lpj = &boot_cpu_data.loops_per_jiffy;
#endif

        if (!ref_freq) {
                ref_freq = freq->old;
                loops_per_jiffy_ref = *lpj;
                cpu_khz_ref = cpu_khz;
        }
        if ((val == CPUFREQ_PRECHANGE  && freq->old < freq->new) ||
            (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
            (val == CPUFREQ_RESUMECHANGE)) {
                *lpj = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);

                cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        mark_tsc_unstable();
        }

        /* Rescale with the updated cpu_khz, not the boot-time reference,
           so that cycles_2_ns() tracks the new frequency. */
        set_cyc2ns_scale(cpu_khz);

        return 0;
}

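/*
 * Worked example (illustrative): with ref_freq == 2000000 kHz recorded at
 * the first transition and a later change to freq->new == 1000000 kHz,
 * cpufreq_scale(old, div, mult) computes roughly old * mult / div, so
 *
 *      cpu_khz = cpufreq_scale(2000000, 2000000, 1000000) == 1000000
 *
 * and loops_per_jiffy is halved the same way, keeping udelay() and the
 * cycles-to-ns conversion consistent with the slower clock.
 */
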
static struct notifier_block time_cpufreq_notifier_block = {
        .notifier_call  = time_cpufreq_notifier
};

static int __init cpufreq_tsc(void)
{
        INIT_WORK(&cpufreq_delayed_get_work, handle_cpufreq_delayed_get);
        if (!cpufreq_register_notifier(&time_cpufreq_notifier_block,
                                       CPUFREQ_TRANSITION_NOTIFIER))
                cpufreq_init = 1;
        return 0;
}

core_initcall(cpufreq_tsc);

#endif
/*
 * Make an educated guess if the TSC is trustworthy and synchronized
 * over all CPUs.
 */
__cpuinit int unsynchronized_tsc(void)
{
        if (tsc_unstable)
                return 1;

#ifdef CONFIG_SMP
        if (apic_is_clustered_box())
                return 1;
#endif
        /* Most Intel systems have synchronized TSCs except for
           multi-node systems */
        if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
#ifdef CONFIG_ACPI
                /* But the TSC doesn't tick in C3, so don't use it there */
                if (acpi_gbl_FADT.header.length > 0 &&
                    acpi_gbl_FADT.C3latency < 1000)
                        return 1;
#endif
                return 0;
        }

        /* Assume multi-socket systems are not synchronized */
        return num_present_cpus() > 1;
}
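
/*
 * Illustrative outcomes (assumed configurations): a two-socket Opteron box
 * falls through to the num_present_cpus() check and returns 1, while a
 * single-node Intel box whose FADT reports no fast C3 entry
 * (C3latency >= 1000) returns 0 and may keep the TSC as a clocksource.
 */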

int __init notsc_setup(char *s)
{
        notsc = 1;
        return 1;
}

__setup("notsc", notsc_setup);

/* clock source code: */

/* Clocksource read callback, called from kernel context. */
static cycle_t read_tsc(void)
{
        cycle_t ret = (cycle_t)get_cycles_sync();
        return ret;
}

/* Same read, but placed in the vsyscall page so userspace
   gettimeofday() can use it without entering the kernel. */
static cycle_t __vsyscall_fn vread_tsc(void)
{
        cycle_t ret = (cycle_t)get_cycles_sync();
        return ret;
}

static struct clocksource clocksource_tsc = {
        .name                   = "tsc",
        .rating                 = 300,
        .read                   = read_tsc,
        .mask                   = CLOCKSOURCE_MASK(64),
        .shift                  = 22,
        .flags                  = CLOCK_SOURCE_IS_CONTINUOUS |
                                  CLOCK_SOURCE_MUST_VERIFY,
        .vread                  = vread_tsc,
};

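/*
 * Illustrative numbers for the mult/shift pair that
 * init_tsc_clocksource() computes below (assuming cpu_khz == 2000000):
 *
 *      mult = clocksource_khz2mult(2000000, 22)
 *           ~= (NSEC_PER_MSEC << 22) / 2000000 = 2097152
 *
 * so the generic timekeeping code converts as
 *
 *      ns = (cycles * 2097152) >> 22 = cycles / 2
 *
 * matching cycles_2_ns() above for a 2 GHz TSC.
 */
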
void mark_tsc_unstable(void)
{
        if (!tsc_unstable) {
                tsc_unstable = 1;
                /* If already registered (mult set), demote through the
                   clocksource core; otherwise just drop the rating. */
                if (clocksource_tsc.mult)
                        clocksource_change_rating(&clocksource_tsc, 0);
                else
                        clocksource_tsc.rating = 0;
        }
}
EXPORT_SYMBOL_GPL(mark_tsc_unstable);

static int __init init_tsc_clocksource(void)
{
        if (!notsc) {
                clocksource_tsc.mult = clocksource_khz2mult(cpu_khz,
                                                        clocksource_tsc.shift);
                if (check_tsc_unstable())
                        clocksource_tsc.rating = 0;

                return clocksource_register(&clocksource_tsc);
        }
        return 0;
}

module_init(init_tsc_clocksource);
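
/*
 * Usage note (illustrative): booting with "notsc" on the kernel command
 * line sets notsc via notsc_setup() above, so init_tsc_clocksource()
 * skips registration and the generic timekeeping code falls back to
 * another clocksource such as hpet or acpi_pm, e.g.:
 *
 *      kernel /vmlinuz ro root=/dev/sda1 notsc
 */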