/*
 * linux/arch/ia64/kernel/time.c
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger <davidm@hpl.hp.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 * Copyright (C) 1999-2000 VA Linux Systems
 * Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
 */

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/efi.h>
#include <linux/timex.h>
#include <linux/clocksource.h>

#include <asm/machvec.h>
#include <asm/delay.h>
#include <asm/hw_irq.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/system.h>

#include "fsyscall_gtod_data.h"

static cycle_t itc_get_cycles(void);

struct fsyscall_gtod_data_t fsyscall_gtod_data = {
	.lock = SEQLOCK_UNLOCKED,
};

struct itc_jitter_data_t itc_jitter_data;

volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */

#ifdef CONFIG_IA64_DEBUG_IRQ

unsigned long last_cli_ip;
EXPORT_SYMBOL(last_cli_ip);

#endif

static struct clocksource clocksource_itc = {
	.name           = "itc",
	.rating         = 350,
	.read           = itc_get_cycles,
	.mask           = CLOCKSOURCE_MASK(64),
	.mult           = 0, /* to be calculated */
	.shift          = 16,
	.flags          = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct clocksource *itc_clocksource;
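
/*
 * The clocksource core converts cycles to nanoseconds as
 * ns = (cycles * mult) >> shift. mult is left zero above because it
 * depends on the ITC frequency, which is known only after the SAL/PAL
 * queries in ia64_init_itm(); see the clocksource_hz2mult() call there.
 */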

static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
	unsigned long new_itm;

	if (unlikely(cpu_is_offline(smp_processor_id()))) {
		return IRQ_HANDLED;
	}

	platform_timer_interrupt(irq, dev_id);

	new_itm = local_cpu_data->itm_next;

	if (!time_after(ia64_get_itc(), new_itm))
		printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
		       ia64_get_itc(), new_itm);

	profile_tick(CPU_PROFILING);

	while (1) {
		update_process_times(user_mode(get_irq_regs()));

		new_itm += local_cpu_data->itm_delta;

		if (smp_processor_id() == time_keeper_id) {
			/*
			 * Here we are in the timer irq handler. We have irqs locally
			 * disabled, but we don't know if the timer_bh is running on
			 * another CPU. We need to avoid an SMP race by acquiring the
			 * xtime_lock.
			 */
			write_seqlock(&xtime_lock);
			do_timer(1);
			local_cpu_data->itm_next = new_itm;
			write_sequnlock(&xtime_lock);
		} else
			local_cpu_data->itm_next = new_itm;

		if (time_after(new_itm, ia64_get_itc()))
			break;

		/*
		 * Allow IPIs to interrupt the timer loop.
		 */
		local_irq_enable();
		local_irq_disable();
	}

	do {
		/*
		 * If we're too close to the next clock tick for
		 * comfort, we increase the safety margin by
		 * intentionally dropping the next tick(s). We do NOT
		 * update itm.next because that would force us to call
		 * do_timer() which in turn would let our clock run
		 * too fast (with the potentially devastating effect
		 * of losing monotonicity of time).
		 */
		while (!time_after(new_itm, ia64_get_itc() + local_cpu_data->itm_delta/2))
			new_itm += local_cpu_data->itm_delta;
		ia64_set_itm(new_itm);
		/* double check, in case we got hit by a (slow) PMI: */
	} while (time_after_eq(ia64_get_itc(), new_itm));
	return IRQ_HANDLED;
}

/*
 * Encapsulate access to the itm structure for SMP.
 */
void
ia64_cpu_local_tick (void)
{
	int cpu = smp_processor_id();
	unsigned long shift = 0, delta;

	/* arrange for the cycle counter to generate a timer interrupt: */
	ia64_set_itv(IA64_TIMER_VECTOR);

	delta = local_cpu_data->itm_delta;
	/*
	 * Stagger the timer tick for each CPU so they don't occur all at (almost) the
	 * same time:
	 */
	if (cpu) {
		unsigned long hi = 1UL << ia64_fls(cpu);
		shift = (2*(cpu - hi) + 1) * delta/hi/2;
	}
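	/*
	 * With hi the largest power of two <= cpu, successive CPUs bisect
	 * the tick interval: CPU 1 is shifted by delta/2, CPU 2 by delta/4,
	 * CPU 3 by 3*delta/4, CPU 4 by delta/8, and so on, so no two CPUs
	 * share the same offset.
	 */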
	local_cpu_data->itm_next = ia64_get_itc() + delta + shift;
	ia64_set_itm(local_cpu_data->itm_next);
}

static int nojitter;

static int __init nojitter_setup(char *str)
{
	nojitter = 1;
	printk("Jitter checking for ITC timers disabled\n");
	return 1;
}

__setup("nojitter", nojitter_setup);
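
/* Usage: append "nojitter" to the kernel command line at boot. */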

void __devinit
ia64_init_itm (void)
{
	unsigned long platform_base_freq, itc_freq;
	struct pal_freq_ratio itc_ratio, proc_ratio;
	long status, platform_base_drift, itc_drift;

	/*
	 * According to SAL v2.6, we need to use a SAL call to determine the platform base
	 * frequency and then a PAL call to determine the frequency ratio between the ITC
	 * and the base frequency.
	 */
	status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
				    &platform_base_freq, &platform_base_drift);
	if (status != 0) {
		printk(KERN_ERR "SAL_FREQ_BASE_PLATFORM failed: %s\n", ia64_sal_strerror(status));
	} else {
		status = ia64_pal_freq_ratios(&proc_ratio, NULL, &itc_ratio);
		if (status != 0)
			printk(KERN_ERR "PAL_FREQ_RATIOS failed with status=%ld\n", status);
	}
	if (status != 0) {
		/* invent "random" values */
		printk(KERN_ERR
		       "SAL/PAL failed to obtain frequency info---inventing reasonable values\n");
		platform_base_freq = 100000000;
		platform_base_drift = -1;	/* no drift info */
		itc_ratio.num = 3;
		itc_ratio.den = 1;
	}
	if (platform_base_freq < 40000000) {
		printk(KERN_ERR "Platform base frequency %lu bogus---resetting to 75MHz!\n",
		       platform_base_freq);
		platform_base_freq = 75000000;
		platform_base_drift = -1;
	}
	if (!proc_ratio.den)
		proc_ratio.den = 1;	/* avoid division by zero */
	if (!itc_ratio.den)
		itc_ratio.den = 1;	/* avoid division by zero */

	itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
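
	/* ITC cycles per timer tick, rounded to the nearest cycle: */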
	local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
	printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
	       "ITC freq=%lu.%03luMHz", smp_processor_id(),
	       platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
	       itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);

	if (platform_base_drift != -1) {
		itc_drift = platform_base_drift*itc_ratio.num/itc_ratio.den;
		printk("+/-%ldppm\n", itc_drift);
	} else {
		itc_drift = -1;
		printk("\n");
	}

	local_cpu_data->proc_freq = (platform_base_freq*proc_ratio.num)/proc_ratio.den;
	local_cpu_data->itc_freq = itc_freq;
	local_cpu_data->cyc_per_usec = (itc_freq + USEC_PER_SEC/2) / USEC_PER_SEC;
	local_cpu_data->nsec_per_cyc = ((NSEC_PER_SEC<<IA64_NSEC_PER_CYC_SHIFT)
					+ itc_freq/2)/itc_freq;
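
	/*
	 * nsec_per_cyc is a fixed-point value: nanoseconds per ITC cycle
	 * scaled by 2^IA64_NSEC_PER_CYC_SHIFT and rounded to nearest, so a
	 * cycle count converts to nanoseconds with one multiply and one shift.
	 */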

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
#ifdef CONFIG_SMP
		/* On IA64 in an SMP configuration ITCs are never accurately synchronized.
		 * Jitter compensation requires a cmpxchg which may limit
		 * the scalability of the syscalls for retrieving time.
		 * The ITC synchronization is usually successful to within a few
		 * ITC ticks but this is not a sure thing. If you need to improve
		 * timer performance in SMP situations then boot the kernel with the
		 * "nojitter" option. However, doing so may result in time fluctuating (maybe
		 * even going backward) if the ITC offsets between the individual CPUs
		 * are too large.
		 */
		if (!nojitter)
			itc_jitter_data.itc_jitter = 1;
#endif
	} else
		/*
		 * ITC is drifty and we have not synchronized the ITCs in smpboot.c.
		 * ITC values may fluctuate significantly between processors.
		 * Clock should not be used for hrtimers. Mark itc as only
		 * useful for boot and testing.
		 *
		 * Note that jitter compensation is off! There is no point in
		 * synchronizing ITCs since there may be large offsets between
		 * them that change over time.
		 *
		 * The only way to fix this would be to repeatedly sync the
		 * ITCs. Until that time we have to avoid ITC.
		 */
		clocksource_itc.rating = 50;

	/* Setup the CPU local timer tick */
	ia64_cpu_local_tick();

	if (!itc_clocksource) {
		/* Sort out mult/shift values: */
		clocksource_itc.mult =
			clocksource_hz2mult(local_cpu_data->itc_freq,
					    clocksource_itc.shift);
		clocksource_register(&clocksource_itc);
		itc_clocksource = &clocksource_itc;
	}
}

static cycle_t itc_get_cycles(void)
{
	u64 lcycle, now, ret;

	if (!itc_jitter_data.itc_jitter)
		return get_cycles();

	lcycle = itc_jitter_data.itc_lastcycle;
	now = get_cycles();
	if (lcycle && time_after(lcycle, now))
		return lcycle;

	/*
	 * Keep track of the last timer value returned.
	 * In an SMP environment, a CPU can lose the cmpxchg race; if so,
	 * cmpxchg returns the value the winner of the contention stored.
	 * Use that value instead.
	 */
	ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now);
	if (unlikely(ret != lcycle))
		return ret;

	return now;
}
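
/*
 * With jitter compensation enabled, every return path above hands back a
 * value that has been published in itc_lastcycle, and the published value
 * only ever advances; readers therefore never see time move backward even
 * when the per-CPU ITCs are slightly out of step.
 */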

static struct irqaction timer_irqaction = {
	.handler =	timer_interrupt,
	.flags =	IRQF_DISABLED | IRQF_IRQPOLL,
	.name =		"timer"
};

void __devinit ia64_disable_timer(void)
{
	ia64_set_itv(1 << 16);	/* set the ITV mask bit to disable timer interrupts */
}

void __init
time_init (void)
{
	register_percpu_irq(IA64_TIMER_VECTOR, &timer_irqaction);
	efi_gettimeofday(&xtime);
	ia64_init_itm();

	/*
	 * Initialize wall_to_monotonic such that adding it to xtime will yield
	 * zero; the tv_nsec field must be normalized (i.e., 0 <= nsec < NSEC_PER_SEC).
	 */
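	/*
	 * E.g. if xtime is {1000, 500000000}, wall_to_monotonic becomes
	 * {-1001, 500000000}: negating both fields gives {-1000, -500000000},
	 * which normalizes by borrowing one second.
	 */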
	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}

/*
 * Generic udelay assumes that if preemption is allowed and the thread
 * migrates to another CPU, the ITC values are synchronized across all CPUs.
 */
static void
ia64_itc_udelay (unsigned long usecs)
{
	unsigned long start = ia64_get_itc();
	unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;

	while (time_before(ia64_get_itc(), end))
		cpu_relax();
}

void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;

void
udelay (unsigned long usecs)
{
	(*ia64_udelay)(usecs);
}
EXPORT_SYMBOL(udelay);

/* IA64 doesn't cache the timezone */
void update_vsyscall_tz(void)
{
}

void update_vsyscall(struct timespec *wall, struct clocksource *c)
{
	unsigned long flags;

	write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);

	/* copy fsyscall clock data */
	fsyscall_gtod_data.clk_mask = c->mask;
	fsyscall_gtod_data.clk_mult = c->mult;
	fsyscall_gtod_data.clk_shift = c->shift;
	fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;

	/* copy kernel time structures */
	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
	fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec
							+ wall->tv_sec;
	fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec
							+ wall->tv_nsec;

	/* normalize */
	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
		fsyscall_gtod_data.monotonic_time.tv_sec++;
	}

	write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
}
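
/*
 * The fsyscall gettimeofday path reads fsyscall_gtod_data without taking
 * the write side of the seqlock: readers sample the sequence counter, copy
 * the data, and retry if the counter changed, which is why the update above
 * must be wrapped in write_seqlock_irqsave()/write_sequnlock_irqrestore().
 */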