2 * Common time routines among all ppc machines.
4 * Written by Cort Dougan (cort@cs.nmt.edu) to merge
5 * Paul Mackerras' version and mine for PReP and Pmac.
6 * MPC8xx/MBX changes by Dan Malek (dmalek@jlc.net).
7 * Converted for 64-bit by Mike Corrigan (mikejc@us.ibm.com)
9 * First round of bugfixes by Gabriel Paubert (paubert@iram.es)
10 * to make clock more stable (2.4.0-test5). The only thing
11 * that this code assumes is that the timebases have been synchronized
12 * by firmware on SMP and are never stopped (so never use sleep
13 * on SMP; nap and doze are OK).
15 * Speeded up do_gettimeofday by getting rid of references to
16 * xtime (which required locks for consistency). (mikejc@us.ibm.com)
18 * TODO (not necessarily in this file):
19 * - improve precision and reproducibility of timebase frequency
20 * measurement at boot time. (for iSeries, we calibrate the timebase
21 * against the Titan chip's clock.)
22 * - for astronomical applications: add a new function to get
23 * unambiguous timestamps even around leap seconds. This needs
24 * a new timestamp format and a good name.
26 * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
27 * "A Kernel Model for Precision Timekeeping" by Dave Mills
29 * This program is free software; you can redistribute it and/or
30 * modify it under the terms of the GNU General Public License
31 * as published by the Free Software Foundation; either version
32 * 2 of the License, or (at your option) any later version.
35 #include <linux/config.h>
36 #include <linux/errno.h>
37 #include <linux/module.h>
38 #include <linux/sched.h>
39 #include <linux/kernel.h>
40 #include <linux/param.h>
41 #include <linux/string.h>
43 #include <linux/interrupt.h>
44 #include <linux/timex.h>
45 #include <linux/kernel_stat.h>
46 #include <linux/time.h>
47 #include <linux/init.h>
48 #include <linux/profile.h>
49 #include <linux/cpu.h>
50 #include <linux/security.h>
51 #include <linux/percpu.h>
52 #include <linux/rtc.h>
55 #include <asm/processor.h>
56 #include <asm/nvram.h>
57 #include <asm/cache.h>
58 #include <asm/machdep.h>
59 #include <asm/uaccess.h>
63 #include <asm/div64.h>
65 #include <asm/vdso_datapage.h>
67 #include <asm/firmware.h>
69 #ifdef CONFIG_PPC_ISERIES
70 #include <asm/iseries/it_lp_queue.h>
71 #include <asm/iseries/hv_call_xm.h>
75 /* keep track of when we need to update the rtc */
76 time_t last_rtc_update;
77 extern int piranha_simulator;
78 #ifdef CONFIG_PPC_ISERIES
79 unsigned long iSeries_recal_titan = 0;
80 unsigned long iSeries_recal_tb = 0;
81 static unsigned long first_settimeofday = 1;
84 /* The decrementer counts down by 128 every 128ns on a 601. */
85 #define DECREMENTER_COUNT_601 (1000000000 / HZ)
87 #define XSEC_PER_SEC (1024*1024)
#ifdef CONFIG_PPC64
90 #define SCALE_XSEC(xsec, max) (((xsec) * max) / XSEC_PER_SEC)
#else
92 /* compute ((xsec << 12) * max) >> 32 */
93 #define SCALE_XSEC(xsec, max) mulhwu((xsec) << 12, max)
#endif
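/*
 * Usage sketch (illustrative only, not from the original source): split an
 * xsec count into whole seconds and microseconds.  The divide by
 * XSEC_PER_SEC is just a shift (it is a power of two), and SCALE_XSEC()
 * produces the microseconds without a divide, since
 * (frac * 1000000) / 2^20 == ((frac << 12) * 1000000) >> 32.
 */
static inline void xsec_to_timeval_sketch(u64 xsec, struct timeval *tv)
{
	tv->tv_sec = xsec / XSEC_PER_SEC;
	tv->tv_usec = SCALE_XSEC(xsec & (XSEC_PER_SEC - 1), 1000000);
}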
96 unsigned long tb_ticks_per_jiffy;
97 unsigned long tb_ticks_per_usec = 100; /* sane default */
98 EXPORT_SYMBOL(tb_ticks_per_usec);
99 unsigned long tb_ticks_per_sec;
102 unsigned long processor_freq;
103 DEFINE_SPINLOCK(rtc_lock);
104 EXPORT_SYMBOL_GPL(rtc_lock);
107 unsigned tb_to_ns_shift;
109 struct gettimeofday_struct do_gtod;
111 extern unsigned long wall_jiffies;
113 extern struct timezone sys_tz;
114 static long timezone_offset;
116 void ppc_adjtimex(void);
118 static unsigned adjusting_time = 0;
120 unsigned long ppc_proc_freq;
121 unsigned long ppc_tb_freq;
123 u64 tb_last_jiffy __cacheline_aligned_in_smp;
124 unsigned long tb_last_stamp;
127 * Note that on ppc32 this only stores the bottom 32 bits of
128 * the timebase value, but that's enough to tell when a jiffy has elapsed.
131 DEFINE_PER_CPU(unsigned long, last_jiffy);
133 void __delay(unsigned long loops)
141 /* the RTCL register wraps at 1000000000 */
142 diff = get_rtcl() - start;
145 } while (diff < loops);
148 while (get_tbl() - start < loops)
153 EXPORT_SYMBOL(__delay);
155 void udelay(unsigned long usecs)
157 __delay(tb_ticks_per_usec * usecs);
159 EXPORT_SYMBOL(udelay);
161 static __inline__ void timer_check_rtc(void)
164 * Update the RTC when needed; this should be performed on the
165 * right fraction of a second. Half or full second?
166 * Full second works on mk48t59 clocks, others need testing.
167 * Note that this update is basically only used through
168 * the adjtimex system calls. Setting the HW clock in
169 * any other way is a /dev/rtc and userland business.
170 * This is still wrong by -0.5/+1.5 jiffies because of the
171 * timer interrupt resolution and possible delay, but here we
172 * hit a quantization limit which can only be solved by higher
173 * resolution timers and decoupling time management from timer
174 * interrupts. This is also wrong on the clocks
175 * which require being written at the half-second boundary.
176 * We should have an rtc call that only sets the minutes and
177 * seconds like on Intel to avoid problems with non-UTC clocks.
179 if (ppc_md.set_rtc_time && ntp_synced() &&
180 xtime.tv_sec - last_rtc_update >= 659 &&
181 abs((xtime.tv_nsec/1000) - (1000000-1000000/HZ)) < 500000/HZ &&
182 jiffies - wall_jiffies == 1) {
184 to_tm(xtime.tv_sec + 1 + timezone_offset, &tm);
187 if (ppc_md.set_rtc_time(&tm) == 0)
188 last_rtc_update = xtime.tv_sec + 1;
190 /* Try again one minute later */
191 last_rtc_update += 60;
196 * This version of gettimeofday has microsecond resolution.
198 static inline void __do_gettimeofday(struct timeval *tv, u64 tb_val)
200 unsigned long sec, usec;
202 struct gettimeofday_vars *temp_varp;
203 u64 temp_tb_to_xs, temp_stamp_xsec;
206 * These calculations are faster (they get rid of divides)
207 * if done in units of 1/2^20 rather than microseconds.
208 * The conversion to microseconds at the end is done
209 * without a divide (and in fact, without a multiply)
211 temp_varp = do_gtod.varp;
212 tb_ticks = tb_val - temp_varp->tb_orig_stamp;
213 temp_tb_to_xs = temp_varp->tb_to_xs;
214 temp_stamp_xsec = temp_varp->stamp_xsec;
215 xsec = temp_stamp_xsec + mulhdu(tb_ticks, temp_tb_to_xs);
216 sec = xsec / XSEC_PER_SEC;
217 usec = (unsigned long)xsec & (XSEC_PER_SEC - 1);
218 usec = SCALE_XSEC(usec, 1000000);
224 void do_gettimeofday(struct timeval *tv)
227 /* do this the old way */
228 unsigned long flags, seq;
229 unsigned int sec, nsec, usec, lost;
232 seq = read_seqbegin_irqsave(&xtime_lock, flags);
234 nsec = xtime.tv_nsec + tb_ticks_since(tb_last_stamp);
235 lost = jiffies - wall_jiffies;
236 } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
237 usec = nsec / 1000 + lost * (1000000 / HZ);
238 while (usec >= 1000000) {
246 __do_gettimeofday(tv, get_tb());
249 EXPORT_SYMBOL(do_gettimeofday);
251 /* Synchronize xtime with do_gettimeofday */
253 static inline void timer_sync_xtime(unsigned long cur_tb)
256 /* why do we do this? */
257 struct timeval my_tv;
259 __do_gettimeofday(&my_tv, cur_tb);
261 if (xtime.tv_sec <= my_tv.tv_sec) {
262 xtime.tv_sec = my_tv.tv_sec;
263 xtime.tv_nsec = my_tv.tv_usec * 1000;
269 * There are two copies of tb_to_xs and stamp_xsec so that no
270 * lock is needed to access and use these values in
271 * do_gettimeofday. We alternate the copies and as long as a
272 * reasonable time elapses between changes, there will never
273 * be inconsistent values. ntpd has a minimum of one minute between updates.
276 static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
280 struct gettimeofday_vars *temp_varp;
282 temp_idx = (do_gtod.var_idx == 0);
283 temp_varp = &do_gtod.vars[temp_idx];
285 temp_varp->tb_to_xs = new_tb_to_xs;
286 temp_varp->tb_orig_stamp = new_tb_stamp;
287 temp_varp->stamp_xsec = new_stamp_xsec;
289 do_gtod.varp = temp_varp;
290 do_gtod.var_idx = temp_idx;
293 * tb_update_count is used to allow the userspace gettimeofday code
294 * to assure itself that it sees a consistent view of the tb_to_xs and
295 * stamp_xsec variables. It reads the tb_update_count, then reads
296 * tb_to_xs and stamp_xsec and then reads tb_update_count again. If
297 * the two values of tb_update_count match and are even then the
298 * tb_to_xs and stamp_xsec values are consistent. If not, then it
299 * loops back and reads them again until this criterion is met (see the reader-side sketch below).
301 ++(vdso_data->tb_update_count);
303 vdso_data->tb_orig_stamp = new_tb_stamp;
304 vdso_data->stamp_xsec = new_stamp_xsec;
305 vdso_data->tb_to_xs = new_tb_to_xs;
306 vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
307 vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
309 ++(vdso_data->tb_update_count);
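/*
 * Reader-side sketch of the tb_update_count protocol described above
 * (illustrative only; the real consumer is the userspace vdso code, and
 * the helper below is not part of the original file):
 */
static inline void vdso_read_gtod_sketch(u64 *orig_stamp, u64 *stamp_xsec,
					 u64 *tb_to_xs_val)
{
	u32 count;

	do {
		count = vdso_data->tb_update_count;
		smp_rmb();
		*orig_stamp = vdso_data->tb_orig_stamp;
		*stamp_xsec = vdso_data->stamp_xsec;
		*tb_to_xs_val = vdso_data->tb_to_xs;
		smp_rmb();
	} while ((count & 1) || count != vdso_data->tb_update_count);
}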
313 * When the timebase - tb_orig_stamp gets too big, we do a manipulation
314 * between tb_orig_stamp and stamp_xsec. The goal here is to keep the
315 * difference tb - tb_orig_stamp small enough to always fit inside a
316 * 32-bit number. This is a requirement of our fast 32-bit userland
317 * implementation in the vdso. If we "miss" a call to this function
318 * (interrupt latency, CPU locked in a spinlock, ...) and we end up
319 * with too big a difference, then the vdso will fall back to calling the syscall.
322 static __inline__ void timer_recalc_offset(u64 cur_tb)
324 unsigned long offset;
329 offset = cur_tb - do_gtod.varp->tb_orig_stamp;
330 if ((offset & 0x80000000u) == 0)
332 new_stamp_xsec = do_gtod.varp->stamp_xsec
333 + mulhdu(offset, do_gtod.varp->tb_to_xs);
334 update_gtod(cur_tb, new_stamp_xsec, do_gtod.varp->tb_to_xs);
338 unsigned long profile_pc(struct pt_regs *regs)
340 unsigned long pc = instruction_pointer(regs);
342 if (in_lock_functions(pc))
347 EXPORT_SYMBOL(profile_pc);
350 #ifdef CONFIG_PPC_ISERIES
353 * This function recalibrates the timebase based on the 49-bit time-of-day
354 * value in the Titan chip. The Titan is much more accurate than the value
355 * returned by the service processor for the timebase frequency.
358 static void iSeries_tb_recal(void)
360 struct div_result divres;
361 unsigned long titan, tb;
363 titan = HvCallXm_loadTod();
364 if ( iSeries_recal_titan ) {
365 unsigned long tb_ticks = tb - iSeries_recal_tb;
366 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
367 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
368 unsigned long new_tb_ticks_per_jiffy = (new_tb_ticks_per_sec+(HZ/2))/HZ;
369 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
371 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
372 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
374 if ( tick_diff < 0 ) {
375 tick_diff = -tick_diff;
379 if ( tick_diff < tb_ticks_per_jiffy/25 ) {
380 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
381 new_tb_ticks_per_jiffy, sign, tick_diff );
382 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
383 tb_ticks_per_sec = new_tb_ticks_per_sec;
384 div128_by_32( XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres );
385 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
386 tb_to_xs = divres.result_low;
387 do_gtod.varp->tb_to_xs = tb_to_xs;
388 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
389 vdso_data->tb_to_xs = tb_to_xs;
392 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
393 " new tb_ticks_per_jiffy = %lu\n"
394 " old tb_ticks_per_jiffy = %lu\n",
395 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
399 iSeries_recal_titan = titan;
400 iSeries_recal_tb = tb;
405 * For iSeries shared processors, we have to let the hypervisor
406 * set the hardware decrementer. We set a virtual decrementer
407 * in the lppaca and call the hypervisor if the virtual
408 * decrementer is less than the current value in the hardware
409 * decrementer. (almost always the new decrementer value will
410 * be greater than the current hardware decrementer so the hypervisor
411 * call will not be needed)
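/*
 * Minimal sketch of that policy (illustrative only -- the lppaca field name
 * and the HvCall_setVirtualDecr() helper are assumptions made for this
 * example, not taken from this file):
 */
static inline void set_virtual_decrementer_sketch(int val)
{
	get_lppaca()->virtual_decr = val;	/* assumed lppaca field name */
	if (val < get_dec())			/* hardware decr would fire too late */
		HvCall_setVirtualDecr();	/* assumed hypervisor call */
}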
415 * timer_interrupt - gets called when the decrementer overflows,
416 * with interrupts disabled.
418 void timer_interrupt(struct pt_regs * regs)
421 int cpu = smp_processor_id();
425 if (atomic_read(&ppc_n_lost_interrupts) != 0)
431 profile_tick(CPU_PROFILING, regs);
433 #ifdef CONFIG_PPC_ISERIES
434 get_lppaca()->int_dword.fields.decr_int = 0;
437 while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
438 >= tb_ticks_per_jiffy) {
439 /* Update last_jiffy */
440 per_cpu(last_jiffy, cpu) += tb_ticks_per_jiffy;
441 /* Handle RTCL overflow on 601 */
442 if (__USE_RTC() && per_cpu(last_jiffy, cpu) >= 1000000000)
443 per_cpu(last_jiffy, cpu) -= 1000000000;
446 * We cannot disable the decrementer, so in the period
447 * between this cpu's being marked offline in cpu_online_map
448 * and calling stop-self, it is taking timer interrupts.
449 * Avoid calling into the scheduler rebalancing code if this is the case.
452 if (!cpu_is_offline(cpu))
453 update_process_times(user_mode(regs));
456 * No need to check whether cpu is offline here; boot_cpuid
457 * should have been fixed up by now.
459 if (cpu != boot_cpuid)
462 write_seqlock(&xtime_lock);
463 tb_last_jiffy += tb_ticks_per_jiffy;
464 tb_last_stamp = per_cpu(last_jiffy, cpu);
465 timer_recalc_offset(tb_last_jiffy);
467 timer_sync_xtime(tb_last_jiffy);
469 write_sequnlock(&xtime_lock);
470 if (adjusting_time && (time_adjust == 0))
474 next_dec = tb_ticks_per_jiffy - ticks;
477 #ifdef CONFIG_PPC_ISERIES
478 if (hvlpevent_is_pending())
479 process_hvlpevents(regs);
483 /* collect purr register values often, for accurate calculations */
484 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
485 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
486 cu->current_tb = mfspr(SPRN_PURR);
493 void wakeup_decrementer(void)
497 set_dec(tb_ticks_per_jiffy);
499 * We don't expect this to be called on a machine with a 601,
500 * so using get_tbl is fine.
502 tb_last_stamp = tb_last_jiffy = get_tb();
504 per_cpu(last_jiffy, i) = tb_last_stamp;
508 void __init smp_space_timers(unsigned int max_cpus)
511 unsigned long offset = tb_ticks_per_jiffy / max_cpus;
512 unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
514 /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
515 previous_tb -= tb_ticks_per_jiffy;
517 if (i != boot_cpuid) {
518 previous_tb += offset;
519 per_cpu(last_jiffy, i) = previous_tb;
526 * Scheduler clock - returns current time in nanosec units.
528 * Note: mulhdu(a, b) (multiply high double unsigned) returns
529 * the high 64 bits of a * b, i.e. (a * b) >> 64, where a and b
530 * are 64-bit unsigned numbers.
532 unsigned long long sched_clock(void)
536 return mulhdu(get_tb(), tb_to_ns_scale) << tb_to_ns_shift;
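/*
 * Cross-check sketch (illustrative only; it assumes a compiler with
 * __int128, which the 32-bit builds would not have): the mulhdu()/shift
 * pair above computes ((tb * tb_to_ns_scale) >> 64) << tb_to_ns_shift,
 * i.e. it multiplies the timebase by the 64.64 fixed-point value
 * 10^9 / tb_ticks_per_sec.
 */
static inline unsigned long long tb_to_ns_reference(unsigned long long tb)
{
	return (unsigned long long)
		((((unsigned __int128)tb * tb_to_ns_scale) >> 64)
		 << tb_to_ns_shift);
}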
539 int do_settimeofday(struct timespec *tv)
541 time_t wtm_sec, new_sec = tv->tv_sec;
542 long wtm_nsec, new_nsec = tv->tv_nsec;
545 u64 new_xsec, tb_delta_xs;
547 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
550 write_seqlock_irqsave(&xtime_lock, flags);
553 * Updating the RTC is not the job of this code. If the time is
554 * stepped under NTP, the RTC will be updated after STA_UNSYNC
555 * is cleared. Tools like clock/hwclock either copy the RTC
556 * to the system time, in which case there is no point in writing
557 * to the RTC again, or write to the RTC but then they don't call
558 * settimeofday to perform this operation.
560 #ifdef CONFIG_PPC_ISERIES
561 if (first_settimeofday) {
563 first_settimeofday = 0;
566 tb_delta = tb_ticks_since(tb_last_stamp);
567 tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy;
568 tb_delta_xs = mulhdu(tb_delta, do_gtod.varp->tb_to_xs);
570 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - new_sec);
571 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - new_nsec);
573 set_normalized_timespec(&xtime, new_sec, new_nsec);
574 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
576 /* In case of a large backwards jump in time with NTP, we want the
577 * clock to be updated as soon as the PLL is again in lock.
579 last_rtc_update = new_sec - 658;
585 new_xsec = (u64)new_nsec * XSEC_PER_SEC;
586 do_div(new_xsec, NSEC_PER_SEC);
588 new_xsec += (u64)new_sec * XSEC_PER_SEC - tb_delta_xs;
589 update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
591 vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
592 vdso_data->tz_dsttime = sys_tz.tz_dsttime;
594 write_sequnlock_irqrestore(&xtime_lock, flags);
599 EXPORT_SYMBOL(do_settimeofday);
601 void __init generic_calibrate_decr(void)
603 struct device_node *cpu;
608 * The cpu node should have a timebase-frequency property
609 * to tell us the rate at which the decrementer counts.
611 cpu = of_find_node_by_type(NULL, "cpu");
613 ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */
616 fp = (unsigned int *)get_property(cpu, "timebase-frequency",
624 printk(KERN_ERR "WARNING: Estimating decrementer frequency "
627 ppc_proc_freq = DEFAULT_PROC_FREQ;
630 fp = (unsigned int *)get_property(cpu, "clock-frequency",
638 /* Set the time base to zero */
642 /* Clear any pending timer interrupts */
643 mtspr(SPRN_TSR, TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS);
645 /* Enable decrementer interrupt */
646 mtspr(SPRN_TCR, TCR_DIE);
649 printk(KERN_ERR "WARNING: Estimating processor frequency "
655 unsigned long get_boot_time(void)
659 if (ppc_md.get_boot_time)
660 return ppc_md.get_boot_time();
661 if (!ppc_md.get_rtc_time)
663 ppc_md.get_rtc_time(&tm);
664 return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
665 tm.tm_hour, tm.tm_min, tm.tm_sec);
668 /* This function is only called on the boot processor */
669 void __init time_init(void)
672 unsigned long tm = 0;
673 struct div_result res;
677 if (ppc_md.time_init != NULL)
678 timezone_offset = ppc_md.time_init();
681 /* 601 processor: dec counts down by 128 every 128ns */
682 ppc_tb_freq = 1000000000;
683 tb_last_stamp = get_rtcl();
684 tb_last_jiffy = tb_last_stamp;
686 /* Normal PowerPC with timebase register */
687 ppc_md.calibrate_decr();
688 printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n",
689 ppc_tb_freq / 1000000, ppc_tb_freq % 1000000);
690 printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n",
691 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000);
692 tb_last_stamp = tb_last_jiffy = get_tb();
695 tb_ticks_per_jiffy = ppc_tb_freq / HZ;
696 tb_ticks_per_sec = tb_ticks_per_jiffy * HZ;
697 tb_ticks_per_usec = ppc_tb_freq / 1000000;
698 tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
699 div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
700 tb_to_xs = res.result_low;
703 * Compute scale factor for sched_clock.
704 * The calibrate_decr() function has set tb_ticks_per_sec,
705 * which is the timebase frequency.
706 * We compute 1e9 * 2^64 / tb_ticks_per_sec and interpret
707 * the 128-bit result as a 64.64 fixed-point number.
708 * We then shift that number right until it is less than 1.0,
709 * giving us the scale factor and shift count to use in sched_clock().
712 div128_by_32(1000000000, 0, tb_ticks_per_sec, &res);
713 scale = res.result_low;
714 for (shift = 0; res.result_high != 0; ++shift) {
715 scale = (scale >> 1) | (res.result_high << 63);
716 res.result_high >>= 1;
718 tb_to_ns_scale = scale;
719 tb_to_ns_shift = shift;
721 #ifdef CONFIG_PPC_ISERIES
722 if (!piranha_simulator)
724 tm = get_boot_time();
726 write_seqlock_irqsave(&xtime_lock, flags);
729 do_gtod.varp = &do_gtod.vars[0];
731 do_gtod.varp->tb_orig_stamp = tb_last_jiffy;
732 __get_cpu_var(last_jiffy) = tb_last_stamp;
733 do_gtod.varp->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
734 do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
735 do_gtod.varp->tb_to_xs = tb_to_xs;
736 do_gtod.tb_to_us = tb_to_us;
738 vdso_data->tb_orig_stamp = tb_last_jiffy;
739 vdso_data->tb_update_count = 0;
740 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
741 vdso_data->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
742 vdso_data->tb_to_xs = tb_to_xs;
746 /* If platform provided a timezone (pmac), we correct the time */
747 if (timezone_offset) {
748 sys_tz.tz_minuteswest = -timezone_offset / 60;
749 sys_tz.tz_dsttime = 0;
750 xtime.tv_sec -= timezone_offset;
753 last_rtc_update = xtime.tv_sec;
754 set_normalized_timespec(&wall_to_monotonic,
755 -xtime.tv_sec, -xtime.tv_nsec);
756 write_sequnlock_irqrestore(&xtime_lock, flags);
758 /* Not exact, but the timer interrupt takes care of this */
759 set_dec(tb_ticks_per_jiffy);
763 * After adjtimex is called, adjust the conversion of tb ticks
764 * to microseconds to keep do_gettimeofday synchronized with ntpd.
767 * Use the time_adjust, time_freq and time_offset computed by adjtimex to
768 * adjust the frequency.
771 /* #define DEBUG_PPC_ADJTIMEX 1 */
773 void ppc_adjtimex(void)
776 unsigned long den, new_tb_ticks_per_sec, tb_ticks, old_xsec,
777 new_tb_to_xs, new_xsec, new_stamp_xsec;
778 unsigned long tb_ticks_per_sec_delta;
779 long delta_freq, ltemp;
780 struct div_result divres;
782 long singleshot_ppm = 0;
785 * Compute parts per million frequency adjustment to
786 * accomplish the time adjustment implied by time_offset to be
787 * applied over the elapsed time indicated by time_constant.
788 * Use SHIFT_USEC to get it into the same units as time_freq.
791 if ( time_offset < 0 ) {
792 ltemp = -time_offset;
793 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
794 ltemp >>= SHIFT_KG + time_constant;
798 ltemp <<= SHIFT_USEC - SHIFT_UPDATE;
799 ltemp >>= SHIFT_KG + time_constant;
802 /* If there is a single shot time adjustment in progress */
804 #ifdef DEBUG_PPC_ADJTIMEX
805 printk("ppc_adjtimex: ");
806 if ( adjusting_time == 0 )
808 printk("single shot time_adjust = %ld\n", time_adjust);
814 * Compute parts per million frequency adjustment
815 * to match time_adjust
817 singleshot_ppm = tickadj * HZ;
819 * The adjustment should be tickadj*HZ to match the code in
820 * linux/kernel/timer.c, but experiments show that this is too
821 * large. 3/4 of tickadj*HZ seems about right
823 singleshot_ppm -= singleshot_ppm / 4;
824 /* Use SHIFT_USEC to get it into the same units as time_freq */
825 singleshot_ppm <<= SHIFT_USEC;
826 if ( time_adjust < 0 )
827 singleshot_ppm = -singleshot_ppm;
830 #ifdef DEBUG_PPC_ADJTIMEX
831 if ( adjusting_time )
832 printk("ppc_adjtimex: ending single shot time_adjust\n");
837 /* Add up all of the frequency adjustments */
838 delta_freq = time_freq + ltemp + singleshot_ppm;
841 * Compute a new value for tb_ticks_per_sec based on
842 * the frequency adjustment
844 den = 1000000 * (1 << (SHIFT_USEC - 8));
845 if ( delta_freq < 0 ) {
846 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( (-delta_freq) >> (SHIFT_USEC - 8))) / den;
847 new_tb_ticks_per_sec = tb_ticks_per_sec + tb_ticks_per_sec_delta;
850 tb_ticks_per_sec_delta = ( tb_ticks_per_sec * ( delta_freq >> (SHIFT_USEC - 8))) / den;
851 new_tb_ticks_per_sec = tb_ticks_per_sec - tb_ticks_per_sec_delta;
854 #ifdef DEBUG_PPC_ADJTIMEX
855 printk("ppc_adjtimex: ltemp = %ld, time_freq = %ld, singleshot_ppm = %ld\n", ltemp, time_freq, singleshot_ppm);
856 printk("ppc_adjtimex: tb_ticks_per_sec - base = %ld new = %ld\n", tb_ticks_per_sec, new_tb_ticks_per_sec);
860 * Compute a new value of tb_to_xs (used to convert tb to
861 * microseconds) and a new value of stamp_xsec which is the
862 * time (in 1/2^20 second units) corresponding to
863 * tb_orig_stamp. This new value of stamp_xsec compensates
864 * for the change in frequency (implied by the new tb_to_xs)
865 * which guarantees that the current time remains the same.
867 write_seqlock_irqsave( &xtime_lock, flags );
868 tb_ticks = get_tb() - do_gtod.varp->tb_orig_stamp;
869 div128_by_32(1024*1024, 0, new_tb_ticks_per_sec, &divres);
870 new_tb_to_xs = divres.result_low;
871 new_xsec = mulhdu(tb_ticks, new_tb_to_xs);
873 old_xsec = mulhdu(tb_ticks, do_gtod.varp->tb_to_xs);
874 new_stamp_xsec = do_gtod.varp->stamp_xsec + old_xsec - new_xsec;
876 update_gtod(do_gtod.varp->tb_orig_stamp, new_stamp_xsec, new_tb_to_xs);
878 write_sequnlock_irqrestore( &xtime_lock, flags );
879 #endif /* CONFIG_PPC64 */
884 #define STARTOFTIME 1970
885 #define SECDAY 86400L
886 #define SECYR (SECDAY * 365)
887 #define leapyear(year) ((year) % 4 == 0 && \
888 ((year) % 100 != 0 || (year) % 400 == 0))
889 #define days_in_year(a) (leapyear(a) ? 366 : 365)
890 #define days_in_month(a) (month_days[(a) - 1])
892 static int month_days[12] = {
893 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
897 * This only works for the Gregorian calendar - i.e. after 1752 (in the UK)
899 void GregorianDay(struct rtc_time * tm)
904 int MonthOffset[] = { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 };
906 lastYear = tm->tm_year - 1;
909 * Number of leap corrections to apply up to end of last year
911 leapsToDate = lastYear / 4 - lastYear / 100 + lastYear / 400;
914 * This year is a leap year if it is divisible by 4 except when it is
915 * divisible by 100 unless it is divisible by 400
917 * e.g. 1904 was a leap year, 1900 was not, and 1996 and 2000 were.
919 day = tm->tm_mon > 2 && leapyear(tm->tm_year);
921 day += lastYear*365 + leapsToDate + MonthOffset[tm->tm_mon-1] +
924 tm->tm_wday = day % 7;
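/*
 * Worked example as a sanity check (not in the original source): for
 * 1 Jan 2000, lastYear = 1999, leapsToDate = 1999/4 - 1999/100 + 1999/400
 * = 499 - 19 + 4 = 484, day = 0 (January, so no leap-day correction)
 * + 1999*365 + 484 + MonthOffset[0] + 1 = 730120, and 730120 % 7 = 6,
 * i.e. Saturday (with 0 = Sunday), which is indeed correct.
 */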
927 void to_tm(int tim, struct rtc_time * tm)
930 register long hms, day;
935 /* Hours, minutes, seconds are easy */
936 tm->tm_hour = hms / 3600;
937 tm->tm_min = (hms % 3600) / 60;
938 tm->tm_sec = (hms % 3600) % 60;
940 /* Number of years in days */
941 for (i = STARTOFTIME; day >= days_in_year(i); i++)
942 day -= days_in_year(i);
945 /* Number of months in days left */
946 if (leapyear(tm->tm_year))
947 days_in_month(FEBRUARY) = 29;
948 for (i = 1; day >= days_in_month(i); i++)
949 day -= days_in_month(i);
950 days_in_month(FEBRUARY) = 28;
953 /* Days are what is left over (+1) from all that. */
954 tm->tm_mday = day + 1;
957 * Determine the day of week
962 /* Auxiliary function to compute scaling factors */
963 /* Actually the choice of a timebase running at 1/4 of the bus
964 * frequency giving resolution of a few tens of nanoseconds is quite nice.
965 * It makes this computation very precise (27-28 bits typically) which
966 * is optimistic considering the stability of most processor clock
967 * oscillators and the precision with which the timebase frequency
968 * is measured but does not harm.
970 unsigned mulhwu_scale_factor(unsigned inscale, unsigned outscale)
972 unsigned mlt=0, tmp, err;
973 /* No concern for performance, it's done once: use a stupid
974 * but safe and compact method to find the multiplier.
977 for (tmp = 1U<<31; tmp != 0; tmp >>= 1) {
978 if (mulhwu(inscale, mlt|tmp) < outscale)
982 /* We might still be off by 1 for the best approximation.
983 * A side effect of this is that if outscale is too large
984 * the returned value will be zero.
985 * Many corner cases have been checked and seem to work,
986 * some might have been forgotten in the test however.
989 err = inscale * (mlt+1);
990 if (err <= inscale/2)
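/*
 * Usage sketch (illustrative only): time_init() computes
 *	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 * so a 32-bit tick count can be turned into microseconds with a single
 * multiply-high, since mulhwu(ticks, tb_to_us) ~= ticks * 1000000 / ppc_tb_freq.
 */
static inline unsigned int tb_ticks_to_usec_sketch(unsigned int ticks)
{
	return mulhwu(ticks, tb_to_us);
}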
996 * Divide a 128-bit dividend by a 32-bit divisor, leaving a 128-bit result.
999 void div128_by_32(u64 dividend_high, u64 dividend_low,
1000 unsigned divisor, struct div_result *dr)
1002 unsigned long a, b, c, d;
1003 unsigned long w, x, y, z;
1006 a = dividend_high >> 32;
1007 b = dividend_high & 0xffffffff;
1008 c = dividend_low >> 32;
1009 d = dividend_low & 0xffffffff;
1012 ra = ((u64)(a - (w * divisor)) << 32) + b;
1014 rb = ((u64) do_div(ra, divisor) << 32) + c;
1017 rc = ((u64) do_div(rb, divisor) << 32) + d;
1020 do_div(rc, divisor);
1023 dr->result_high = ((u64)w << 32) + x;
1024 dr->result_low = ((u64)y << 32) + z;
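/*
 * Usage sketch (illustrative only): this is how tb_to_xs is derived above:
 *	div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res);
 *	tb_to_xs = res.result_low;
 * i.e. tb_to_xs = 2^84 / tb_ticks_per_sec, so that
 *	mulhdu(tb_ticks, tb_to_xs) ~= tb_ticks * 2^20 / tb_ticks_per_sec
 * converts an elapsed tick count straight into 1/2^20-second (xsec) units.
 */
static inline u64 tb_ticks_to_xsec_sketch(u64 tb_ticks, u64 tb_to_xs_val)
{
	return mulhdu(tb_ticks, tb_to_xs_val);
}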