/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

#define XEN_SHIFT 22

/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)

static cycle_t xen_clocksource_read(void);

/* These are periodically updated in shared_info, and then copied here. */
struct shadow_time_info {
	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
	u32 tsc_to_nsec_mul;
	int tsc_shift;
	u32 version;
};

static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);

/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);

/* unused ns of stolen and blocked time */
static DEFINE_PER_CPU(u64, residual_stolen);
static DEFINE_PER_CPU(u64, residual_blocked);

/* return a consistent snapshot of 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}

static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}

/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
}

static void setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}

static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing*  */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time.  Passing NULL to
	   account_steal_time accounts the time as stolen. */
	stolen = runnable + offline + __get_cpu_var(residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__get_cpu_var(residual_stolen) = stolen;
	account_steal_time(NULL, ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time.  Passing idle to
	   account_steal_time accounts the time as idle/wait. */
	blocked += __get_cpu_var(residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__get_cpu_var(residual_blocked) = blocked;
	account_steal_time(idle_task(smp_processor_id()), ticks);
}
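
/*
 * Worked example of the residual bookkeeping above (made-up numbers, not
 * from a real trace): with HZ=250, NS_PER_TICK is 4,000,000 ns.  If
 * 9,500,000 ns of runnable+offline time have accumulated since the last
 * snapshot, the division yields 2 ticks of steal time and leaves
 * 1,500,000 ns in residual_stolen for the next timer interrupt.
 */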

/*
 * Xen sched_clock implementation.  Returns the number of unstolen
 * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
 * states.
 */
unsigned long long xen_sched_clock(void)
{
	struct vcpu_runstate_info state;
	cycle_t now;
	u64 ret;
	s64 offset;

	/*
	 * Ideally sched_clock should be called on a per-cpu basis
	 * anyway, so preempt should already be disabled, but that's
	 * not current practice at the moment.
	 */
	preempt_disable();

	now = xen_clocksource_read();

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	offset = now - state.state_entry_time;
	if (offset < 0)
		offset = 0;

	ret = state.time[RUNSTATE_blocked] +
		state.time[RUNSTATE_running] +
		offset;

	preempt_enable();

	return ret;
}

/* Get the CPU speed from Xen */
unsigned long xen_cpu_khz(void)
{
	u64 xen_khz = 1000000ULL << 32;
	const struct vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	do_div(xen_khz, info->tsc_to_system_mul);
	if (info->tsc_shift < 0)
		xen_khz <<= -info->tsc_shift;
	else
		xen_khz >>= info->tsc_shift;

	return xen_khz;
}
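
/*
 * tsc_to_system_mul is a 32.32 fixed-point factor: Xen converts a TSC
 * delta to nanoseconds by shifting it by tsc_shift and multiplying by
 * mul/2^32, so the CPU frequency in kHz is (10^6 << 32) / mul, corrected
 * for tsc_shift as above.  Illustrative numbers (not from real hardware):
 * a 2 GHz TSC could be described as tsc_shift = 0 and
 * tsc_to_system_mul = 0x80000000, giving (10^6 << 32) / 2^31 = 2,000,000 kHz.
 */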

/*
 * Reads a consistent set of time-base values from Xen, into a shadow data
 * area.
 */
static unsigned get_time_values_from_xen(void)
{
	struct vcpu_time_info   *src;
	struct shadow_time_info *dst;

	/* src is shared memory with the hypervisor, so we need to
	   make sure we get a consistent snapshot, even in the face of
	   being preempted. */
	src = &__get_cpu_var(xen_vcpu)->time;
	dst = &__get_cpu_var(shadow_time);
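
	/*
	 * vcpu_time_info follows a seqlock-style protocol: Xen makes the
	 * version odd before it starts rewriting the fields and even again
	 * once it has finished, so the loop below retries whenever the
	 * version is odd or changed while the fields were being copied.
	 */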
	do {
		dst->version = src->version;
		rmb();		/* fetch version before data */
		dst->tsc_timestamp     = src->tsc_timestamp;
		dst->system_timestamp  = src->system_time;
		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
		dst->tsc_shift         = src->tsc_shift;
		rmb();		/* test version after fetching data */
	} while ((src->version & 1) | (dst->version ^ src->version));

	return dst->version;
}

/*
 * Scale a 64-bit delta by shifting and multiplying by a 32-bit fraction,
 * yielding a 64-bit result.
 */
static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#endif

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	__asm__ (
		"mul  %5       ; "
		"mov  %4,%%eax ; "
		"mov  %%edx,%4 ; "
		"mul  %5       ; "
		"xor  %5,%5    ; "
		"add  %4,%%eax ; "
		"adc  %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif defined(__x86_64__)
	__asm__ (
		"mul %%rdx ; shrd $32,%%rdx,%%rax"
		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
#else
#error implement me!
#endif

	return product;
}
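
/*
 * The asm above computes the full 96-bit product of the (shifted) 64-bit
 * delta and the 32-bit fraction, keeping bits 32..95, i.e.
 * (delta * mul_frac) >> 32, which a plain 64-bit C multiply would
 * truncate.  Illustrative numbers (not from real hardware): with
 * tsc_shift = 0 and mul_frac = 0x80000000 (a 2 GHz TSC), a delta of
 * 2,000,000 cycles scales to 1,000,000 ns.
 */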

static u64 get_nsec_offset(struct shadow_time_info *shadow)
{
	u64 now, delta;

	now = native_read_tsc();
	delta = now - shadow->tsc_timestamp;
	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
}

static cycle_t xen_clocksource_read(void)
{
	struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
	cycle_t ret;
	unsigned version;

	do {
		version = get_time_values_from_xen();
		barrier();
		ret = shadow->system_timestamp + get_nsec_offset(shadow);
		barrier();
	} while (version != __get_cpu_var(xen_vcpu)->time.version);

	put_cpu_var(shadow_time);

	return ret;
}

static void xen_read_wallclock(struct timespec *ts)
{
	const struct shared_info *s = HYPERVISOR_shared_info;
	u32 version;
	u64 delta;
	struct timespec now;

	/* get wallclock at system boot */
	do {
		version = s->wc_version;
		rmb();		/* fetch version before time */
		now.tv_sec  = s->wc_sec;
		now.tv_nsec = s->wc_nsec;
		rmb();		/* fetch time before checking version */
	} while ((s->wc_version & 1) | (version ^ s->wc_version));

	delta = xen_clocksource_read();	/* time since system boot */
	delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;

	now.tv_nsec = do_div(delta, NSEC_PER_SEC);
	now.tv_sec = delta;

	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
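
/*
 * Example of the arithmetic above (made-up values): if Xen reports a
 * boot-time wallclock of wc_sec = 1,200,000,000 with wc_nsec = 0 and the
 * domain has been up for 90.5 seconds of system time, the resulting
 * timespec is 1,200,000,090 s + 500,000,000 ns.
 */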

unsigned long xen_get_wallclock(void)
{
	struct timespec ts;

	xen_read_wallclock(&ts);

	return ts.tv_sec;
}

int xen_set_wallclock(unsigned long now)
{
	/* do nothing for domU */
	return -1;
}

static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_read,
	.mask = ~0,
	.mult = 1<<XEN_SHIFT,		/* time directly in nanoseconds */
	.shift = XEN_SHIFT,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};

/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  This version of the hypervisor provides a
   single-shot timer with nanosecond resolution.  However, it shares
   its event channel with a 100Hz tick which is delivered while the
   vcpu is running.  We don't care about or use this tick, but it will
   cause the core time code to think the timer fired too soon, and
   will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (i.e., at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/

/*
  Get a hypervisor absolute time.  In theory we could maintain an
  offset between the kernel's time and the hypervisor's time, and
  apply that to a kernel's absolute timeout.  Unfortunately the
  hypervisor and kernel times can drift even if the kernel is using
  the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}

static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
		break;
	}
}

static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}

static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};

static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}

static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};

static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;
static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);

static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}

void xen_setup_timer(int cpu)
{
	const char *name;
	struct clock_event_device *evt;
	int irq;

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				      name, NULL);

	evt = &per_cpu(xen_clock_events, cpu);
	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of_cpu(cpu);
	evt->irq = irq;

	setup_runstate_info(cpu);
}

void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events));
}

__init void xen_time_init(void)
{
	int cpu = smp_processor_id();

	get_time_values_from_xen();

	clocksource_register(&xen_clocksource);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&xtime);
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();
}