/*
 * Xen time implementation.
 *
 * This is implemented in terms of a clocksource driver which uses
 * the hypervisor clock as a nanosecond timebase, and a clockevent
 * driver which uses the hypervisor's timer mechanism.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>

#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include "xen-ops.h"

#define XEN_SHIFT 22
/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP	100000
#define NS_PER_TICK	(1000000000LL / HZ)
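/*
 * For illustration: with HZ=100, NS_PER_TICK is 10,000,000ns, so each
 * "tick" accounted in do_stolen_accounting() below represents 10ms.
 */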
static cycle_t xen_clocksource_read(void);
/* These are periodically updated in shared_info, and then copied here. */
struct shadow_time_info {
	u64 tsc_timestamp;     /* TSC at last update of time vals.  */
	u64 system_timestamp;  /* Time, in nanosecs, since boot.    */
	u32 tsc_to_nsec_mul;
	int tsc_shift;
	u32 version;
};

static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);

/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate_snapshot);

/* unused ns of stolen and blocked time */
static DEFINE_PER_CPU(u64, residual_stolen);
static DEFINE_PER_CPU(u64, residual_blocked);
/* return a consistent snapshot of a 64-bit time/counter value */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}
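/*
 * Example of the race the loop above guards against (little-endian):
 * the value 0x00000000ffffffff is incremented to 0x0000000100000000
 * between our two reads.  We read h == 0, then l == 0 from the new
 * value; re-reading the high word gives 1 != 0, so we retry and end up
 * returning a value that was current at some point during the call.
 */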
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}
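/*
 * Xen stamps state_entry_time on every runstate change, so it doubles
 * as a sequence count here: if it reads the same before and after the
 * structure copy, the copy is a consistent snapshot.  get64() keeps the
 * 64-bit reads themselves atomic on 32-bit kernels.
 */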
/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
	return per_cpu(runstate, vcpu).state == RUNSTATE_runnable;
}
static void setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing*  */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time.  Passing NULL to
	   account_steal_time accounts the time as stolen. */
	stolen = runnable + offline + __get_cpu_var(residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = 0;
	while (stolen >= NS_PER_TICK) {
		ticks++;
		stolen -= NS_PER_TICK;
	}
	__get_cpu_var(residual_stolen) = stolen;
	account_steal_time(NULL, ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time.  Passing idle to
	   account_steal_time accounts the time as idle/wait. */
	blocked += __get_cpu_var(residual_blocked);

	if (blocked < 0)
		blocked = 0;

	ticks = 0;
	while (blocked >= NS_PER_TICK) {
		ticks++;
		blocked -= NS_PER_TICK;
	}
	__get_cpu_var(residual_blocked) = blocked;
	account_steal_time(idle_task(smp_processor_id()), ticks);
}
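/*
 * Worked example (assuming HZ=100, so NS_PER_TICK == 10,000,000):
 * if runnable+offline plus the previous residual comes to 25,000,000ns,
 * the loop above accounts 2 stolen ticks and carries the remaining
 * 5,000,000ns forward in residual_stolen for the next interrupt.
 */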
/*
 * Xen sched_clock implementation.  Returns the number of unstolen
 * nanoseconds, i.e. the nanoseconds the VCPU spent in the RUNNING and
 * BLOCKED states.
 */
unsigned long long xen_sched_clock(void)
{
	struct vcpu_runstate_info state;
	cycle_t now;
	u64 ret;
	s64 offset;

	/*
	 * Ideally sched_clock should be called on a per-cpu basis
	 * anyway, so preempt should already be disabled, but that's
	 * not current practice at the moment.
	 */
	preempt_disable();

	now = xen_clocksource_read();

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	offset = now - state.state_entry_time;
	if (offset < 0)
		offset = 0;

	ret = state.time[RUNSTATE_blocked] +
		state.time[RUNSTATE_running] +
		offset;

	preempt_enable();

	return ret;
}
/* Get the CPU speed from Xen */
unsigned long xen_cpu_khz(void)
{
	u64 xen_khz = 1000000ULL << 32;
	const struct vcpu_time_info *info =
		&HYPERVISOR_shared_info->vcpu_info[0].time;

	do_div(xen_khz, info->tsc_to_system_mul);
	if (info->tsc_shift < 0)
		xen_khz <<= -info->tsc_shift;
	else
		xen_khz >>= info->tsc_shift;

	return xen_khz;
}
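/*
 * This inverts the tsc->ns conversion used by scale_delta() below:
 * ns = ((tsc << tsc_shift) * tsc_to_system_mul) >> 32, hence
 * kHz = (10^6 << 32) / (tsc_to_system_mul << tsc_shift).  For
 * illustration, a 2GHz TSC with tsc_shift == 0 would have
 * tsc_to_system_mul == 0x80000000 (0.5 ns/cycle in 32.32 fixed point),
 * giving xen_khz == 2,000,000.
 */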
/*
 * Reads a consistent set of time-base values from Xen, into a shadow data
 * area.
 */
static unsigned get_time_values_from_xen(void)
{
	struct vcpu_time_info   *src;
	struct shadow_time_info *dst;

	/* src is shared memory with the hypervisor, so we need to
	   make sure we get a consistent snapshot, even in the face of
	   being preempted. */
	src = &__get_cpu_var(xen_vcpu)->time;
	dst = &__get_cpu_var(shadow_time);

	do {
		dst->version = src->version;
		rmb();		/* fetch version before data */
		dst->tsc_timestamp     = src->tsc_timestamp;
		dst->system_timestamp  = src->system_time;
		dst->tsc_to_nsec_mul   = src->tsc_to_system_mul;
		dst->tsc_shift         = src->tsc_shift;
		rmb();		/* test version after fetching data */
	} while ((src->version & 1) | (dst->version ^ src->version));

	return dst->version;
}
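/*
 * Xen increments src->version before it starts updating the time info
 * and again when it finishes, so an odd version means an update is in
 * flight.  The loop condition above therefore retries both when we
 * copied mid-update (version is odd) and when we raced with a complete
 * update (version changed under us).
 */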
/*
 * Scale a 64-bit delta by shifting it and then multiplying by a 32-bit
 * fraction, yielding a 64-bit result.
 */
static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
{
	u64 product;
#ifdef __i386__
	u32 tmp1, tmp2;
#endif

	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

#ifdef __i386__
	__asm__ (
		"mul  %5       ; "
		"mov  %4,%%eax ; "
		"mov  %%edx,%4 ; "
		"mul  %5       ; "
		"xor  %5,%5    ; "
		"add  %4,%%eax ; "
		"adc  %5,%%edx ; "
		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
#elif __x86_64__
	__asm__ (
		"mul %%rdx ; shrd $32,%%rdx,%%rax"
		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
#else
#error implement me!
#endif

	return product;
}
static u64 get_nsec_offset(struct shadow_time_info *shadow)
{
	u64 now, delta;

	now = native_read_tsc();
	delta = now - shadow->tsc_timestamp;
	return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
}
static cycle_t xen_clocksource_read(void)
{
	struct shadow_time_info *shadow = &get_cpu_var(shadow_time);
	cycle_t ret;
	unsigned version;

	do {
		version = get_time_values_from_xen();
		barrier();
		ret = shadow->system_timestamp + get_nsec_offset(shadow);
		barrier();
	} while (version != __get_cpu_var(xen_vcpu)->time.version);

	put_cpu_var(shadow_time);

	return ret;
}
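/*
 * Note the re-check against the live version rather than the shadow:
 * if Xen updated the time info while we were extrapolating from the
 * TSC, the timestamp and scale factors may have come from different
 * updates, so the whole read is retried with a fresh snapshot.
 */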
static void xen_read_wallclock(struct timespec *ts)
{
	const struct shared_info *s = HYPERVISOR_shared_info;
	u32 version;
	u64 delta;
	struct timespec now;

	/* get wallclock at system boot */
	do {
		version = s->wc_version;
		rmb();		/* fetch version before time */
		now.tv_sec  = s->wc_sec;
		now.tv_nsec = s->wc_nsec;
		rmb();		/* fetch time before checking version */
	} while ((s->wc_version & 1) | (version ^ s->wc_version));

	delta = xen_clocksource_read();	/* time since system boot */
	delta += now.tv_sec * (u64)NSEC_PER_SEC + now.tv_nsec;

	/* do_div leaves the quotient in delta and returns the remainder */
	now.tv_nsec = do_div(delta, NSEC_PER_SEC);
	now.tv_sec = delta;

	set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
unsigned long xen_get_wallclock(void)
{
	struct timespec ts;

	xen_read_wallclock(&ts);

	return ts.tv_sec;
}

int xen_set_wallclock(unsigned long now)
{
	/* do nothing for domU */
	return -1;
}
static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_read,
	.mask = ~0,
	.mult = 1<<XEN_SHIFT,		/* time directly in nanoseconds */
	.shift = XEN_SHIFT,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
   Xen clockevent implementation

   Xen has two clockevent implementations:

   The old timer_op one works with all released versions of Xen prior
   to version 3.0.4.  This version of the hypervisor provides a
   single-shot timer with nanosecond resolution.  However, it shares
   its event channel with a 100Hz tick which is delivered while the
   vcpu is running.  We don't care about or use this tick, but it will
   cause the core time code to think the timer fired too soon, and
   will end up resetting it each time.  It could be filtered, but
   doing so has complications when the ktime clocksource is not yet
   the xen clocksource (ie, at boot time).

   The new vcpu_op-based timer interface allows the tick timer period
   to be changed or turned off.  The tick timer is not useful as a
   periodic timer because events are only delivered to running vcpus.
   The one-shot timer can report when a timeout is in the past, so
   set_next_event is capable of returning -ETIME when appropriate.
   This interface is used when available.
*/
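/*
 * Which interface is available is detected at boot: xen_time_init()
 * tries VCPUOP_stop_periodic_timer, and switches xen_clockevent to the
 * vcpu_op implementation only if that hypercall succeeds.
 */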
/*
   Get a hypervisor absolute time.  In theory we could maintain an
   offset between the kernel's time and the hypervisor's time, and
   apply that to the kernel's absolute timeout.  Unfortunately the
   hypervisor and kernel times can drift even if the kernel is using
   the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
static s64 get_abs_timeout(unsigned long delta)
{
	return xen_clocksource_read() + delta;
}
static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
		break;
	}
}
static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}
static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};
static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}
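/*
 * VCPU_SSHOTTMR_future asks Xen to fail the hypercall with -ETIME when
 * the absolute timeout is already in the past, rather than delivering
 * an immediate event.  Returning -ETIME lets the clockevents core pick
 * a new expiry, which is the behaviour the comment block above
 * describes for this interface.
 */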
static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};
static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;
static DEFINE_PER_CPU(struct clock_event_device, xen_clock_events);
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events);
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}
void xen_setup_timer(int cpu)
{
	const char *name;
	struct clock_event_device *evt;
	int irq;

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);
	if (!name)
		name = "<timer kasprintf failed>";

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
				      name, NULL);

	evt = &per_cpu(xen_clock_events, cpu);
	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of_cpu(cpu);
	evt->irq = irq;

	setup_runstate_info(cpu);
}
void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events));
}
__init void xen_time_init(void)
{
	int cpu = smp_processor_id();

	get_time_values_from_xen();

	clocksource_register(&xen_clocksource);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&xtime);
	set_normalized_timespec(&wall_to_monotonic,
				-xtime.tv_sec, -xtime.tv_nsec);

	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();
}