/*P:010
 * A hypervisor allows multiple Operating Systems to run on a single machine.
 * To quote David Wheeler: "Any problem in computer science can be solved with
 * another layer of indirection."
 *
 * We keep things simple in two ways.  First, we start with a normal Linux
 * kernel and insert a module (lg.ko) which allows us to run other Linux
 * kernels the same way we'd run processes.  We call the first kernel the Host,
 * and the others the Guests.  The program which sets up and configures Guests
 * (such as the example in Documentation/lguest/lguest.c) is called the
 * Launcher.
 *
 * Secondly, we only run specially modified Guests, not normal kernels: setting
 * CONFIG_LGUEST_GUEST to "y" compiles this file into the kernel so it knows
 * how to be a Guest at boot time.  This means that you can use the same kernel
 * you boot normally (ie. as a Host) as a Guest.
 *
 * These Guests know that they cannot do privileged operations, such as disable
 * interrupts, and that they have to ask the Host to do such things explicitly.
 * This file consists of all the replacements for such low-level native
 * hardware operations: these special Guest versions call the Host.
 *
 * So how does the kernel know it's a Guest?  We'll see that later, but let's
 * just say that we end up here, where we replace the native functions in the
 * various "paravirt" structures with our Guest versions, then boot like
 * normal. :*/

/*
 * Copyright (C) 2006, Rusty Russell <rusty@rustcorp.com.au> IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/start_kernel.h>
#include <linux/string.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/lguest.h>
#include <linux/lguest_launcher.h>
#include <linux/virtio_console.h>
#include <linux/pm.h>
#include <asm/lguest.h>
#include <asm/paravirt.h>
#include <asm/param.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/e820.h>
#include <asm/reboot.h>		/* for struct machine_ops */

/*G:010 Welcome to the Guest!
 *
 * The Guest in our tale is a simple creature: identical to the Host but
 * behaving in simplified but equivalent ways.  In particular, the Guest is the
 * same kernel as the Host (or at least, built from the same source code). :*/

struct lguest_data lguest_data = {
	.hcall_status = { [0 ... LHCALL_RING_SIZE-1] = 0xFF },
	.noirq_start = (u32)lguest_noirq_start,
	.noirq_end = (u32)lguest_noirq_end,
	.kernel_address = PAGE_OFFSET,
	.blocked_interrupts = { 1 }, /* Block timer interrupts */
	.syscall_vec = SYSCALL_VECTOR,
};

/*G:037 async_hcall() is pretty simple: I'm quite proud of it really.  We have
 * a ring buffer of stored hypercalls which the Host will run through next time
 * we do a normal hypercall.  Each entry in the ring has 4 slots for the
 * hypercall arguments, and a "hcall_status" word which is 0 if the call is
 * ready to go, and 255 once the Host has finished with it.
 *
 * If we come around to a slot which hasn't been finished, then the table is
 * full and we just make the hypercall directly.  This has the nice side
 * effect of causing the Host to run all the stored calls in the ring buffer
 * which empties it for next time! */
static void async_hcall(unsigned long call, unsigned long arg1,
			unsigned long arg2, unsigned long arg3)
{
	/* Note: This code assumes we're uniprocessor. */
	static unsigned int next_call;
	unsigned long flags;

	/* Disable interrupts if not already disabled: we don't want an
	 * interrupt handler making a hypercall while we're already doing
	 * one! */
	local_irq_save(flags);
	if (lguest_data.hcall_status[next_call] != 0xFF) {
		/* Table full, so do normal hcall which will flush table. */
		hcall(call, arg1, arg2, arg3);
	} else {
		lguest_data.hcalls[next_call].arg0 = call;
		lguest_data.hcalls[next_call].arg1 = arg1;
		lguest_data.hcalls[next_call].arg2 = arg2;
		lguest_data.hcalls[next_call].arg3 = arg3;
		/* Arguments must all be written before we mark it to go */
		wmb();
		lguest_data.hcall_status[next_call] = 0;
		if (++next_call == LHCALL_RING_SIZE)
			next_call = 0;
	}
	local_irq_restore(flags);
}
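
/* To make the ring's lifecycle concrete, here is a sketch (not part of the
 * original file) of how a consumer could drain it, assuming only the
 * semantics described above: a status byte of 0 means "ready to go", 0xFF
 * means "finished".  The real consumer is the Host code in drivers/lguest,
 * which may well differ; run_hypercall() here is a made-up helper. */
#if 0
static void drain_hcall_ring(struct lguest_data *data)
{
	unsigned int i;

	for (i = 0; i < LHCALL_RING_SIZE; i++) {
		if (data->hcall_status[i] != 0)
			continue;		/* Empty or already finished. */
		run_hypercall(&data->hcalls[i]);	/* Hypothetical. */
		data->hcall_status[i] = 0xFF;		/* Mark it finished. */
	}
}
#endif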

/*G:035 Notice the lazy_hcall() below, rather than hcall().  This is our first
 * real optimization trick!
 *
 * When lazy_mode is set, it means we're allowed to defer all hypercalls and do
 * them as a batch when lazy_mode is eventually turned off.  Because hypercalls
 * are reasonably expensive, batching them up makes sense.  For example, a
 * large munmap might update dozens of page table entries: that code calls
 * paravirt_enter_lazy_mmu(), does the dozen updates, then calls
 * lguest_leave_lazy_mode().
 *
 * So, when we're in lazy mode, we call async_hcall() to store the call for
 * future processing: */
static void lazy_hcall(unsigned long call,
		       unsigned long arg1,
		       unsigned long arg2,
		       unsigned long arg3)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE)
		hcall(call, arg1, arg2, arg3);
	else
		async_hcall(call, arg1, arg2, arg3);
}

/* When lazy mode is turned off, reset the per-cpu lazy mode variable and then
 * issue the do-nothing hypercall to flush any stored calls. */
static void lguest_leave_lazy_mode(void)
{
	paravirt_leave_lazy(paravirt_get_lazy_mode());
	hcall(LHCALL_FLUSH_ASYNC, 0, 0, 0);
}
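
/* A sketch of the batching pattern described above, as a hypothetical caller
 * might trigger it (this is not code from this file; the lazy-mode brackets
 * are the generic pv_ops hooks).  Every set_pte_at() inside the brackets
 * becomes an async_hcall() instead of a trap: */
#if 0
static void example_batched_updates(struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pteval, int n)
{
	int i;

	arch_enter_lazy_mmu_mode();	/* paravirt_enter_lazy_mmu() */
	for (i = 0; i < n; i++)
		set_pte_at(mm, addr + i*PAGE_SIZE, ptep + i, pteval);
	arch_leave_lazy_mmu_mode();	/* ends up in lguest_leave_lazy_mode() */
}
#endif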

/*
 * After that diversion we return to our first native-instruction
 * replacements: four functions for interrupt control.
 *
 * The simplest way of implementing these would be to have "turn interrupts
 * off" and "turn interrupts on" hypercalls.  Unfortunately, this is too slow:
 * these are by far the most commonly called functions of those we override.
 *
 * So instead we keep an "irq_enabled" field inside our "struct lguest_data",
 * which the Guest can update with a single instruction.  The Host knows to
 * check there before it tries to deliver an interrupt.
 */
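
/* For illustration only, the Host-side check described above might look
 * something like this sketch (the real code lives in drivers/lguest; the
 * helper name is made up): */
#if 0
static bool can_deliver_interrupt(const struct lguest_data *data)
{
	/* The Guest toggles irq_enabled with a single write; the Host simply
	 * reads it before trying to push an interrupt frame. */
	return (data->irq_enabled & X86_EFLAGS_IF) != 0;
}
#endif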

/* save_flags() is expected to return the processor state (ie. "flags").  The
 * flags word contains all kinds of stuff, but in practice Linux only cares
 * about the interrupt flag.  Our "save_flags()" just returns that. */
static unsigned long save_fl(void)
{
	return lguest_data.irq_enabled;
}

/* restore_flags() just sets the flags back to the value given. */
static void restore_fl(unsigned long flags)
{
	lguest_data.irq_enabled = flags;
}

/* Interrupts go off... */
static void irq_disable(void)
{
	lguest_data.irq_enabled = 0;
}

/* Interrupts go on... */
static void irq_enable(void)
{
	lguest_data.irq_enabled = X86_EFLAGS_IF;
}

/*M:003 Note that we don't check for outstanding interrupts when we re-enable
 * them (or when we unmask an interrupt).  This seems to work for the moment,
 * since interrupts are rare and we'll just get the interrupt on the next timer
 * tick, but now that we can run with CONFIG_NO_HZ, we should revisit this.
 * One way would be to put the "irq_enabled" field in a page by itself, and
 * have the Host write-protect it when an interrupt comes in when irqs are
 * disabled.  There will then be a page fault as soon as interrupts are
 * re-enabled.
 *
 * A better method is to implement soft interrupt disable generally for x86:
 * instead of disabling interrupts, we set a flag.  If an interrupt does come
 * in, we then disable them for real.  This is uncommon, so we could simply use
 * a hypercall for interrupt control and not worry about efficiency. :*/
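
/* A sketch of the "soft interrupt disable" idea from the comment above.  This
 * is hypothetical (nothing here implements it); run_pending_interrupts() is a
 * made-up helper: */
#if 0
static bool irqs_soft_disabled;
static bool irq_arrived_while_disabled;

static void soft_irq_disable(void)
{
	irqs_soft_disabled = true;	/* Just note it: no hypercall. */
}

static void soft_irq_enable(void)
{
	irqs_soft_disabled = false;
	if (irq_arrived_while_disabled) {
		irq_arrived_while_disabled = false;
		run_pending_interrupts();	/* The rare, slow path. */
	}
}
#endif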

/*
 * The Interrupt Descriptor Table (IDT).
 *
 * The IDT tells the processor what to do when an interrupt comes in.  Each
 * entry in the table is a 64-bit descriptor: this holds the privilege level,
 * address of the handler, and... well, who cares?  The Guest just asks the
 * Host to make the change anyway, because the Host controls the real IDT.
 */
static void lguest_write_idt_entry(gate_desc *dt,
				   int entrynum, const gate_desc *g)
{
	/* The gate_desc structure is 8 bytes long: we hand it to the Host in
	 * two 32-bit chunks.  The whole 32-bit kernel used to hand descriptors
	 * around like this; typesafety wasn't a big concern in Linux's early
	 * days. */
	u32 *desc = (u32 *)g;
	/* Keep the local copy up to date. */
	native_write_idt_entry(dt, entrynum, g);
	/* Tell Host about this new entry. */
	hcall(LHCALL_LOAD_IDT_ENTRY, entrynum, desc[0], desc[1]);
}

/* Changing to a different IDT is very rare: we keep the IDT up-to-date every
 * time it is written, so we can simply loop through all entries and tell the
 * Host about them. */
static void lguest_load_idt(const struct desc_ptr *desc)
{
	unsigned int i;
	struct desc_struct *idt = (void *)desc->address;

	for (i = 0; i < (desc->size+1)/8; i++)
		hcall(LHCALL_LOAD_IDT_ENTRY, i, idt[i].a, idt[i].b);
}

/*
 * The Global Descriptor Table.
 *
 * The Intel architecture defines another table, called the Global Descriptor
 * Table (GDT).  You tell the CPU where it is (and its size) using the "lgdt"
 * instruction, and then several other instructions refer to entries in the
 * table.  There are three entries which the Switcher needs, so the Host simply
 * controls the entire thing and the Guest asks it to make changes using the
 * LOAD_GDT hypercall.
 *
 * This is the opposite of the IDT code, where we have a LOAD_IDT_ENTRY
 * hypercall and use that repeatedly to load a new IDT.  I don't think it
 * really matters, but wouldn't it be nice if they were the same?  Wouldn't
 * it be even better if you were the one to send the patch to fix it?
 */
static void lguest_load_gdt(const struct desc_ptr *desc)
{
	BUG_ON((desc->size+1)/8 != GDT_ENTRIES);
	hcall(LHCALL_LOAD_GDT, __pa(desc->address), GDT_ENTRIES, 0);
}

/* For a single GDT entry which changes, we do the lazy thing: alter our GDT,
 * then tell the Host to reload the entire thing.  This operation is so rare
 * that this naive implementation is reasonable. */
static void lguest_write_gdt_entry(struct desc_struct *dt, int entrynum,
				   const void *desc, int type)
{
	native_write_gdt_entry(dt, entrynum, desc, type);
	hcall(LHCALL_LOAD_GDT, __pa(dt), GDT_ENTRIES, 0);
}

/* OK, I lied.  There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables).  So we have a hypercall specifically for this case. */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/* There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using.  So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway. */
	loadsegment(gs, 0);
	lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
}

/*G:038 That's enough excitement for now, back to ploughing through each of
 * the different pv_ops structures (we're about 1/3 of the way through).
 *
 * This is the Local Descriptor Table, another weird Intel thingy.  Linux only
 * uses this for some strange applications like Wine.  We don't do anything
 * here, so they'll get an informative and friendly Segmentation Fault. */
static void lguest_set_ldt(const void *addr, unsigned entries)
{
}

/* This loads a GDT entry into the "Task Register": that entry points to a
 * structure called the Task State Segment.  Some comments scattered through
 * the kernel code indicate that this was used for task switching in ages past,
 * along with blood sacrifice and astrology.
 *
 * Now there's nothing interesting in here that we don't get told elsewhere.
 * But the native version uses the "ltr" instruction, which makes the Host
 * complain to the Guest about a Segmentation Fault and it'll oops.  So we
 * override the native version with a do-nothing version. */
static void lguest_load_tr_desc(void)
{
}

/* The "cpuid" instruction is a way of querying both the CPU identity
 * (manufacturer, model, etc) and its features.  It was introduced before the
 * Pentium in 1993 and keeps getting extended by Intel, AMD and others.
 * As you might imagine, after a decade and a half of this treatment, it is now
 * a giant ball of hair.  Its entry in the current Intel manual runs to 28
 * pages.
 *
 * This instruction even has its own Wikipedia entry.  The Wikipedia entry
 * has been translated into 4 languages.  I am not making this up!
 *
 * We could get funky here and identify ourselves as "GenuineLguest", but
 * instead we just use the real "cpuid" instruction.  Then I pretty much turned
 * off feature bits until the Guest booted.  (Don't say that: you'll damage
 * lguest sales!)  Shut up, inner voice!  (Hey, just pointing out that this is
 * hardly future proof.)  No one's listening!  They don't like you anyway,
 * parenthetic weirdo!
 *
 * Replacing the cpuid so we can turn features off is great for the kernel, but
 * anyone (including userspace) can just use the raw "cpuid" instruction and
 * the Host won't even notice since it isn't privileged.  So we try not to get
 * too worked up about it. */
static void lguest_cpuid(unsigned int *ax, unsigned int *bx,
			 unsigned int *cx, unsigned int *dx)
{
	int function = *ax;

	native_cpuid(ax, bx, cx, dx);
	switch (function) {
	case 1:	/* Basic feature request. */
		/* We only allow kernel to see SSE3, CMPXCHG16B and SSSE3 */
		*cx &= 0x00002201;
		/* SSE, SSE2, FXSR, MMX, CMOV, CMPXCHG8B, TSC, FPU. */
		*dx &= 0x07808111;
		/* The Host can do a nice optimization if it knows that the
		 * kernel mappings (addresses above 0xC0000000 or whatever
		 * PAGE_OFFSET is set to) haven't changed.  But Linux calls
		 * flush_tlb_user() for both user and kernel mappings unless
		 * the Page Global Enable (PGE) feature bit is set. */
		*dx |= 0x00002000;
		/* We also lie, and say we're family id 5.  6 or greater
		 * leads to a rdmsr in early_init_intel which we can't handle.
		 * Family ID is returned as bits 8-11 in ax. */
		*ax &= 0xFFFFF0FF;
		*ax |= 0x00000500;
		break;
	case 0x80000000:
		/* Futureproof this a little: if they ask how much extended
		 * processor information there is, limit it to known fields. */
		if (*ax > 0x80000008)
			*ax = 0x80000008;
		break;
	}
}
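
/* To see why the masking above works, here is how a consumer decodes the
 * family ID from CPUID leaf 1 (a sketch matching the bit layout described in
 * the comment): */
#if 0
static unsigned int cpuid_family(unsigned int ax)
{
	/* 0xFFFFF0FF clears bits 8-11; 0x00000500 then sets family 5. */
	return (ax >> 8) & 0xf;
}
#endif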

/* Intel has four control registers, imaginatively named cr0, cr2, cr3 and cr4.
 * I assume there's a cr1, but it hasn't bothered us yet, so we'll not bother
 * it.  The Host needs to know when the Guest wants to change them, so we have
 * a whole series of functions like read_cr0() and write_cr0().
 *
 * We start with cr0.  cr0 allows you to turn on and off all kinds of basic
 * features, but Linux only really cares about one: the horrifically-named Task
 * Switched (TS) bit at bit 3 (ie. 8).
 *
 * What does the TS bit do?  Well, it causes the CPU to trap (interrupt 7) if
 * the floating point unit is used.  Which allows us to restore FPU state
 * lazily after a task switch, and Linux uses that gratefully, but wouldn't a
 * name like "FPUTRAP bit" be a little less cryptic?
 *
 * We store cr0 locally because the Host never changes it.  The Guest sometimes
 * wants to read it and we'd prefer not to bother the Host unnecessarily. */
static unsigned long current_cr0;
static void lguest_write_cr0(unsigned long val)
{
	lazy_hcall(LHCALL_TS, val & X86_CR0_TS, 0, 0);
	current_cr0 = val;
}

static unsigned long lguest_read_cr0(void)
{
	return current_cr0;
}

/* Intel provided a special instruction to clear the TS bit for people too cool
 * to use write_cr0() to do it.  This "clts" instruction is faster, because all
 * the vowels have been optimized out. */
static void lguest_clts(void)
{
	lazy_hcall(LHCALL_TS, 0, 0, 0);
	current_cr0 &= ~X86_CR0_TS;
}

/* cr2 is the virtual address of the last page fault, which the Guest only ever
 * reads.  The Host kindly writes this into our "struct lguest_data", so we
 * just read it out of there. */
static unsigned long lguest_read_cr2(void)
{
	return lguest_data.cr2;
}

/* See lguest_set_pte() below. */
static bool cr3_changed = false;

/* cr3 is the current toplevel pagetable page: the principle is the same as
 * cr0.  Keep a local copy, and tell the Host when it changes.  The only
 * difference is that our local copy is in lguest_data because the Host needs
 * to set it upon our initial hypercall. */
static void lguest_write_cr3(unsigned long cr3)
{
	lguest_data.pgdir = cr3;
	lazy_hcall(LHCALL_NEW_PGTABLE, cr3, 0, 0);
	cr3_changed = true;
}

static unsigned long lguest_read_cr3(void)
{
	return lguest_data.pgdir;
}

/* cr4 is used to enable and disable PGE, but we don't care. */
static unsigned long lguest_read_cr4(void)
{
	return 0;
}

static void lguest_write_cr4(unsigned long val)
{
}

/*
 * Page Table Handling.
 *
 * Now would be a good time to take a rest and grab a coffee or similarly
 * relaxing stimulant.  The easy parts are behind us, and the trek gradually
 * winds uphill from here.
 *
 * Quick refresher: memory is divided into "pages" of 4096 bytes each.  The CPU
 * maps virtual addresses to physical addresses using "page tables".  We could
 * use one huge index of 1 million entries: each address is 4 bytes, so that's
 * 1024 pages just to hold the page tables.  But since most virtual addresses
 * are unused, we use a two level index which saves space.  The cr3 register
 * contains the physical address of the top level "page directory" page, which
 * contains physical addresses of up to 1024 second-level pages.  Each of these
 * second level pages contains up to 1024 physical addresses of actual pages,
 * or Page Table Entries (PTEs).
 *
 * Here's a diagram, where arrows indicate physical addresses:
 *
 * cr3 ---> +---------+
 *          |  --------->+---------+
 *          |         |  | PADDR1  |
 *        Top-level   |  | PADDR2  |
 *        (PMD) page  |  |         |
 *          |         |  Lower-level
 *          |         |  (PTE) page
 *          |         |  |         |
 *            ....         ....
 *
 * So to convert a virtual address to a physical address, we look up the top
 * level, which points us to the second level, which gives us the physical
 * address of that page.  If the top level entry was not present, or the second
 * level entry was not present, then the virtual address is invalid (we
 * say "the page was not mapped").
 *
 * Put another way, a 32-bit virtual address is divided up like so:
 *
 *  1 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
 * |<---- 10 bits ---->|<---- 10 bits ---->|<------ 12 bits ------>|
 *    Index into top     Index into second      Offset within page
 *  page directory page    pagetable page
 *
 * The kernel spends a lot of time changing both the top-level page directory
 * and lower-level pagetable pages.  The Guest doesn't know physical addresses,
 * so while it maintains these page tables exactly like normal, it also needs
 * to keep the Host informed whenever it makes a change: the Host will create
 * the real page tables based on the Guests'.
 */
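
/* A sketch of the two-level lookup arithmetic described above (illustrative
 * only: the kernel's real helpers are pgd_index(), pte_index() and friends): */
#if 0
static void split_vaddr(unsigned long vaddr)
{
	unsigned int pgd_idx = vaddr >> 22;		/* top 10 bits */
	unsigned int pte_idx = (vaddr >> 12) & 0x3ff;	/* middle 10 bits */
	unsigned int offset  = vaddr & 0xfff;		/* bottom 12 bits */

	printk("%u/%u/%u\n", pgd_idx, pte_idx, offset);
}
#endif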

/* The Guest calls this to set a second-level entry (pte), ie. to map a page
 * into a process' address space.  We set the entry then tell the Host the
 * toplevel and address this corresponds to.  The Guest uses one pagetable per
 * process, so we need to tell the Host which one we're changing (mm->pgd). */
static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	lazy_hcall(LHCALL_SET_PTE, __pa(mm->pgd), addr, pteval.pte_low);
}

/* The Guest calls this to set a top-level entry.  Again, we set the entry then
 * tell the Host which top-level page we changed, and the index of the entry we
 * changed. */
static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
	lazy_hcall(LHCALL_SET_PMD, __pa(pmdp)&PAGE_MASK,
		   (__pa(pmdp)&(PAGE_SIZE-1))/4, 0);
}
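
/* The two arguments above just split the entry's physical address into "which
 * pagetable page" and "which entry in it".  The same arithmetic, spelled out
 * (a sketch only; entries are 4 bytes each without PAE): */
#if 0
static void split_pmd_address(unsigned long pa_of_pmdp)
{
	unsigned long page  = pa_of_pmdp & PAGE_MASK;
	unsigned long index = (pa_of_pmdp & (PAGE_SIZE-1)) / 4;

	printk("page %#lx entry %lu\n", page, index);
}
#endif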

/* There are a couple of legacy places where the kernel sets a PTE, but we
 * don't know the top level any more.  This is useless for us, since we don't
 * know which pagetable is changing or what address, so we just tell the Host
 * to forget all of them.  Fortunately, this is very rare.
 *
 * ... except in early boot when the kernel sets up the initial pagetables,
 * which makes booting astonishingly slow: 1.83 seconds!  So we don't even tell
 * the Host anything changed until we've done the first page table switch,
 * which brings boot back to 0.25 seconds. */
static void lguest_set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
	if (cr3_changed)
		lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}

/* Unfortunately for Lguest, the pv_mmu_ops for page tables were based on
 * native page table operations.  On native hardware you can set a new page
 * table entry whenever you want, but if you want to remove one you have to do
 * a TLB flush (a TLB is a little cache of page table entries kept by the CPU).
 *
 * So the lguest_set_pte_at() and lguest_set_pmd() functions above are only
 * called when a valid entry is written, not when it's removed (ie. marked not
 * present).  Instead, this is where we come when the Guest wants to remove a
 * page table entry: we tell the Host to set that entry to 0 (ie. the present
 * bit is zero). */
static void lguest_flush_tlb_single(unsigned long addr)
{
	/* Simply set it to zero: if it was in use, it will fault back in. */
	lazy_hcall(LHCALL_SET_PTE, lguest_data.pgdir, addr, 0);
}

/* This is what happens after the Guest has removed a large number of entries.
 * This tells the Host that any of the page table entries for userspace might
 * have changed, ie. virtual addresses below PAGE_OFFSET. */
static void lguest_flush_tlb_user(void)
{
	lazy_hcall(LHCALL_FLUSH_TLB, 0, 0, 0);
}

/* This is called when the kernel page tables have changed.  That's not very
 * common (unless the Guest is using highmem, which makes the Guest extremely
 * slow), so it's worth separating this from the user flushing above. */
static void lguest_flush_tlb_kernel(void)
{
	lazy_hcall(LHCALL_FLUSH_TLB, 1, 0, 0);
}

/*
 * The Unadvanced Programmable Interrupt Controller.
 *
 * This is an attempt to implement the simplest possible interrupt controller.
 * I spent some time looking through routines like set_irq_chip_and_handler,
 * set_irq_chip_and_handler_name, set_irq_chip_data and set_phasers_to_stun and
 * I *think* this is as simple as it gets.
 *
 * We can tell the Host which interrupts we want blocked by setting bits in the
 * lguest_data.blocked_interrupts bitmap, so disabling (aka "masking") them is
 * as simple as setting a bit.  We don't actually "ack" interrupts as such, we
 * just mask and unmask them.  I wonder if we should be cleverer?
 */
static void disable_lguest_irq(unsigned int irq)
{
	set_bit(irq, lguest_data.blocked_interrupts);
}

static void enable_lguest_irq(unsigned int irq)
{
	clear_bit(irq, lguest_data.blocked_interrupts);
}

/* This structure describes the lguest IRQ controller. */
static struct irq_chip lguest_irq_controller = {
	.name		= "lguest",
	.mask		= disable_lguest_irq,
	.mask_ack	= disable_lguest_irq,
	.unmask		= enable_lguest_irq,
};

/* This sets up the Interrupt Descriptor Table (IDT) entry for each hardware
 * interrupt (except 128, which is used for system calls), and then tells the
 * Linux infrastructure that each interrupt is controlled by our level-based
 * lguest interrupt controller. */
static void __init lguest_init_IRQ(void)
{
	unsigned int i;

	for (i = 0; i < LGUEST_IRQS; i++) {
		int vector = FIRST_EXTERNAL_VECTOR + i;
		/* Some systems map "vectors" to interrupts weirdly.  Lguest has
		 * a straightforward 1 to 1 mapping, so force that here. */
		__get_cpu_var(vector_irq)[vector] = i;
		if (vector != SYSCALL_VECTOR)
			set_intr_gate(vector, interrupt[i]);
	}
	/* This call is required to set up for 4k stacks, where we have
	 * separate stacks for hard and soft interrupts. */
	irq_ctx_init(smp_processor_id());
}

void lguest_setup_irq(unsigned int irq)
{
	irq_to_desc_alloc_cpu(irq, 0);
	set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
				      handle_level_irq, "level");
}

/*
 * Time.
 *
 * It would be far better for everyone if the Guest had its own clock, but
 * until then the Host gives us the time on every interrupt.
 */
static unsigned long lguest_get_wallclock(void)
{
	return lguest_data.time.tv_sec;
}

/* The TSC is an Intel thing called the Time Stamp Counter.  The Host tells us
 * what speed it runs at, or 0 if it's unusable as a reliable clock source.
 * This matches what we want here: if we return 0 from this function, the x86
 * TSC clock will give up and not register itself. */
static unsigned long lguest_tsc_khz(void)
{
	return lguest_data.tsc_khz;
}

/* If we can't use the TSC, the kernel falls back to our lower-priority
 * "lguest_clock", where we read the time value given to us by the Host. */
static cycle_t lguest_clock_read(void)
{
	unsigned long sec, nsec;

	/* Since the time is in two parts (seconds and nanoseconds), we risk
	 * reading it just as it's changing from 99 & 0.999999999 to 100 and 0,
	 * and getting 99 and 0.  As Linux tends to come apart under the stress
	 * of time travel, we must be careful: */
	do {
		/* First we read the seconds part. */
		sec = lguest_data.time.tv_sec;
		/* This read memory barrier tells the compiler and the CPU that
		 * this can't be reordered: we have to complete the above
		 * before going on. */
		rmb();
		/* Now we read the nanoseconds part. */
		nsec = lguest_data.time.tv_nsec;
		/* Make sure we've done that. */
		rmb();
		/* Now if the seconds part has changed, try again. */
	} while (unlikely(lguest_data.time.tv_sec != sec));

	/* Our lguest clock is in real nanoseconds. */
	return sec*1000000000ULL + nsec;
}
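
/* For contrast, here is the naive read the retry loop is defending against (a
 * sketch, deliberately buggy): if the Host updates the time between the two
 * loads, tv_sec and tv_nsec come from different updates and time appears to
 * jump backwards by up to a second. */
#if 0
static cycle_t naive_clock_read(void)
{
	/* BUGGY: no retry, so 99.999999999 -> 100.0 can be read as 99.0. */
	return lguest_data.time.tv_sec * 1000000000ULL
		+ lguest_data.time.tv_nsec;
}
#endif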

/* This is the fallback clocksource: lower priority than the TSC clocksource. */
static struct clocksource lguest_clock = {
	.name		= "lguest",
	.rating		= 200,
	.read		= lguest_clock_read,
	.mask		= CLOCKSOURCE_MASK(64),
	.mult		= 1 << 22,
	.shift		= 22,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};

/* We also need a "struct clock_event_device": Linux asks us to set it to go
 * off some time in the future.  Actually, James Morris figured all this out, I
 * just applied the patch. */
static int lguest_clockevent_set_next_event(unsigned long delta,
					    struct clock_event_device *evt)
{
	/* FIXME: I don't think this can ever happen, but James tells me he had
	 * to put this code in.  Maybe we should remove it now.  Anyone? */
	if (delta < LG_CLOCK_MIN_DELTA) {
		if (printk_ratelimit())
			printk(KERN_DEBUG "%s: small delta %lu ns\n",
			       __func__, delta);
		return -ETIME;
	}

	/* Please wake us this far in the future. */
	hcall(LHCALL_SET_CLOCKEVENT, delta, 0, 0);
	return 0;
}

static void lguest_clockevent_set_mode(enum clock_event_mode mode,
				       struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* A 0 argument shuts the clock down. */
		hcall(LHCALL_SET_CLOCKEVENT, 0, 0, 0);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		/* This is what we expect. */
		break;
	case CLOCK_EVT_MODE_PERIODIC:
		BUG();
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}

/* This describes our primitive timer chip. */
static struct clock_event_device lguest_clockevent = {
	.name			= "lguest",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.set_next_event		= lguest_clockevent_set_next_event,
	.set_mode		= lguest_clockevent_set_mode,
	.rating			= INT_MAX,
	.mult			= 1,
	.shift			= 0,
	.min_delta_ns		= LG_CLOCK_MIN_DELTA,
	.max_delta_ns		= LG_CLOCK_MAX_DELTA,
};

/* This is the Guest timer interrupt handler (hardware interrupt 0).  We just
 * call the clockevent infrastructure and it does whatever needs doing. */
static void lguest_time_irq(unsigned int irq, struct irq_desc *desc)
{
	unsigned long flags;

	/* Don't interrupt us while this is running. */
	local_irq_save(flags);
	lguest_clockevent.event_handler(&lguest_clockevent);
	local_irq_restore(flags);
}

/* At some point in the boot process, we get asked to set up our timing
 * infrastructure.  The kernel doesn't expect timer interrupts before this, but
 * we cleverly initialized the "blocked_interrupts" field of "struct
 * lguest_data" so that timer interrupts were blocked until now. */
static void lguest_time_init(void)
{
	/* Set up the timer interrupt (0) to go to our simple timer routine */
	set_irq_handler(0, lguest_time_irq);

	clocksource_register(&lguest_clock);

	/* We can't set cpumask in the initializer: damn C limitations!  Set it
	 * here and register our timer device. */
	lguest_clockevent.cpumask = cpumask_of(0);
	clockevents_register_device(&lguest_clockevent);

	/* Finally, we unblock the timer interrupt. */
	enable_lguest_irq(0);
}

/*
 * Miscellaneous bits and pieces.
 *
 * Here is an oddball collection of functions which the Guest needs for things
 * to work.  They're pretty simple.
 */

/* The Guest needs to tell the Host what stack it expects traps to use.  For
 * native hardware, this is part of the Task State Segment mentioned above in
 * lguest_load_tr_desc(), but to help hypervisors there's this special call.
 *
 * We tell the Host the segment we want to use (__KERNEL_DS is the kernel data
 * segment), the privilege level (we're privilege level 1, the Host is 0 and
 * will not tolerate us trying to use that), the stack pointer, and the number
 * of pages in the stack. */
static void lguest_load_sp0(struct tss_struct *tss,
			    struct thread_struct *thread)
{
	lazy_hcall(LHCALL_SET_STACK, __KERNEL_DS|0x1, thread->sp0,
		   THREAD_SIZE/PAGE_SIZE);
}

/* Let's just say, I wouldn't do debugging under a Guest. */
static void lguest_set_debugreg(int regno, unsigned long value)
{
	/* FIXME: Implement */
}

/* There are times when the kernel wants to make sure that no memory writes are
 * caught in the cache (that they've all reached real hardware devices).  This
 * doesn't matter for the Guest, which has virtual hardware.
 *
 * On the Pentium 4 and above, cpuid() indicates that the Cache Line Flush
 * (clflush) instruction is available and the kernel uses that.  Otherwise, it
 * uses the older "Write Back and Invalidate Cache" (wbinvd) instruction.
 * Unlike clflush, wbinvd can only be run at privilege level 0.  So we can
 * ignore clflush, but replace wbinvd.
 */
static void lguest_wbinvd(void)
{
}

/* If the Guest expects to have an Advanced Programmable Interrupt Controller,
 * we play dumb by ignoring writes and returning 0 for reads.  So it's no
 * longer Programmable nor Controlling anything, and I don't think 8 lines of
 * code qualifies for Advanced.  It will also never interrupt anything.  It
 * does, however, allow us to get through the Linux boot code. */
#ifdef CONFIG_X86_LOCAL_APIC
static void lguest_apic_write(u32 reg, u32 v)
{
}

static u32 lguest_apic_read(u32 reg)
{
	return 0;
}

static u64 lguest_apic_icr_read(void)
{
	return 0;
}

static void lguest_apic_icr_write(u32 low, u32 id)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}

static void lguest_apic_wait_icr_idle(void)
{
}

static u32 lguest_apic_safe_wait_icr_idle(void)
{
	return 0;
}

static struct apic_ops lguest_basic_apic_ops = {
	.read			= lguest_apic_read,
	.write			= lguest_apic_write,
	.icr_read		= lguest_apic_icr_read,
	.icr_write		= lguest_apic_icr_write,
	.wait_icr_idle		= lguest_apic_wait_icr_idle,
	.safe_wait_icr_idle	= lguest_apic_safe_wait_icr_idle,
};
#endif

/* STOP!  Until an interrupt comes in. */
static void lguest_safe_halt(void)
{
	hcall(LHCALL_HALT, 0, 0, 0);
}

/* The SHUTDOWN hypercall takes a string to describe what's happening, and
 * an argument which says whether this is to restart (reboot) the Guest or not.
 *
 * Note that the Host always prefers that the Guest speak in physical addresses
 * rather than virtual addresses, so we use __pa() here. */
static void lguest_power_off(void)
{
	hcall(LHCALL_SHUTDOWN, __pa("Power down"), LGUEST_SHUTDOWN_POWEROFF, 0);
}

/*
 * Panicing.
 *
 * Don't.  But if you did, this is what happens.
 */
static int lguest_panic(struct notifier_block *nb, unsigned long l, void *p)
{
	hcall(LHCALL_SHUTDOWN, __pa(p), LGUEST_SHUTDOWN_POWEROFF, 0);
	/* The hcall won't return, but to keep gcc happy, we're "done". */
	return NOTIFY_DONE;
}

static struct notifier_block paniced = {
	.notifier_call = lguest_panic
};

/* Setting up memory is fairly easy. */
static __init char *lguest_memory_setup(void)
{
	/* We do this here and not earlier because lockcheck used to barf if we
	 * did it before start_kernel().  I think we fixed that, so it'd be
	 * nice to move it back to lguest_init.  Patch welcome... */
	atomic_notifier_chain_register(&panic_notifier_list, &paniced);

	/* The Linux bootloader header contains an "e820" memory map: the
	 * Launcher populated the first entry with our memory limit. */
	e820_add_region(boot_params.e820_map[0].addr,
			boot_params.e820_map[0].size,
			boot_params.e820_map[0].type);

	/* This string is for the boot messages. */
	return "LGUEST";
}

/* We will eventually use the virtio console device to produce console output,
 * but before that is set up we use LHCALL_NOTIFY on normal memory to produce
 * output. */
static __init int early_put_chars(u32 vtermno, const char *buf, int count)
{
	char scratch[17];
	unsigned int len = count;

	/* We use a nul-terminated string, so we have to make a copy.  Icky,
	 * huh? */
	if (len > sizeof(scratch) - 1)
		len = sizeof(scratch) - 1;
	scratch[len] = '\0';
	memcpy(scratch, buf, len);
	hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0);

	/* This routine returns the number of bytes actually written. */
	return len;
}

/* Rebooting also tells the Host we're finished, but the RESTART flag tells the
 * Launcher to reboot us. */
static void lguest_restart(char *reason)
{
	hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0);
}

/*
 * Patching (Powerfully Placating Performance Pedants)
 *
 * We have already seen that pv_ops structures let us replace simple native
 * instructions with calls to the appropriate back end all throughout the
 * kernel.  This allows the same kernel to run as a Guest and as a native
 * kernel, but it's slow because of all the indirect branches.
 *
 * Remember that David Wheeler quote about "Any problem in computer science can
 * be solved with another layer of indirection"?  The rest of that quote is
 * "... But that usually will create another problem."  This is the first of
 * those problems.
 *
 * Our current solution is to allow the paravirt back end to optionally patch
 * over the indirect calls to replace them with something more efficient.  We
 * patch the four most commonly called functions: disable interrupts, enable
 * interrupts, restore interrupts and save interrupts.  We usually have 6 or 10
 * bytes to patch into: the Guest versions of these operations are small enough
 * that we can fit comfortably.
 *
 * First we need assembly templates of each of the patchable Guest operations,
 * and these are in i386_head.S. */
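
/* For a flavour of what such a template looks like, here is a sketch of the
 * "cli" pair in AT&T syntax.  This is illustrative only: the real templates
 * live in i386_head.S, and this version assumes irq_enabled sits at offset 0
 * of lguest_data. */
#if 0
asm(".global lgstart_cli, lgend_cli\n"
    "lgstart_cli:\n\t"
    "movl $0, lguest_data\n"	/* the single store irq_disable() makes */
    "lgend_cli:\n");
#endif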

/*G:060 We construct a table from the assembler templates: */
static const struct lguest_insns
{
	const char *start, *end;
} lguest_insns[] = {
	[PARAVIRT_PATCH(pv_irq_ops.irq_disable)] = { lgstart_cli, lgend_cli },
	[PARAVIRT_PATCH(pv_irq_ops.irq_enable)] = { lgstart_sti, lgend_sti },
	[PARAVIRT_PATCH(pv_irq_ops.restore_fl)] = { lgstart_popf, lgend_popf },
	[PARAVIRT_PATCH(pv_irq_ops.save_fl)] = { lgstart_pushf, lgend_pushf },
};

/* Now our patch routine is fairly simple (based on the native one in
 * paravirt.c).  If we have a replacement, we copy it in and return how much of
 * the available space we used. */
static unsigned lguest_patch(u8 type, u16 clobber, void *ibuf,
			     unsigned long addr, unsigned len)
{
	unsigned int insn_len;

	/* Don't do anything special if we don't have a replacement */
	if (type >= ARRAY_SIZE(lguest_insns) || !lguest_insns[type].start)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	insn_len = lguest_insns[type].end - lguest_insns[type].start;

	/* Similarly if we can't fit replacement (shouldn't happen, but let's
	 * be careful). */
	if (len < insn_len)
		return paravirt_patch_default(type, clobber, ibuf, addr, len);

	/* Copy in our instructions. */
	memcpy(ibuf, lguest_insns[type].start, insn_len);
	return insn_len;
}
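
/* A sketch of how a patch site gets rewritten (hypothetical caller; the real
 * loop is apply_paravirt() in arch/x86/kernel/alternative.c): */
#if 0
static void example_patch_site(u8 type, void *site, unsigned len)
{
	u8 buf[16];
	unsigned used = lguest_patch(type, 0, buf, (unsigned long)site, len);

	/* Copy 'used' bytes over the call site; the rest gets nop padding. */
	text_poke(site, buf, used);
}
#endif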

/*G:030 Once we get to lguest_init(), we know we're a Guest.  The various
 * pv_ops structures in the kernel provide points for (almost) every routine we
 * have to override to avoid privileged instructions. */
__init void lguest_init(void)
{
	/* We're under lguest, paravirt is enabled, and we're running at
	 * privilege level 1, not 0 as normal. */
	pv_info.name = "lguest";
	pv_info.paravirt_enabled = 1;
	pv_info.kernel_rpl = 1;

	/* We set up all the lguest overrides for sensitive operations.  These
	 * are detailed with the operations themselves. */

	/* interrupt-related operations */
	pv_irq_ops.init_IRQ = lguest_init_IRQ;
	pv_irq_ops.save_fl = save_fl;
	pv_irq_ops.restore_fl = restore_fl;
	pv_irq_ops.irq_disable = irq_disable;
	pv_irq_ops.irq_enable = irq_enable;
	pv_irq_ops.safe_halt = lguest_safe_halt;

	/* init-time operations */
	pv_init_ops.memory_setup = lguest_memory_setup;
	pv_init_ops.patch = lguest_patch;

	/* Intercepts of various cpu instructions */
	pv_cpu_ops.load_gdt = lguest_load_gdt;
	pv_cpu_ops.cpuid = lguest_cpuid;
	pv_cpu_ops.load_idt = lguest_load_idt;
	pv_cpu_ops.iret = lguest_iret;
	pv_cpu_ops.load_sp0 = lguest_load_sp0;
	pv_cpu_ops.load_tr_desc = lguest_load_tr_desc;
	pv_cpu_ops.set_ldt = lguest_set_ldt;
	pv_cpu_ops.load_tls = lguest_load_tls;
	pv_cpu_ops.set_debugreg = lguest_set_debugreg;
	pv_cpu_ops.clts = lguest_clts;
	pv_cpu_ops.read_cr0 = lguest_read_cr0;
	pv_cpu_ops.write_cr0 = lguest_write_cr0;
	pv_cpu_ops.read_cr4 = lguest_read_cr4;
	pv_cpu_ops.write_cr4 = lguest_write_cr4;
	pv_cpu_ops.write_gdt_entry = lguest_write_gdt_entry;
	pv_cpu_ops.write_idt_entry = lguest_write_idt_entry;
	pv_cpu_ops.wbinvd = lguest_wbinvd;
	pv_cpu_ops.lazy_mode.enter = paravirt_enter_lazy_cpu;
	pv_cpu_ops.lazy_mode.leave = lguest_leave_lazy_mode;

	/* pagetable management */
	pv_mmu_ops.write_cr3 = lguest_write_cr3;
	pv_mmu_ops.flush_tlb_user = lguest_flush_tlb_user;
	pv_mmu_ops.flush_tlb_single = lguest_flush_tlb_single;
	pv_mmu_ops.flush_tlb_kernel = lguest_flush_tlb_kernel;
	pv_mmu_ops.set_pte = lguest_set_pte;
	pv_mmu_ops.set_pte_at = lguest_set_pte_at;
	pv_mmu_ops.set_pmd = lguest_set_pmd;
	pv_mmu_ops.read_cr2 = lguest_read_cr2;
	pv_mmu_ops.read_cr3 = lguest_read_cr3;
	pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
	pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mode;

#ifdef CONFIG_X86_LOCAL_APIC
	/* apic read/write intercepts */
	apic_ops = &lguest_basic_apic_ops;
#endif

	/* time operations */
	pv_time_ops.get_wallclock = lguest_get_wallclock;
	pv_time_ops.time_init = lguest_time_init;
	pv_time_ops.get_tsc_khz = lguest_tsc_khz;

	/* Now is a good time to look at the implementations of these functions
	 * before returning to the rest of lguest_init(). */

	/*G:070 Now we've seen all the paravirt_ops, we return to
	 * lguest_init() where the rest of the fairly chaotic boot setup
	 * occurs. */

	/* The native boot code sets up initial page tables immediately after
	 * the kernel itself, and sets init_pg_tables_end so they're not
	 * clobbered.  The Launcher places our initial pagetables somewhere at
	 * the top of our physical memory, so we don't need extra space: set
	 * init_pg_tables_end to the end of the kernel. */
	init_pg_tables_start = __pa(pg0);
	init_pg_tables_end = __pa(pg0);

	/* As described in head_32.S, we map the first 128M of memory. */
	max_pfn_mapped = (128*1024*1024) >> PAGE_SHIFT;

	/* Load the %fs segment register (the per-cpu segment register) with
	 * the normal data segment to get through booting. */
	asm volatile ("mov %0, %%fs" : : "r" (__KERNEL_DS) : "memory");

	/* The Host<->Guest Switcher lives at the top of our address space, and
	 * the Host told us how big it is when we made the LGUEST_INIT
	 * hypercall: it put the answer in lguest_data.reserve_mem */
	reserve_top_address(lguest_data.reserve_mem);

	/* If we don't initialize the lock dependency checker now, it crashes
	 * paravirt_disable_iospace. */
	lockdep_init();

	/* The IDE code spends about 3 seconds probing for disks: if we reserve
	 * all the I/O ports up front it can't get them and so doesn't probe.
	 * Other device drivers are similar (but less severe).  This cuts the
	 * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. */
	paravirt_disable_iospace();

	/* This is messy CPU setup stuff which the native boot code does before
	 * start_kernel, so we have to do it, too: */
	cpu_detect(&new_cpu_data);
	/* head.S usually sets up the first capability word, so do it here. */
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Math is always hard! */
	new_cpu_data.hard_math = 1;

	/* We don't have features.  We have puppies!  Puppies! */
#ifdef CONFIG_X86_MCE
	mce_disabled = 1;
#endif

	/* We set the preferred console to "hvc".  This is the "hypervisor
	 * virtual console" driver written by the PowerPC people, which we also
	 * adapted for lguest's use. */
	add_preferred_console("hvc", 0, NULL);

	/* Register our very early console. */
	virtio_cons_early_init(early_put_chars);

	/* Last of all, we set the power management poweroff hook to point to
	 * the Guest routine to power off, and the reboot hook to our restart
	 * routine. */
	pm_power_off = lguest_power_off;
	machine_ops.restart = lguest_restart;

	/* Now we're set up, call i386_start_kernel() in head32.c and we proceed
	 * to boot as normal.  It never returns. */
	i386_start_kernel();
}

/*
 * This marks the end of stage II of our journey, The Guest.
 *
 * It is now time for us to explore the layer of virtual drivers and complete
 * our understanding of the Guest in "make Drivers".
 */