/* arch/x86/xen/enlighten.c */
/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>

#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvc-console.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/msr-index.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>

#include "xen-ops.h"
#include "mmu.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
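/*
 * Illustrative arithmetic (assuming 4K pages and 512 ptes per pte
 * page, as on PAE/64-bit): each pte page maps 512 * 4K = 2MB, so
 * this four-page array can identity-map up to 8MB.
 */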
static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;

#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);  /* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);  /* actual vcpu cr3 */

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (ie buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement =
#ifdef CONFIG_X86_32
        1
#else
        0
#endif
        ;


static void xen_vcpu_setup(int cpu)
{
        struct vcpu_register_vcpu_info info;
        int err;
        struct vcpu_info *vcpup;

        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);
        per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

        if (!have_vcpu_info_placement)
                return;         /* already tested, not available */

        vcpup = &per_cpu(xen_vcpu_info, cpu);

        info.mfn = virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);

        printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
               cpu, vcpup, info.mfn, info.offset);

        /* Check to see if the hypervisor will put the vcpu_info
           structure where we want it, which allows direct access via
           a percpu-variable. */
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

        if (err) {
                printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
                have_vcpu_info_placement = 0;
        } else {
                /* This cpu is using the registered vcpu info, even if
                   later ones fail to. */
                per_cpu(xen_vcpu, cpu) = vcpup;

                printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
                       cpu, vcpup);
        }
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
        if (have_vcpu_info_placement) {
                int cpu;

                for_each_online_cpu(cpu) {
                        bool other_cpu = (cpu != smp_processor_id());

                        if (other_cpu &&
                            HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
                                BUG();

                        xen_vcpu_setup(cpu);

                        if (other_cpu &&
                            HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
                                BUG();
                }

                BUG_ON(!have_vcpu_info_placement);
        }
}

static void __init xen_banner(void)
{
        unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
        struct xen_extraversion extra;
        HYPERVISOR_xen_version(XENVER_extraversion, &extra);

        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
               pv_info.name);
        printk(KERN_INFO "Xen version: %d.%d%s%s\n",
               version >> 16, version & 0xffff, extra.extraversion,
               xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

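/*
 * cpuid itself is unprivileged, so the hypervisor cannot trap it
 * directly; the XEN_EMULATE_PREFIX below marks the instruction for
 * forced emulation, giving Xen the chance to filter the feature
 * bits a PV guest gets to see.
 */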
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
                      unsigned int *cx, unsigned int *dx)
{
        unsigned maskedx = ~0;

        /*
         * Mask out inconvenient features, to try and disable as many
         * unsupported kernel subsystems as possible.
         */
        if (*ax == 1)
                maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
                            (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
                            (1 << X86_FEATURE_MCE)  |  /* disable MCE */
                            (1 << X86_FEATURE_MCA)  |  /* disable MCA */
                            (1 << X86_FEATURE_ACC));   /* thermal monitoring */

        asm(XEN_EMULATE_PREFIX "cpuid"
                : "=a" (*ax),
                  "=b" (*bx),
                  "=c" (*cx),
                  "=d" (*dx)
                : "0" (*ax), "2" (*cx));
        *dx &= maskedx;
}

static void xen_set_debugreg(int reg, unsigned long val)
{
        HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
        return HYPERVISOR_get_debugreg(reg);
}

static void xen_leave_lazy(void)
{
        paravirt_leave_lazy(paravirt_get_lazy_mode());
        xen_mc_flush();
}

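/*
 * Under Xen the hypervisor manages the TSS, so there is no
 * meaningful task register value for the kernel to read back;
 * report 0 to paravirt callers.
 */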
static unsigned long xen_store_tr(void)
{
        return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
        int level;
        pte_t *ptep;
        pte_t pte;
        unsigned long pfn;
        struct page *page;

        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);

        pfn = pte_pfn(*ptep);
        page = pfn_to_page(pfn);

        pte = pfn_pte(pfn, prot);

        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();

        if (!PageHighMem(page)) {
                void *av = __va(PFN_PHYS(pfn));

                if (av != v)
                        if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
                                BUG();
        } else
                kmap_flush_unused();
}

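/*
 * Xen requires that pages backing descriptor tables be mapped
 * read-only in every mapping (including lowmem aliases) before they
 * can be handed to the hypervisor, hence the RO/RW flips below on
 * LDT alloc/free.
 */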
static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;

        for (i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;

        for (i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL);
}

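/*
 * Many of the ops below batch hypercalls through Xen's multicall
 * interface.  The recurring pattern (an illustrative sketch of what
 * xen_set_ldt and friends do, not extra functionality) is:
 *
 *	struct multicall_space mcs = xen_mc_entry(sizeof(*op));
 *
 *	op = mcs.args;			// argument space in the batch
 *	op->cmd = MMUEXT_...;		// fill in the request
 *	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);
 *
 *	xen_mc_issue(PARAVIRT_LAZY_CPU);  // flushes immediately unless
 *					  // we're in the named lazy mode
 */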
static void xen_set_ldt(const void *addr, unsigned entries)
{
        struct mmuext_op *op;
        struct multicall_space mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_SET_LDT;
        op->arg1.linear_addr = (unsigned long)addr;
        op->arg2.nr_ents = entries;

        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
        unsigned long *frames;
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
        unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        int f;
        struct multicall_space mcs;

        /* A GDT can be up to 64k in size, which corresponds to 8192
           8-byte entries, or 16 4k pages. */

        BUG_ON(size > 65536);
        BUG_ON(va & ~PAGE_MASK);

        mcs = xen_mc_entry(sizeof(*frames) * pages);
        frames = mcs.args;

        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
                frames[f] = virt_to_mfn(va);
                make_lowmem_page_readonly((void *)va);
        }

        MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void load_TLS_descriptor(struct thread_struct *t,
                                unsigned int cpu, unsigned int i)
{
        struct desc_struct *gdt = get_cpu_gdt_table(cpu);
        xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
        struct multicall_space mc = __xen_mc_entry(0);

        MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
        /*
         * XXX sleazy hack: If we're being called in a lazy-cpu zone,
         * it means we're in a context switch, and %gs has just been
         * saved.  This means we can zero it out to prevent faults on
         * exit from the hypervisor if the next process has no %gs.
         * Either way, it has been saved, and the new value will get
         * loaded properly.  This will go away as soon as Xen has been
         * modified to not save/restore %gs for normal hypercalls.
         *
         * On x86_64, this hack is not used for %gs, because gs points
         * to KERNEL_GS_BASE (and uses it for PDA references), so we
         * must not zero %gs on x86_64.
         *
         * For x86_64, we need to zero %fs, otherwise we may get an
         * exception between the new %fs descriptor being loaded and
         * %fs being effectively cleared at __switch_to().
         */
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
                loadsegment(gs, 0);
#else
                loadsegment(fs, 0);
#endif
        }

        xen_mc_batch();

        load_TLS_descriptor(t, cpu, 0);
        load_TLS_descriptor(t, cpu, 1);
        load_TLS_descriptor(t, cpu, 2);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
        if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
                BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
                                const void *ptr)
{
        xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
        u64 entry = *(u64 *)ptr;

        preempt_disable();

        xen_mc_flush();
        if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
                BUG();

        preempt_enable();
}

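/*
 * Convert an IDT gate into Xen's trap_info format.  Only trap gates
 * (type 0xf) and interrupt gates (type 0xe) can be expressed; the
 * flags field carries the DPL in its low two bits, with bit 2 set
 * for interrupt gates so Xen disables event delivery on entry (the
 * equivalent of clearing IF).
 */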
static int cvt_gate_to_trap(int vector, const gate_desc *val,
                            struct trap_info *info)
{
        if (val->type != 0xf && val->type != 0xe)
                return 0;

        info->vector = vector;
        info->address = gate_offset(*val);
        info->cs = gate_segment(*val);
        info->flags = val->dpl;
        /* interrupt gates clear IF */
        if (val->type == 0xe)
                info->flags |= 4;

        return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
        unsigned long p = (unsigned long)&dt[entrynum];
        unsigned long start, end;

        preempt_disable();

        start = __get_cpu_var(idt_desc).address;
        end = start + __get_cpu_var(idt_desc).size + 1;

        xen_mc_flush();

        native_write_idt_entry(dt, entrynum, g);

        if (p >= start && (p + 8) <= end) {
                struct trap_info info[2];

                info[1].address = 0;

                if (cvt_gate_to_trap(entrynum, g, &info[0]))
                        if (HYPERVISOR_set_trap_table(info))
                                BUG();
        }

        preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
                                  struct trap_info *traps)
{
        unsigned in, out, count;

        count = (desc->size+1) / sizeof(gate_desc);
        BUG_ON(count > 256);

        for (in = out = 0; in < count; in++) {
                gate_desc *entry = (gate_desc *)(desc->address) + in;

                if (cvt_gate_to_trap(in, entry, &traps[out]))
                        out++;
        }
        traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
        const struct desc_ptr *desc = &__get_cpu_var(idt_desc);

        xen_convert_trap_info(desc, traps);
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
        static DEFINE_SPINLOCK(lock);
        static struct trap_info traps[257];

        spin_lock(&lock);

        __get_cpu_var(idt_desc) = *desc;

        xen_convert_trap_info(desc, traps);

        xen_mc_flush();
        if (HYPERVISOR_set_trap_table(traps))
                BUG();

        spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
                                const void *desc, int type)
{
        preempt_disable();

        switch (type) {
        case DESC_LDT:
        case DESC_TSS:
                /* ignore */
                break;

        default: {
                xmaddr_t maddr = virt_to_machine(&dt[entry]);

                xen_mc_flush();
                if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
                        BUG();
        }

        }

        preempt_enable();
}

static void xen_load_sp0(struct tss_struct *tss,
                         struct thread_struct *thread)
{
        struct multicall_space mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

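/*
 * The iopl mask holds EFLAGS.IOPL in bits 13:12, which is what
 * (mask >> 12) & 3 below extracts.  A PV kernel does not run in
 * ring 0, so an IOPL of 0 would lock the kernel itself out of port
 * I/O; map it to 1 instead.
 */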
static void xen_set_iopl_mask(unsigned mask)
{
        struct physdev_set_iopl set_iopl;

        /* Force the change at ring 0. */
        set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
        HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

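/*
 * Native code delays port I/O by writing to port 0x80; under Xen
 * that serves no purpose, so the delay hook is deliberately empty.
 */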
static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
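/*
 * A PV guest has no usable local APIC; these stubs return benign
 * values for reads and warn on writes so any stray users show up
 * in the log.
 */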
static u32 xen_apic_read(u32 reg)
{
        return 0;
}

static void xen_apic_write(u32 reg, u32 val)
{
        /* Warn to see if there are any stray references */
        WARN_ON(1);
}

static u64 xen_apic_icr_read(void)
{
        return 0;
}

static void xen_apic_icr_write(u32 low, u32 id)
{
        /* Warn to see if there are any stray references */
        WARN_ON(1);
}

static void xen_apic_wait_icr_idle(void)
{
        return;
}

static u32 xen_safe_apic_wait_icr_idle(void)
{
        return 0;
}

static struct apic_ops xen_basic_apic_ops = {
        .read = xen_apic_read,
        .write = xen_apic_write,
        .icr_read = xen_apic_icr_read,
        .icr_write = xen_apic_icr_write,
        .wait_icr_idle = xen_apic_wait_icr_idle,
        .safe_wait_icr_idle = xen_safe_apic_wait_icr_idle,
};

#endif

static void xen_flush_tlb(void)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = MMUEXT_INVLPG_LOCAL;
        op->arg1.linear_addr = addr & PAGE_MASK;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
                                 unsigned long va)
{
        struct {
                struct mmuext_op op;
                cpumask_t mask;
        } *args;
        cpumask_t cpumask = *cpus;
        struct multicall_space mcs;

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
        if (cpus_empty(cpumask))
                return;

        mcs = xen_mc_entry(sizeof(*args));
        args = mcs.args;
        args->mask = cpumask;
        args->op.arg2.vcpumask = &args->mask;

        if (va == TLB_FLUSH_ALL) {
                args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
        } else {
                args->op.cmd = MMUEXT_INVLPG_MULTI;
                args->op.arg1.linear_addr = va;
        }

        MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static void xen_clts(void)
{
        struct multicall_space mcs;

        mcs = xen_mc_entry(0);

        MULTI_fpu_taskswitch(mcs.mc, 0);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr0(unsigned long cr0)
{
        struct multicall_space mcs;

        /* Only pay attention to cr0.TS; everything else is
           ignored. */
        mcs = xen_mc_entry(0);

        MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr2(unsigned long cr2)
{
        x86_read_percpu(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
        return x86_read_percpu(xen_vcpu)->arch.cr2;
}

static unsigned long xen_read_cr2_direct(void)
{
        return x86_read_percpu(xen_vcpu_info.arch.cr2);
}

static void xen_write_cr4(unsigned long cr4)
{
        cr4 &= ~X86_CR4_PGE;
        cr4 &= ~X86_CR4_PSE;

        native_write_cr4(cr4);
}

static unsigned long xen_read_cr3(void)
{
        return x86_read_percpu(xen_cr3);
}

static void set_current_cr3(void *v)
{
        x86_write_percpu(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
        struct mmuext_op *op;
        struct multicall_space mcs;
        unsigned long mfn;

        if (cr3)
                mfn = pfn_to_mfn(PFN_DOWN(cr3));
        else
                mfn = 0;

        WARN_ON(mfn == 0 && kernel);

        mcs = __xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
        op->arg1.mfn = mfn;

        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        if (kernel) {
                x86_write_percpu(xen_cr3, cr3);

                /* Update xen_current_cr3 once the batch has actually
                   been submitted. */
                xen_mc_callback(set_current_cr3, (void *)cr3);
        }
}

static void xen_write_cr3(unsigned long cr3)
{
        BUG_ON(preemptible());

        xen_mc_batch();  /* disables interrupts */

        /* Update while interrupts are disabled, so it's atomic with
           respect to IPIs */
        x86_write_percpu(xen_cr3, cr3);

        __xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
        {
                pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
                if (user_pgd)
                        __xen_write_cr3(false, __pa(user_pgd));
                else
                        __xen_write_cr3(false, 0);
        }
#endif

        xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
        int ret;

        ret = 0;

        switch (msr) {
#ifdef CONFIG_X86_64
                unsigned which;
                u64 base;

        case MSR_FS_BASE:               which = SEGBASE_FS; goto set;
        case MSR_KERNEL_GS_BASE:        which = SEGBASE_GS_USER; goto set;
        case MSR_GS_BASE:               which = SEGBASE_GS_KERNEL; goto set;

        set:
                base = ((u64)high << 32) | low;
                if (HYPERVISOR_set_segment_base(which, base) != 0)
                        ret = -EFAULT;
                break;
#endif

        case MSR_STAR:
        case MSR_CSTAR:
        case MSR_LSTAR:
        case MSR_SYSCALL_MASK:
        case MSR_IA32_SYSENTER_CS:
        case MSR_IA32_SYSENTER_ESP:
        case MSR_IA32_SYSENTER_EIP:
                /* Fast syscall setup is all done in hypercalls, so
                   these are all ignored.  Stub them out here to stop
                   Xen console noise. */
                break;

        default:
                ret = native_write_msr_safe(msr, low, high);
        }

        return ret;
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
        BUG_ON(mem_map);        /* should only be used early */
#endif
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void xen_release_pte_init(unsigned long pfn)
{
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
        struct mmuext_op op;
        op.cmd = cmd;
        op.arg1.mfn = pfn_to_mfn(pfn);
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                BUG();
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned level)
{
        struct page *page = pfn_to_page(pfn);

        if (PagePinned(virt_to_page(mm->pgd))) {
                SetPagePinned(page);

                vm_unmap_aliases();
                if (!PageHighMem(page)) {
                        make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
                        if (level == PT_PTE && USE_SPLIT_PTLOCKS)
                                pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
                } else {
                        /* make sure there are no stray mappings of
                           this page */
                        kmap_flush_unused();
                }
        }
}

static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
        xen_alloc_ptpage(mm, pfn, PT_PTE);
}

static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
        xen_alloc_ptpage(mm, pfn, PT_PMD);
}

static int xen_pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = mm->pgd;
        int ret = 0;

        BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
        {
                struct page *page = virt_to_page(pgd);
                pgd_t *user_pgd;

                BUG_ON(page->private != 0);

                ret = -ENOMEM;

                user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                page->private = (unsigned long)user_pgd;

                if (user_pgd != NULL) {
                        user_pgd[pgd_index(VSYSCALL_START)] =
                                __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
                        ret = 0;
                }

                BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
        }
#endif

        return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
        pgd_t *user_pgd = xen_get_user_pgd(pgd);

        if (user_pgd)
                free_page((unsigned long)user_pgd);
#endif
}

/* This should never happen until we're OK to use struct page */
static void xen_release_ptpage(unsigned long pfn, unsigned level)
{
        struct page *page = pfn_to_page(pfn);

        if (PagePinned(page)) {
                if (!PageHighMem(page)) {
                        if (level == PT_PTE && USE_SPLIT_PTLOCKS)
                                pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
                        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
                }
                ClearPagePinned(page);
        }
}

static void xen_release_pte(unsigned long pfn)
{
        xen_release_ptpage(pfn, PT_PTE);
}

static void xen_release_pmd(unsigned long pfn)
{
        xen_release_ptpage(pfn, PT_PMD);
}

#if PAGETABLE_LEVELS == 4
static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
        xen_alloc_ptpage(mm, pfn, PT_PUD);
}

static void xen_release_pud(unsigned long pfn)
{
        xen_release_ptpage(pfn, PT_PUD);
}
#endif

#ifdef CONFIG_HIGHPTE
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
        pgprot_t prot = PAGE_KERNEL;

        if (PagePinned(page))
                prot = PAGE_KERNEL_RO;

        if (0 && PageHighMem(page))
                printk("mapping highpte %lx type %d prot %s\n",
                       page_to_pfn(page), type,
                       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");

        return kmap_atomic_prot(page, type, prot);
}
#endif

#ifdef CONFIG_X86_32
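/*
 * How the masking below works: (old & _PAGE_RW) | ~_PAGE_RW is
 * all-ones when the existing pte was writable and ~_PAGE_RW when it
 * wasn't, so ANDing it into the new pte strips _PAGE_RW exactly when
 * the existing mapping was read-only.
 */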
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
        /* If there's an existing pte, then don't allow _PAGE_RW to be set */
        if (pte_val_ma(*ptep) & _PAGE_PRESENT)
                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
                               pte_val_ma(pte));

        return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
        pte = mask_rw_pte(ptep, pte);

        xen_set_pte(ptep, pte);
}
#endif

static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

void xen_setup_shared_info(void)
{
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                set_fixmap(FIX_PARAVIRT_BOOTMAP,
                           xen_start_info->shared_info);

                HYPERVISOR_shared_info =
                        (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
        } else
                HYPERVISOR_shared_info =
                        (struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
        /* In UP this is as good a place as any to set up shared info */
        xen_setup_vcpu_info_placement();
#endif

        xen_setup_mfn_list_list();
}

static __init void xen_pagetable_setup_done(pgd_t *base)
{
        xen_setup_shared_info();
}

static __init void xen_post_allocator_init(void)
{
        pv_mmu_ops.set_pte = xen_set_pte;
        pv_mmu_ops.set_pmd = xen_set_pmd;
        pv_mmu_ops.set_pud = xen_set_pud;
#if PAGETABLE_LEVELS == 4
        pv_mmu_ops.set_pgd = xen_set_pgd;
#endif

        /* This will work as long as patching hasn't happened yet
           (which it hasn't) */
        pv_mmu_ops.alloc_pte = xen_alloc_pte;
        pv_mmu_ops.alloc_pmd = xen_alloc_pmd;
        pv_mmu_ops.release_pte = xen_release_pte;
        pv_mmu_ops.release_pmd = xen_release_pmd;
#if PAGETABLE_LEVELS == 4
        pv_mmu_ops.alloc_pud = xen_alloc_pud;
        pv_mmu_ops.release_pud = xen_release_pud;
#endif

#ifdef CONFIG_X86_64
        SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
        xen_mark_init_mm_pinned();
}

/* This is called once we have the cpu_possible_map */
void xen_setup_vcpu_info_placement(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                xen_vcpu_setup(cpu);

        /* If xen_vcpu_setup managed to place the vcpu_info within the
           percpu area for all cpus, make use of it. */
        if (have_vcpu_info_placement) {
                printk(KERN_INFO "Xen: using vcpu_info placement\n");

                pv_irq_ops.save_fl = xen_save_fl_direct;
                pv_irq_ops.restore_fl = xen_restore_fl_direct;
                pv_irq_ops.irq_disable = xen_irq_disable_direct;
                pv_irq_ops.irq_enable = xen_irq_enable_direct;
                pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
        }
}

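/*
 * Patch hook for paravirt call sites.  For the irq ops that have
 * "direct" (vcpu_info-based) implementations, we inline a copy of the
 * corresponding xen_*_direct stub when it fits at the call site; the
 * xen_*_direct_reloc symbols mark an address inside that code which
 * must be relocated.  Everything else falls through to the default
 * patcher.
 */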
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
                          unsigned long addr, unsigned len)
{
        char *start, *end, *reloc;
        unsigned ret;

        start = end = reloc = NULL;

#define SITE(op, x)                                                     \
        case PARAVIRT_PATCH(op.x):                                      \
        if (have_vcpu_info_placement) {                                 \
                start = (char *)xen_##x##_direct;                       \
                end = xen_##x##_direct_end;                             \
                reloc = xen_##x##_direct_reloc;                         \
        }                                                               \
        goto patch_site

        switch (type) {
                SITE(pv_irq_ops, irq_enable);
                SITE(pv_irq_ops, irq_disable);
                SITE(pv_irq_ops, save_fl);
                SITE(pv_irq_ops, restore_fl);
#undef SITE

        patch_site:
                if (start == NULL || (end-start) > len)
                        goto default_patch;

                ret = paravirt_patch_insns(insnbuf, len, start, end);

                /* Note: because reloc is assigned from something that
                   appears to be an array, gcc assumes it's non-null,
                   but doesn't know its relationship with start and
                   end. */
                if (reloc > start && reloc < end) {
                        int reloc_off = reloc - start;
                        long *relocp = (long *)(insnbuf + reloc_off);
                        long delta = start - (char *)addr;

                        *relocp += delta;
                }
                break;

        default_patch:
        default:
                ret = paravirt_patch_default(type, clobbers, insnbuf,
                                             addr, len);
                break;
        }

        return ret;
}

static void xen_set_fixmap(unsigned idx, unsigned long phys, pgprot_t prot)
{
        pte_t pte;

        phys >>= PAGE_SHIFT;

        switch (idx) {
        case FIX_BTMAP_END ... FIX_BTMAP_BEGIN:
#ifdef CONFIG_X86_F00F_BUG
        case FIX_F00F_IDT:
#endif
#ifdef CONFIG_X86_32
        case FIX_WP_TEST:
        case FIX_VDSO:
# ifdef CONFIG_HIGHMEM
        case FIX_KMAP_BEGIN ... FIX_KMAP_END:
# endif
#else
        case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        case FIX_APIC_BASE:     /* maps dummy local APIC */
#endif
                pte = pfn_pte(phys, prot);
                break;

        default:
                pte = mfn_pte(phys, prot);
                break;
        }

        __native_set_fixmap(idx, pte);

#ifdef CONFIG_X86_64
        /* Replicate changes to map the vsyscall page into the user
           pagetable vsyscall mapping. */
        if (idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) {
                unsigned long vaddr = __fix_to_virt(idx);
                set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte);
        }
#endif
}

static const struct pv_info xen_info __initdata = {
        .paravirt_enabled = 1,
        .shared_kernel_pmd = 0,

        .name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initdata = {
        .patch = xen_patch,

        .banner = xen_banner,
        .memory_setup = xen_memory_setup,
        .arch_setup = xen_arch_setup,
        .post_allocator_init = xen_post_allocator_init,
};

static const struct pv_time_ops xen_time_ops __initdata = {
        .time_init = xen_time_init,

        .set_wallclock = xen_set_wallclock,
        .get_wallclock = xen_get_wallclock,
        .get_tsc_khz = xen_tsc_khz,
        .sched_clock = xen_sched_clock,
};

static const struct pv_cpu_ops xen_cpu_ops __initdata = {
        .cpuid = xen_cpuid,

        .set_debugreg = xen_set_debugreg,
        .get_debugreg = xen_get_debugreg,

        .clts = xen_clts,

        .read_cr0 = native_read_cr0,
        .write_cr0 = xen_write_cr0,

        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = xen_write_cr4,

        .wbinvd = native_wbinvd,

        .read_msr = native_read_msr_safe,
        .write_msr = xen_write_msr_safe,
        .read_tsc = native_read_tsc,
        .read_pmc = native_read_pmc,

        .iret = xen_iret,
        .irq_enable_sysexit = xen_sysexit,
#ifdef CONFIG_X86_64
        .usergs_sysret32 = xen_sysret32,
        .usergs_sysret64 = xen_sysret64,
#endif

        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
        .load_gdt = xen_load_gdt,
        .load_idt = xen_load_idt,
        .load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
        .load_gs_index = xen_load_gs_index,
#endif

        .alloc_ldt = xen_alloc_ldt,
        .free_ldt = xen_free_ldt,

        .store_gdt = native_store_gdt,
        .store_idt = native_store_idt,
        .store_tr = xen_store_tr,

        .write_ldt_entry = xen_write_ldt_entry,
        .write_gdt_entry = xen_write_gdt_entry,
        .write_idt_entry = xen_write_idt_entry,
        .load_sp0 = xen_load_sp0,

        .set_iopl_mask = xen_set_iopl_mask,
        .io_delay = xen_io_delay,

        /* Xen takes care of %gs when switching to usermode for us */
        .swapgs = paravirt_nop,

        .lazy_mode = {
                .enter = paravirt_enter_lazy_cpu,
                .leave = xen_leave_lazy,
        },
};

static const struct pv_apic_ops xen_apic_ops __initdata = {
#ifdef CONFIG_X86_LOCAL_APIC
        .setup_boot_clock = paravirt_nop,
        .setup_secondary_clock = paravirt_nop,
        .startup_ipi_hook = paravirt_nop,
#endif
};

static const struct pv_mmu_ops xen_mmu_ops __initdata = {
        .pagetable_setup_start = xen_pagetable_setup_start,
        .pagetable_setup_done = xen_pagetable_setup_done,

        .read_cr2 = xen_read_cr2,
        .write_cr2 = xen_write_cr2,

        .read_cr3 = xen_read_cr3,
        .write_cr3 = xen_write_cr3,

        .flush_tlb_user = xen_flush_tlb,
        .flush_tlb_kernel = xen_flush_tlb,
        .flush_tlb_single = xen_flush_tlb_single,
        .flush_tlb_others = xen_flush_tlb_others,

        .pte_update = paravirt_nop,
        .pte_update_defer = paravirt_nop,

        .pgd_alloc = xen_pgd_alloc,
        .pgd_free = xen_pgd_free,

        .alloc_pte = xen_alloc_pte_init,
        .release_pte = xen_release_pte_init,
        .alloc_pmd = xen_alloc_pte_init,
        .alloc_pmd_clone = paravirt_nop,
        .release_pmd = xen_release_pte_init,

#ifdef CONFIG_HIGHPTE
        .kmap_atomic_pte = xen_kmap_atomic_pte,
#endif

#ifdef CONFIG_X86_64
        .set_pte = xen_set_pte,
#else
        .set_pte = xen_set_pte_init,
#endif
        .set_pte_at = xen_set_pte_at,
        .set_pmd = xen_set_pmd_hyper,

        .ptep_modify_prot_start = __ptep_modify_prot_start,
        .ptep_modify_prot_commit = __ptep_modify_prot_commit,

        .pte_val = xen_pte_val,
        .pte_flags = native_pte_flags,
        .pgd_val = xen_pgd_val,

        .make_pte = xen_make_pte,
        .make_pgd = xen_make_pgd,

#ifdef CONFIG_X86_PAE
        .set_pte_atomic = xen_set_pte_atomic,
        .set_pte_present = xen_set_pte_at,
        .pte_clear = xen_pte_clear,
        .pmd_clear = xen_pmd_clear,
#endif  /* CONFIG_X86_PAE */
        .set_pud = xen_set_pud_hyper,

        .make_pmd = xen_make_pmd,
        .pmd_val = xen_pmd_val,

#if PAGETABLE_LEVELS == 4
        .pud_val = xen_pud_val,
        .make_pud = xen_make_pud,
        .set_pgd = xen_set_pgd_hyper,

        .alloc_pud = xen_alloc_pte_init,
        .release_pud = xen_release_pte_init,
#endif  /* PAGETABLE_LEVELS == 4 */

        .activate_mm = xen_activate_mm,
        .dup_mmap = xen_dup_mmap,
        .exit_mmap = xen_exit_mmap,

        .lazy_mode = {
                .enter = paravirt_enter_lazy_mmu,
                .leave = xen_leave_lazy,
        },

        .set_fixmap = xen_set_fixmap,
};

static void xen_reboot(int reason)
{
        struct sched_shutdown r = { .reason = reason };

#ifdef CONFIG_SMP
        smp_send_stop();
#endif

        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
}

static void xen_restart(char *msg)
{
        xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
        xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
        xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
        xen_reboot(SHUTDOWN_crash);
}

static const struct machine_ops __initdata xen_machine_ops = {
        .restart = xen_restart,
        .halt = xen_machine_halt,
        .power_off = xen_machine_halt,
        .shutdown = xen_machine_halt,
        .crash_shutdown = xen_crash_shutdown,
        .emergency_restart = xen_emergency_restart,
};


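/*
 * On 32-bit, the hypervisor occupies the top of the virtual address
 * space, starting at HYPERVISOR_VIRT_START (or pp.virt_start, if the
 * platform says otherwise).  reserve_top_address() takes the amount
 * to set aside, hence the -top: as an unsigned quantity that is
 * 4GB - top.
 */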
static void __init xen_reserve_top(void)
{
#ifdef CONFIG_X86_32
        unsigned long top = HYPERVISOR_VIRT_START;
        struct xen_platform_parameters pp;

        if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
                top = pp.virt_start;

        reserve_top_address(-top);
#endif  /* CONFIG_X86_32 */
}

/*
 * Like __va(), but returns address in the kernel mapping (which is
 * all we have until the physical memory mapping has been set up).
 */
static void *__ka(phys_addr_t paddr)
{
#ifdef CONFIG_X86_64
        return (void *)(paddr + __START_KERNEL_map);
#else
        return __va(paddr);
#endif
}

/* Convert a machine address to physical address */
static unsigned long m2p(phys_addr_t maddr)
{
        phys_addr_t paddr;

        maddr &= PTE_PFN_MASK;
        paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT;

        return paddr;
}

/* Convert a machine address to kernel virtual */
static void *m2v(phys_addr_t maddr)
{
        return __ka(m2p(maddr));
}

static void set_page_prot(void *addr, pgprot_t prot)
{
        unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
        pte_t pte = pfn_pte(pfn, prot);

        if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
                BUG();
}

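/*
 * Build a pfn == vfn identity mapping for the first max_pfn pages
 * under the given pmd, reusing any pte pages Xen already provided
 * and carving the rest out of level1_ident_pgt.  Everything touched
 * is then made read-only so the tables can later be pinned.
 */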
static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
        unsigned pmdidx, pteidx;
        unsigned ident_pte;
        unsigned long pfn;

        ident_pte = 0;
        pfn = 0;
        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
                pte_t *pte_page;

                /* Reuse or allocate a page of ptes */
                if (pmd_present(pmd[pmdidx]))
                        pte_page = m2v(pmd[pmdidx].pmd);
                else {
                        /* Check for free pte pages */
                        if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
                                break;

                        pte_page = &level1_ident_pgt[ident_pte];
                        ident_pte += PTRS_PER_PTE;

                        pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE);
                }

                /* Install mappings */
                for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
                        pte_t pte;

                        if (pfn > max_pfn_mapped)
                                max_pfn_mapped = pfn;

                        if (!pte_none(pte_page[pteidx]))
                                continue;

                        pte = pfn_pte(pfn, PAGE_KERNEL_EXEC);
                        pte_page[pteidx] = pte;
                }
        }

        for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE)
                set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO);

        set_page_prot(pmd, PAGE_KERNEL_RO);
}

#ifdef CONFIG_X86_64
static void convert_pfn_mfn(void *v)
{
        pte_t *pte = v;
        int i;

        /* All levels are converted the same way, so just treat them
           as ptes. */
        for (i = 0; i < PTRS_PER_PTE; i++)
                pte[i] = xen_make_pte(pte[i].pte);
}

/*
 * Set up the initial kernel pagetable.
 *
 * We can construct this by grafting the Xen provided pagetable into
 * head_64.S's preconstructed pagetables.  We copy the Xen L2's into
 * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt.  This
 * means that only the kernel has a physical mapping to start with -
 * but that's enough to get __va working.  We need to fill in the rest
 * of the physical mapping once some sort of allocator has been set
 * up.
 */
static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
                                                unsigned long max_pfn)
{
        pud_t *l3;
        pmd_t *l2;

        /* Zap identity mapping */
        init_level4_pgt[0] = __pgd(0);

        /* Pre-constructed entries are in pfn, so convert to mfn */
        convert_pfn_mfn(init_level4_pgt);
        convert_pfn_mfn(level3_ident_pgt);
        convert_pfn_mfn(level3_kernel_pgt);

        l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
        l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);

        memcpy(level2_ident_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);
        memcpy(level2_kernel_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

        l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd);
        l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud);
        memcpy(level2_fixmap_pgt, l2, sizeof(pmd_t) * PTRS_PER_PMD);

        /* Set up identity map */
        xen_map_identity_early(level2_ident_pgt, max_pfn);

        /* Make pagetable pieces RO */
        set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
        set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);

        /* Pin down new L4 */
        pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
                          PFN_DOWN(__pa_symbol(init_level4_pgt)));

        /* Unpin Xen-provided one */
        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

        /* Switch over */
        pgd = init_level4_pgt;

        /*
         * At this stage there can be no user pgd, and no page
         * structure to attach it to, so make sure we just set kernel
         * pgd.
         */
        xen_mc_batch();
        __xen_write_cr3(true, __pa(pgd));
        xen_mc_issue(PARAVIRT_LAZY_CPU);

        reserve_early(__pa(xen_start_info->pt_base),
                      __pa(xen_start_info->pt_base +
                           xen_start_info->nr_pt_frames * PAGE_SIZE),
                      "XEN PAGETABLES");

        return pgd;
}
#else   /* !CONFIG_X86_64 */
static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;

static __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
                                                unsigned long max_pfn)
{
        pmd_t *kernel_pmd;

        init_pg_tables_start = __pa(pgd);
        init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;
        max_pfn_mapped = PFN_DOWN(init_pg_tables_end + 512*1024);

        kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
        memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);

        xen_map_identity_early(level2_kernel_pgt, max_pfn);

        memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
        set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY],
                        __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT));

        set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
        set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
        set_page_prot(empty_zero_page, PAGE_KERNEL_RO);

        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

        xen_write_cr3(__pa(swapper_pg_dir));

        pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

        return swapper_pg_dir;
}
#endif  /* CONFIG_X86_64 */

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
        pgd_t *pgd;

        if (!xen_start_info)
                return;

        xen_domain_type = XEN_PV_DOMAIN;

        BUG_ON(memcmp(xen_start_info->magic, "xen-3", 5) != 0);

        xen_setup_features();

        /* Install Xen paravirt ops */
        pv_info = xen_info;
        pv_init_ops = xen_init_ops;
        pv_time_ops = xen_time_ops;
        pv_cpu_ops = xen_cpu_ops;
        pv_apic_ops = xen_apic_ops;
        pv_mmu_ops = xen_mmu_ops;

        xen_init_irq_ops();

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * set up the basic apic ops.
         */
        apic_ops = &xen_basic_apic_ops;
#endif

        if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
                pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
                pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
        }

        machine_ops = xen_machine_ops;

#ifdef CONFIG_X86_64
        /* Disable until direct per-cpu data access. */
        have_vcpu_info_placement = 0;
        x86_64_init_pda();
#endif

        xen_smp_init();

        /* Get mfn list */
        if (!xen_feature(XENFEAT_auto_translated_physmap))
                xen_build_dynamic_phys_to_machine();

        pgd = (pgd_t *)xen_start_info->pt_base;

        /* Prevent unwanted bits from being set in PTEs. */
        __supported_pte_mask &= ~_PAGE_GLOBAL;
        if (!xen_initial_domain())
                __supported_pte_mask &= ~(_PAGE_PWT | _PAGE_PCD);

        /* Don't do the full vcpu_info placement stuff until we have a
           possible map and a non-dummy shared_info. */
        per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];

        xen_raw_console_write("mapping kernel into physical memory\n");
        pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);

        init_mm.pgd = pgd;

        /* keep using Xen gdt for now; no urgent need to change it */

        pv_info.kernel_rpl = 1;
        if (xen_feature(XENFEAT_supervisor_mode_kernel))
                pv_info.kernel_rpl = 0;

        /* set the limit of our address space */
        xen_reserve_top();

#ifdef CONFIG_X86_32
        /* set up basic CPUID stuff */
        cpu_detect(&new_cpu_data);
        new_cpu_data.hard_math = 1;
        new_cpu_data.x86_capability[0] = cpuid_edx(1);
#endif

        /* Poke various useful things into boot_params */
        boot_params.hdr.type_of_loader = (9 << 4) | 0;
        boot_params.hdr.ramdisk_image = xen_start_info->mod_start
                ? __pa(xen_start_info->mod_start) : 0;
        boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
        boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);

        if (!xen_initial_domain()) {
                add_preferred_console("xenboot", 0, NULL);
                add_preferred_console("tty", 0, NULL);
                add_preferred_console("hvc", 0, NULL);
        }

        xen_raw_console_write("about to get started...\n");

        /* Start the world */
#ifdef CONFIG_X86_32
        i386_start_kernel();
#else
        x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}