1 /******************************************************************************
2 * arch/ia64/xen/xen_pv_ops.c
4 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
5 * VA Linux Systems Japan K.K.
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 #include <linux/console.h>
24 #include <linux/irq.h>
25 #include <linux/kernel.h>
27 #include <linux/unistd.h>
29 #include <asm/xen/hypervisor.h>
30 #include <asm/xen/xencomm.h>
31 #include <asm/xen/privop.h>
36 /***************************************************************************
/* Paravirt identification advertised to the kernel when running on Xen.
 * NOTE(review): the closing "};" of this initializer is not visible here —
 * lines appear to have been dropped from this extract. */
39 static struct pv_info xen_info __initdata = {
40 .kernel_rpl = 2, /* or 1: determine at runtime */
41 .paravirt_enabled = 1,
/* ar.rsc bits 3:2 hold the privilege level (pl) field; these macros
 * extract it so the actual kernel rpl can be probed at boot. */
45 #define IA64_RSC_PL_SHIFT 2
46 #define IA64_RSC_PL_BIT_SIZE 2
47 #define IA64_RSC_PL_MASK \
48 (((1UL << IA64_RSC_PL_BIT_SIZE) - 1) << IA64_RSC_PL_SHIFT)
/* Body fragment of the rpl-probe init function (its signature is not
 * visible in this extract): read ar.rsc, extract the pl field, and record
 * the runtime privilege level in xen_info.kernel_rpl. */
53 /* Xenified Linux/ia64 may run on pl = 1 or 2.
54 * determine at run time. */
55 unsigned long rsc = ia64_getreg(_IA64_REG_AR_RSC);
56 unsigned int rpl = (rsc & IA64_RSC_PL_MASK) >> IA64_RSC_PL_SHIFT;
57 xen_info.kernel_rpl = rpl;
60 /***************************************************************************
62 * initialization hooks.
/* Panic path: save the kernel stack pointer from the unwind frame, then
 * tell the hypervisor to crash this domain. Never returns.
 * NOTE(review): return-type lines, braces and trailing returns of these
 * two functions are missing from this extract. */
66 xen_panic_hypercall(struct unw_frame_info *info, void *arg)
68 current->thread.ksp = (__u64)info->sw - 16;
69 HYPERVISOR_shutdown(SHUTDOWN_crash);
70 /* we're never actually going to get here... */
/* panic_notifier_list callback: walk to a clean unwind state and issue the
 * crash hypercall above. */
74 xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
76 unw_init_running(xen_panic_hypercall, NULL);
77 /* we're never actually going to get here... */
/* Notifier registered from xen_arch_setup_early(); priority 0 so other
 * panic handlers get a chance to run first. */
81 static struct notifier_block xen_panic_block = {
82 xen_panic_event, NULL, 0 /* try to go last */
/* pm_power_off hook: ask the hypervisor to power off this domain. */
85 static void xen_pm_power_off(void)
88 HYPERVISOR_shutdown(SHUTDOWN_poweroff);
/* Fragment of xen_banner(): format-string and arguments of a printk that
 * reports rpl, start_info pfn, page count and flags at boot.
 * NOTE(review): the printk() call line itself is missing from this extract. */
95 "Running on Xen! pl = %d start_info_pfn=0x%lx nr_pages=%ld "
98 HYPERVISOR_shared_info->arch.start_info_pfn,
99 xen_start_info->nr_pages, xen_start_info->flags);
/* Reserve the single page holding the Xen start_info so the kernel's
 * memory setup does not hand it out. The pfn comes from the shared info
 * page published by the hypervisor. */
103 xen_reserve_memory(struct rsvd_region *region)
105 region->start = (unsigned long)__va(
106 (HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
107 region->end = region->start + PAGE_SIZE;
/* Early boot setup for a Xen PV domain: locate start_info via the shared
 * info page, bring up xencomm (required before any hypercall argument
 * marshalling), query features, register the panic notifier, install the
 * poweroff hook and enable optional hypervisor features.
 * NOTE(review): some lines (braces, the notifier argument line) are
 * missing from this extract. */
112 xen_arch_setup_early(void)
114 struct shared_info *s;
115 BUG_ON(!xen_pv_domain());
117 s = HYPERVISOR_shared_info;
118 xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
120 /* Must be done before any hypercall. */
121 xencomm_initialize();
123 xen_setup_features();
124 /* Register a call for panic conditions. */
125 atomic_notifier_chain_register(&panic_notifier_list,
127 pm_power_off = xen_pm_power_off;
129 xen_ia64_enable_opt_feature();
/* Console setup hook: prefer the xenboot early console and tty, and
 * (presumably under a config conditional dropped from this extract) the
 * hvc Xen virtual console. */
133 xen_arch_setup_console(char **cmdline_p)
135 add_preferred_console("xenboot", 0, NULL);
136 add_preferred_console("tty", 0, NULL);
138 add_preferred_console("hvc", 0, NULL);
140 #if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
/* MCA setup hook fragment — body not visible in this extract. */
146 xen_arch_setup_nomca(void)
/* After SMP boot-CPU prep: switch to per-vcpu info placement. */
152 xen_post_smp_prepare_boot_cpu(void)
154 xen_setup_vcpu_info_placement();
/* pv_init_ops implementation for Xen: wires the boot-time hooks defined
 * above into the paravirt dispatch structure (some member lines and the
 * closing "};" are missing from this extract). */
157 static const struct pv_init_ops xen_init_ops __initconst = {
158 .banner = xen_banner,
160 .reserve_memory = xen_reserve_memory,
162 .arch_setup_early = xen_arch_setup_early,
163 .arch_setup_console = xen_arch_setup_console,
164 .arch_setup_nomca = xen_arch_setup_nomca,
166 .post_smp_prepare_boot_cpu = xen_post_smp_prepare_boot_cpu,
169 /***************************************************************************
/* Fast-syscall (fsys) replacement data: the Xen-specific fsyscall table
 * and bubble-down entry point, both defined in assembly elsewhere. */
174 extern unsigned long xen_fsyscall_table[NR_syscalls];
175 extern char xen_fsys_bubble_down[];
176 struct pv_fsys_data xen_fsys_data __initdata = {
177 .fsyscall_table = (unsigned long *)xen_fsyscall_table,
178 .fsys_bubble_down = (void *)xen_fsys_bubble_down,
181 /***************************************************************************
183 * patchdata addresses
/* Gate-page patch lists: DECLARE() emits the extern start/end symbol pair
 * for one named patchlist; ASSIGN() fills the corresponding pv_patchdata
 * members from those symbols. */
186 #define DECLARE(name) \
187 extern unsigned long __xen_start_gate_##name##_patchlist[]; \
188 extern unsigned long __xen_end_gate_##name##_patchlist[]
191 DECLARE(brl_fsys_bubble_down);
193 DECLARE(mckinley_e9);
195 extern unsigned long __xen_start_gate_section[];
197 #define ASSIGN(name) \
198 .start_##name##_patchlist = \
199 (unsigned long)__xen_start_gate_##name##_patchlist, \
200 .end_##name##_patchlist = \
201 (unsigned long)__xen_end_gate_##name##_patchlist
/* NOTE(review): other ASSIGN() entries (e.g. mckinley_e9) and the closing
 * "};" appear to be missing from this extract. */
203 static struct pv_patchdata xen_patchdata __initdata = {
205 ASSIGN(brl_fsys_bubble_down),
209 .gate_section = (void*)__xen_start_gate_section,
212 /***************************************************************************
/* Set the interval timer match register, translating from guest time to
 * hypervisor time by subtracting the per-vcpu itc offset. */
218 xen_set_itm_with_offset(unsigned long val)
220 /* ia64_cpu_local_tick() calls this with interrupt enabled. */
221 /* WARN_ON(!irqs_disabled()); */
222 xen_set_itm(val - XEN_MAPPEDREGS->itc_offset);
/* Inverse of the above: read cr.itm and convert back to guest time. */
226 xen_get_itm_with_offset(void)
228 /* unused at this moment */
229 printk(KERN_DEBUG "%s is called.\n", __func__);
231 WARN_ON(!irqs_disabled());
232 return ia64_native_getreg(_IA64_REG_CR_ITM) +
233 XEN_MAPPEDREGS->itc_offset;
236 /* ia64_set_itc() is only called by
237 * cpu_init() with ia64_set_itc(0) and ia64_sync_itc().
238 * So XEN_MAPPEDREGS->itc_offset can be considered as almost constant.
/* Record the guest's desired itc value as an offset from the real
 * (native) itc, and remember it as the last value returned.
 * NOTE(review): the declaration of local "mitc" and the function braces
 * are missing from this extract. */
241 xen_set_itc(unsigned long val)
245 WARN_ON(!irqs_disabled());
246 mitc = ia64_native_getreg(_IA64_REG_AR_ITC);
247 XEN_MAPPEDREGS->itc_offset = val - mitc;
248 XEN_MAPPEDREGS->itc_last = val;
/* Fragment of xen_get_itc(): return a monotonically non-decreasing guest
 * itc. The cmpxchg retry loop publishes the newest value into
 * XEN_MAPPEDREGS->itc_last so concurrent readers never observe time going
 * backwards; the second (irqs-disabled) path updates itc_last directly.
 * NOTE(review): several lines (loop head, max/comparison logic, return)
 * are missing from this extract — exact control flow cannot be confirmed
 * from what is visible. */
255 unsigned long itc_offset;
256 unsigned long itc_last;
257 unsigned long ret_itc_last;
259 itc_offset = XEN_MAPPEDREGS->itc_offset;
261 itc_last = XEN_MAPPEDREGS->itc_last;
262 res = ia64_native_getreg(_IA64_REG_AR_ITC);
266 ret_itc_last = cmpxchg(&XEN_MAPPEDREGS->itc_last,
268 } while (unlikely(ret_itc_last != itc_last));
272 /* ia64_itc_udelay() calls ia64_get_itc() with interrupt enabled.
273 Should it be paravirtualized instead? */
274 WARN_ON(!irqs_disabled());
275 itc_offset = XEN_MAPPEDREGS->itc_offset;
276 itc_last = XEN_MAPPEDREGS->itc_last;
277 res = ia64_native_getreg(_IA64_REG_AR_ITC);
281 XEN_MAPPEDREGS->itc_last = res;
/* Paravirtualized register write: dispatch privileged register writes to
 * the matching Xen hyperprivop (kr0-7, eflag, itc, tpr, itm, eoi) and fall
 * back to the native setreg for everything else.
 * NOTE(review): the switch statement's braces, break statements and most
 * case bodies are missing from this extract. */
286 static void xen_setreg(int regnum, unsigned long val)
289 case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
290 xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
292 #ifdef CONFIG_IA32_SUPPORT
293 case _IA64_REG_AR_EFLAG:
297 case _IA64_REG_AR_ITC:
300 case _IA64_REG_CR_TPR:
303 case _IA64_REG_CR_ITM:
304 xen_set_itm_with_offset(val);
306 case _IA64_REG_CR_EOI:
310 ia64_native_setreg_func(regnum, val);
/* Paravirtualized register read: counterpart of xen_setreg() — route
 * eflag/itc/itm/ivr/tpr reads through Xen, default to native getreg.
 * NOTE(review): braces, breaks, the "res" declaration and the return are
 * missing from this extract. */
315 static unsigned long xen_getreg(int regnum)
323 #ifdef CONFIG_IA32_SUPPORT
324 case _IA64_REG_AR_EFLAG:
325 res = xen_get_eflag();
328 case _IA64_REG_AR_ITC:
331 case _IA64_REG_CR_ITM:
332 res = xen_get_itm_with_offset();
334 case _IA64_REG_CR_IVR:
337 case _IA64_REG_CR_TPR:
341 res = ia64_native_getreg_func(regnum);
347 /* turning on interrupts is a bit more complicated.. write to the
348 * memory-mapped virtual psr.i bit first (to avoid race condition),
349 * then if any interrupts were pending, we have to execute a hyperprivop
350 * to ensure the pending interrupt gets delivered; else we're done! */
/* xen_ssm_i() fragment: set virtual psr.i, then kick the hypervisor only
 * if interrupts were previously masked and one is pending. */
354 int old = xen_get_virtual_psr_i();
355 xen_set_virtual_psr_i(1);
357 if (!old && xen_get_virtual_pend())
361 /* turning off interrupts can be paravirtualized simply by writing
362 * to a memory-mapped virtual psr.i bit (implemented as a 16-bit bool) */
/* xen_rsm_i() fragment: mask interrupts by clearing virtual psr.i. */
366 xen_set_virtual_psr_i(0);
/* xen_get_psr_i() fragment: report IA64_PSR_I iff virtual psr.i is set. */
373 return xen_get_virtual_psr_i() ? IA64_PSR_I : 0;
/* Restore interrupt state from a saved psr mask — re-enable (ssm) when
 * IA64_PSR_I is set; the else branch is missing from this extract. */
377 xen_intrin_local_irq_restore(unsigned long mask)
379 if (mask & IA64_PSR_I)
/* pv_cpu_ops implementation for Xen: CPU/register intrinsics replaced by
 * the paravirtualized versions above (several members and the closing
 * "};" are missing from this extract). */
385 static const struct pv_cpu_ops xen_cpu_ops __initconst = {
388 .get_cpuid = xen_get_cpuid,
389 .get_pmd = xen_get_pmd,
390 .getreg = xen_getreg,
391 .setreg = xen_setreg,
393 .get_rr = xen_get_rr,
394 .set_rr = xen_set_rr,
395 .set_rr0_to_rr4 = xen_set_rr0_to_rr4,
398 .get_psr_i = xen_get_psr_i,
399 .intrin_local_irq_restore
400 = xen_intrin_local_irq_restore,
403 /******************************************************************************
404 * replacement of hand written assembly codes.
/* Entry points of the hand-written Xen assembly replacements (declared as
 * chars so only their addresses are taken). */
407 extern char xen_switch_to;
408 extern char xen_leave_syscall;
409 extern char xen_work_processed_syscall;
410 extern char xen_leave_kernel;
/* Table handed to paravirt_cpu_asm_init() so the core switches its asm
 * entry points to the Xen versions. */
412 const struct pv_cpu_asm_switch xen_cpu_asm_switch = {
413 .switch_to = (unsigned long)&xen_switch_to,
414 .leave_syscall = (unsigned long)&xen_leave_syscall,
415 .work_processed_syscall = (unsigned long)&xen_work_processed_syscall,
416 .leave_kernel = (unsigned long)&xen_leave_kernel,
419 /***************************************************************************
421 * iosapic read/write hooks.
/* PCAT-compat init hook fragment — body not visible in this extract. */
424 xen_pcat_compat_init(void)
/* irq_chip selection hook fragment — body not visible in this extract. */
429 static struct irq_chip*
430 xen_iosapic_get_irq_chip(unsigned long trigger)
/* IOSAPIC register read via the PHYSDEVOP_apic_read hypercall: the
 * physical base is recovered from the uncached-mapped iosapic address.
 * NOTE(review): the "int ret" declaration, the reg assignment, braces and
 * the error check on "ret" are missing from this extract. */
436 xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
438 struct physdev_apic apic_op;
441 apic_op.apic_physbase = (unsigned long)iosapic -
442 __IA64_UNCACHED_OFFSET;
444 ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
447 return apic_op.value;
/* IOSAPIC register write via the PHYSDEVOP_apic_write hypercall; the
 * hypercall's return value is intentionally not checked here. */
451 xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
453 struct physdev_apic apic_op;
455 apic_op.apic_physbase = (unsigned long)iosapic -
456 __IA64_UNCACHED_OFFSET;
459 HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
/* pv_iosapic_ops implementation for Xen: hooks defined above (closing
 * "};" missing from this extract). */
462 static const struct pv_iosapic_ops xen_iosapic_ops __initconst = {
463 .pcat_compat_init = xen_pcat_compat_init,
464 .__get_irq_chip = xen_iosapic_get_irq_chip,
466 .__read = xen_iosapic_read,
467 .__write = xen_iosapic_write,
470 /***************************************************************************
471 * pv_ops initialization
/* Install all Xen pv_ops tables by struct copy into the global paravirt
 * dispatch structures, then switch the assembly entry points.
 * NOTE(review): the function's return-type line, braces, and any calls
 * between these assignments are missing from this extract; the definition
 * also runs past the end of the visible chunk. */
475 xen_setup_pv_ops(void)
479 pv_init_ops = xen_init_ops;
480 pv_fsys_data = xen_fsys_data;
481 pv_patchdata = xen_patchdata;
482 pv_cpu_ops = xen_cpu_ops;
483 pv_iosapic_ops = xen_iosapic_ops;
484 pv_irq_ops = xen_irq_ops;
485 pv_time_ops = xen_time_ops;
487 paravirt_cpu_asm_init(&xen_cpu_asm_switch);